Mirror of https://gitlab.com/famedly/conduit.git (synced 2025-06-27 16:35:59 +00:00)

Commit 41a6bb5ca5: Merge branch 'next' into 'morg-notary'

# Conflicts:
#   DEPLOY.md

81 changed files with 3521 additions and 2930 deletions
.envrc (4 changes)

@@ -1 +1,5 @@
#!/usr/bin/env bash

use flake

PATH_add bin
.gitignore (vendored, 3 changes)

@@ -68,3 +68,6 @@ cached_target

# Direnv cache
/.direnv

# Gitlab CI cache
/.gitlab-ci.d
.gitlab-ci.yml (393 changes)

@@ -1,244 +1,167 @@
stages:
  - build
  - build docker image
  - test
  - upload artifacts
  - ci
  - artifacts
  - publish

variables:
  # Make GitLab CI go fast:
  GIT_SUBMODULE_STRATEGY: recursive
  FF_USE_FASTZIP: 1
  CACHE_COMPRESSION_LEVEL: fastest

# --------------------------------------------------------------------- #
#                  Create and publish docker image                       #
# --------------------------------------------------------------------- #

.docker-shared-settings:
  stage: "build docker image"
  needs: []
  tags: [ "docker" ]
  variables:
    # Docker in Docker:
    DOCKER_BUILDKIT: 1
  image:
    name: docker.io/docker
  services:
    - name: docker.io/docker:dind
      alias: docker
  script:
    - apk add openssh-client
    - eval $(ssh-agent -s)
    - mkdir -p ~/.ssh && chmod 700 ~/.ssh
    - printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config
    - sh .gitlab/setup-buildx-remote-builders.sh
    # Authorize against this project's own image registry:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
    # Build multiplatform image and push to temporary tag:
    - >
      docker buildx build
      --platform "linux/arm/v7,linux/arm64,linux/amd64"
      --pull
      --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
      --push
      --provenance=false
      --file "Dockerfile" .
    # Build multiplatform image to deb stage and extract their .deb files:
    - >
      docker buildx build
      --platform "linux/arm/v7,linux/arm64,linux/amd64"
      --target "packager-result"
      --output="type=local,dest=/tmp/build-output"
      --provenance=false
      --file "Dockerfile" .
    # Build multiplatform image to binary stage and extract their binaries:
    - >
      docker buildx build
      --platform "linux/arm/v7,linux/arm64,linux/amd64"
      --target "builder-result"
      --output="type=local,dest=/tmp/build-output"
      --provenance=false
      --file "Dockerfile" .
    # Copy to GitLab container registry:
    - >
      docker buildx imagetools create
      --tag "$CI_REGISTRY_IMAGE/$TAG"
      --tag "$CI_REGISTRY_IMAGE/$TAG-bullseye"
      --tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
      "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
    # if DockerHub credentials exist, also copy to dockerhub:
    - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
    - >
      if [ -n "${DOCKER_HUB}" ]; then
      docker buildx imagetools create
      --tag "$DOCKER_HUB_IMAGE/$TAG"
      --tag "$DOCKER_HUB_IMAGE/$TAG-bullseye"
      --tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
      "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
      ; fi
    - mv /tmp/build-output ./
  artifacts:
    paths:
      - "./build-output/"

docker:next:
  extends: .docker-shared-settings
  rules:
    - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"'
  variables:
    TAG: "matrix-conduit:next"

docker:master:
  extends: .docker-shared-settings
  rules:
    - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"'
  variables:
    TAG: "matrix-conduit:latest"

docker:tags:
  extends: .docker-shared-settings
  rules:
    - if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG"
  variables:
    TAG: "matrix-conduit:$CI_COMMIT_TAG"

docker build debugging:
  extends: .docker-shared-settings
  rules:
    - if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/"
  variables:
    TAG: "matrix-conduit-docker-tests:latest"

# --------------------------------------------------------------------- #
#                              Run tests                                 #
# --------------------------------------------------------------------- #

cargo check:
  stage: test
  image: docker.io/rust:1.70.0-bullseye
  needs: []
  interruptible: true
  before_script:
    - "rustup show && rustc --version && cargo --version" # Print version info for debugging
    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
  script:
    - cargo check

.test-shared-settings:
  stage: "test"
  needs: []
  image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest"
  tags: ["docker"]
  variables:
    CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
  interruptible: true

test:cargo:
  extends: .test-shared-settings
  before_script:
    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
  script:
    - rustc --version && cargo --version # Print version info for debugging
    - "cargo test --color always --workspace --verbose --locked --no-fail-fast"

test:clippy:
  extends: .test-shared-settings
  allow_failure: true
  before_script:
    - rustup component add clippy
    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
  script:
    - rustc --version && cargo --version # Print version info for debugging
    - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
  artifacts:
    when: always
    reports:
      codequality: gl-code-quality-report.json

test:format:
  extends: .test-shared-settings
  before_script:
    - rustup component add rustfmt
  script:
    - cargo fmt --all -- --check

test:audit:
  extends: .test-shared-settings
  allow_failure: true
  script:
    - cargo audit --color always || true
    - cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json
  artifacts:
    when: always
    reports:
      sast: gl-sast-report.json

test:dockerlint:
  stage: "test"
  needs: []
  image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine
  interruptible: true
  script:
    - hadolint --version
    # First pass: Print for CI log:
    - >
      hadolint
      --no-fail --verbose
      ./Dockerfile
    # Then output the results into a json for GitLab to pretty-print this in the MR:
    - >
      hadolint
      --format gitlab_codeclimate
      --failure-threshold error
      ./Dockerfile > dockerlint.json
  artifacts:
    when: always
    reports:
      codequality: dockerlint.json
    paths:
      - dockerlint.json
  rules:
    - if: '$CI_COMMIT_REF_NAME != "master"'
      changes:
        - docker/*Dockerfile
        - Dockerfile
        - .gitlab-ci.yml
    - if: '$CI_COMMIT_REF_NAME == "master"'
    - if: '$CI_COMMIT_REF_NAME == "next"'

# --------------------------------------------------------------------- #
#        Store binaries as package so they have download urls            #
# --------------------------------------------------------------------- #

# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME:

#publish:package:
#  stage: "upload artifacts"
#  needs:
#    - "docker:tags"
#  rules:
#    - if: "$CI_COMMIT_TAG"
#  image: curlimages/curl:latest
#  tags: ["docker"]
#  variables:
#    GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
#  script:
#    - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"'
#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"'

  # Makes some things print in color
  TERM: ansi

# Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
workflow:
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
    - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS"
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
      when: never
    - if: "$CI_COMMIT_BRANCH"
    - if: "$CI_COMMIT_TAG"
    - if: $CI

before_script:
  # Enable nix-command and flakes
  - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi

  # Add our own binary cache
  - if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi

  # Add alternate binary cache
  - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi

  # Add crane binary cache
  - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi

  # Add nix-community binary cache
  - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
  - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi

  # Install direnv and nix-direnv
  - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi

  # Allow .envrc
  - if command -v nix > /dev/null; then direnv allow; fi

  # Set CARGO_HOME to a cacheable path
  - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"

ci:
  stage: ci
  image: nixos/nix:2.20.4
  script:
    # Cache the inputs required for the devShell
    - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation

    - direnv exec . engage
  cache:
    key: nix
    paths:
      - target
      - .gitlab-ci.d
  rules:
    # CI on upstream runners (only available for maintainers)
    - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true"
    # Manual CI on unprotected branches that are not MRs
    - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false"
      when: manual
    # Manual CI on forks
    - if: $IS_UPSTREAM_CI != "true"
      when: manual
    - if: $CI
  interruptible: true

artifacts:
  stage: artifacts
  image: nixos/nix:2.20.4
  script:
    - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
    - cp result/bin/conduit x86_64-unknown-linux-musl

    - mkdir -p target/release
    - cp result/bin/conduit target/release
    - direnv exec . cargo deb --no-build
    - mv target/debian/*.deb x86_64-unknown-linux-musl.deb

    # Since the OCI image package is based on the binary package, this has the
    # fun side effect of uploading the normal binary too. Conduit users who are
    # deploying with Nix can leverage this fact by adding our binary cache to
    # their systems.
    #
    # Note that although we have an `oci-image-x86_64-unknown-linux-musl`
    # output, we don't build it because it would be largely redundant to this
    # one since it's all containerized anyway.
    - ./bin/nix-build-and-cache .#oci-image
    - cp result oci-image-amd64.tar.gz

    - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
    - cp result/bin/conduit aarch64-unknown-linux-musl

    - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
    - cp result oci-image-arm64v8.tar.gz
  artifacts:
    paths:
      - x86_64-unknown-linux-musl
      - aarch64-unknown-linux-musl
      - x86_64-unknown-linux-musl.deb
      - oci-image-amd64.tar.gz
      - oci-image-arm64v8.tar.gz
  rules:
    # CI required for all MRs
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    # Optional CI on forks
    - if: $IS_UPSTREAM_CI != "true"
      when: manual
      allow_failure: true
    - if: $CI
  interruptible: true

.push-oci-image:
  stage: publish
  image: docker:25.0.0
  services:
    - docker:25.0.0-dind
  variables:
    IMAGE_SUFFIX_AMD64: amd64
    IMAGE_SUFFIX_ARM64V8: arm64v8
  script:
    - docker load -i oci-image-amd64.tar.gz
    - IMAGE_ID_AMD64=$(docker images -q conduit:next)
    - docker load -i oci-image-arm64v8.tar.gz
    - IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
    # Tag and push the architecture specific images
    - docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
    - docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
    - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
    - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
    # Tag the multi-arch image
    - docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
    - docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
    # Tag and push the git ref
    - docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
    - docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
    # Tag git tags as 'latest'
    - |
      if [[ -n "$CI_COMMIT_TAG" ]]; then
        docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
        docker manifest push $IMAGE_NAME:latest
      fi
  dependencies:
    - artifacts
  only:
    - next
    - master
    - tags

oci-image:push-gitlab:
  extends: .push-oci-image
  variables:
    IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
  before_script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY

oci-image:push-dockerhub:
  extends: .push-oci-image
  variables:
    IMAGE_NAME: matrixconduit/matrix-conduit
  before_script:
    - docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD
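The nix-related `before_script` lines above are just repeated appends to `/etc/nix/nix.conf`. Collected together, the file they produce inside the CI container (when nix is present and both `ATTIC_*` variables are set; the `<...>` values stand for whatever those variables contain) is:

```conf
experimental-features = nix-command flakes
extra-substituters = https://nix.computer.surgery/conduit
extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=
# only written when $ATTIC_ENDPOINT / $ATTIC_PUBLIC_KEY are set:
extra-substituters = <$ATTIC_ENDPOINT>
extra-trusted-public-keys = <$ATTIC_PUBLIC_KEY>
extra-substituters = https://crane.cachix.org
extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=
extra-substituters = https://nix-community.cachix.org
extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=
```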
Cargo.lock (generated, 1700 changes)

File diff suppressed because it is too large.
Cargo.toml (47 changes)

@@ -1,3 +1,14 @@
# Keep alphabetically sorted
[workspace.lints.rust]
explicit_outlives_requirements = "warn"
unused_qualifications = "warn"

# Keep alphabetically sorted
[workspace.lints.clippy]
cloned_instead_of_copied = "warn"
dbg_macro = "warn"
str_to_string = "warn"

[package]
name = "conduit"
description = "A Matrix homeserver written in Rust"

@@ -9,14 +20,14 @@ readme = "README.md"
version = "0.7.0-alpha"
edition = "2021"

# When changing this, make sure to update the `flake.lock` file by running
# `nix flake update`. If you don't have Nix installed or otherwise don't know
# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in
# the matrix room.
rust-version = "1.70.0"
# See also `rust-toolchain.toml`
rust-version = "1.75.0"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[lints]
workspace = true

[dependencies]
# Web framework
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }

@@ -26,7 +37,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv

# Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "3bd58e3c899457c2d55c45268dcb8a65ae682d54", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
ruma = { git = "https://github.com/ruma/ruma", rev = "1a1c61ee1e8f0936e956a3b69c931ce12ee28475", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }

@@ -53,7 +64,8 @@ rand = "0.8.5"
# Used to hash passwords
rust-argon2 = "1.0.0"
# Used to send requests
reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
hyper = "0.14.26"
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
# Used for conduit::Error type
thiserror = "1.0.40"
# Used to generate thumbnails for images

@@ -61,13 +73,13 @@ image = { version = "0.24.6", default-features = false, features = ["jpeg", "png
# Used to encode server public key
base64 = "0.21.2"
# Used when hashing the state
ring = "0.16.20"
ring = "0.17.7"
# Used when querying the SRV record of other servers
trust-dns-resolver = "0.22.0"
# Used to find matching events for appservices
regex = "1.8.1"
# jwt jsonwebtokens
jsonwebtoken = "8.3.0"
jsonwebtoken = "9.2.0"
# Performance measurements
tracing = { version = "0.1.37", features = [] }
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }

@@ -78,15 +90,13 @@ tracing-opentelemetry = "0.18.0"
lru-cache = "0.1.2"
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
parking_lot = { version = "0.12.1", optional = true }
crossbeam = { version = "0.8.2", optional = true }
# crossbeam = { version = "0.8.2", optional = true }
num_cpus = "1.15.0"
threadpool = "1.8.1"
heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
# Used for ruma wrapper
serde_html_form = "0.2.0"

rocksdb = { version = "0.21.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }

thread_local = "1.1.7"
# used for TURN server authentication
hmac = "0.12.1"

@@ -104,6 +114,15 @@ async-trait = "0.1.68"

sd-notify = { version = "0.4.1", optional = true }

[dependencies.rocksdb]
package = "rust-rocksdb"
version = "0.22.7"
optional = true
features = [
    "multi-threaded-cf",
    "zstd",
]

[target.'cfg(unix)'.dependencies]
nix = { version = "0.26.2", features = ["resource"] }

@@ -112,7 +131,7 @@ default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
#backend_sled = ["sled"]
backend_persy = ["persy", "parking_lot"]
backend_sqlite = ["sqlite"]
backend_heed = ["heed", "crossbeam"]
#backend_heed = ["heed", "crossbeam"]
backend_rocksdb = ["rocksdb"]
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
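The new `[workspace.lints.*]` tables combined with `[lints] workspace = true` enable these lints for every crate in the workspace. A minimal illustration (not from the Conduit codebase) of the kind of code the clippy entries flag:

```rust
fn main() {
    // clippy::str_to_string: `.to_string()` on a `&str`; `.to_owned()` is clearer
    let s = "hello".to_string();

    // clippy::cloned_instead_of_copied: `.cloned()` over `Copy` items; prefer `.copied()`
    let first = [1u8, 2, 3].iter().cloned().next();

    // clippy::dbg_macro: `dbg!` left in committed code
    dbg!(first, s);
}
```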
DEPLOY.md (84 changes)

@@ -12,11 +12,13 @@ only offer Linux binaries.

You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate url:

| CPU Architecture | Download stable version | Download development version |
| ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- |
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | [Binary][x84_64-glibc-next] / [.deb][x84_64-glibc-next-deb] |
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] |
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] |

**Stable versions:**

| CPU Architecture | Download stable version |
| ------------------------------------------- | --------------------------------------------------------------- |
| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] |
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] |
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] |

These builds were created on and linked against the glibc version shipped with Debian bullseye.
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.

@@ -24,15 +26,19 @@ If you use a system with an older glibc version (e.g. RHEL8), you might need to

[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
[x84_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next
[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next
[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next
[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
[x84_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next
[armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next
[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next

**Latest versions:**

| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=artifacts) |

```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url>

@@ -133,58 +139,12 @@ $ sudo systemctl daemon-reload

## Creating the Conduit configuration file

Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment
to read it. You need to change at least the server name.**
Now we need to create the Conduit's config file in
`/etc/matrix-conduit/conduit.toml`. Paste in the contents of
[`conduit-example.toml`](./conduit-example.toml) **and take a moment to read it.
You need to change at least the server name.**
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.

```toml
[global]
# The server_name is the pretty name of this server. It is used as a suffix for user
# and room ids. Examples: matrix.org, conduit.rs

# The Conduit server needs all /_matrix/ requests to be reachable at
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).

# If that's not possible for you, you can create /.well-known files to redirect
# requests. See
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
# and
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
# for more information

# YOU NEED TO EDIT THIS
#server_name = "your.server.name"

# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/"
database_backend = "rocksdb"

# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
# 443 and 8448 will be forwarded to the Conduit instance running on this port
# Docker users: Don't change this, you'll need to map an external port to this.
port = 6167

# Max size for uploads
max_request_size = 20_000_000 # in bytes

# Enables registration. If set to false, no users can register on this server.
allow_registration = true

allow_federation = true
allow_check_for_updates = true

# Server to get public keys from. You probably shouldn't change this.
# Defaults to matrix.org if not provided.
#trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
```

## Setting the correct file permissions

As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on

@@ -274,7 +234,7 @@ server {
    client_max_body_size 20M;

    location /_matrix/ {
        proxy_pass http://127.0.0.1:6167$request_uri;
        proxy_pass http://127.0.0.1:6167;
        proxy_set_header Host $http_host;
        proxy_buffering off;
        proxy_read_timeout 5m;
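Once the reverse proxy from the nginx block above is in place, a quick smoke test is to query the spec endpoints on both ports (the hostname is a placeholder):

```bash
# Client-server API through the reverse proxy on 443:
curl https://your.server.name/_matrix/client/versions

# Federation API on 8448:
curl https://your.server.name:8448/_matrix/federation/v1/version
```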
Dockerfile (132 deletions)

@@ -1,132 +0,0 @@
# syntax=docker/dockerfile:1
FROM docker.io/rust:1.70-bullseye AS base

FROM base AS builder
WORKDIR /usr/src/conduit

# Install required packages to build Conduit and its dependencies
RUN apt-get update && \
    apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5

# == Build dependencies without our own code separately for caching ==
#
# Need a fake main.rs since Cargo refuses to build anything otherwise.
#
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
# request that would allow just dependencies to be compiled, presumably
# regardless of whether source files are available.
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
COPY Cargo.toml Cargo.lock ./
RUN cargo build --release && rm -r src

# Copy over actual Conduit sources
COPY src src

# main.rs and lib.rs need their timestamp updated for this to work correctly since
# otherwise the build with the fake main.rs from above is newer than the
# source files (COPY preserves timestamps).
#
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
RUN touch src/main.rs && touch src/lib.rs && cargo build --release


# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS builder-result
COPY --from=builder /usr/src/conduit/target/release/conduit /conduit


# ---------------------------------------------------------------------------------------------------------------
# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems:
# ---------------------------------------------------------------------------------------------------------------
FROM base AS build-cargo-deb

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    dpkg \
    dpkg-dev \
    liblzma-dev

RUN cargo install cargo-deb
# => binary is in /usr/local/cargo/bin/cargo-deb


# ---------------------------------------------------------------------------------------------------------------
# Package conduit build-result into a .deb package:
# ---------------------------------------------------------------------------------------------------------------
FROM builder AS packager
WORKDIR /usr/src/conduit

COPY ./LICENSE ./LICENSE
COPY ./README.md ./README.md
COPY debian ./debian
COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb

# --no-build makes cargo-deb reuse already compiled project
RUN cargo deb --no-build
# => Package is in /usr/src/conduit/target/debian/<project_name>_<version>_<arch>.deb


# ONLY USEFUL FOR CI: target stage to extract build artifacts
FROM scratch AS packager-result
COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb


# ---------------------------------------------------------------------------------------------------------------
# Stuff below this line actually ends up in the resulting docker image
# ---------------------------------------------------------------------------------------------------------------
FROM docker.io/debian:bullseye-slim AS runner

# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167

ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit

ENV CONDUIT_PORT=6167 \
    CONDUIT_ADDRESS="0.0.0.0" \
    CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
    CONDUIT_CONFIG=''
#   └─> Set no config file to do all configuration with env vars

# Conduit needs:
#   dpkg: to install conduit.deb
#   ca-certificates: for https
#   iproute2 & wget: for the healthcheck script
RUN apt-get update && apt-get -y --no-install-recommends install \
    dpkg \
    ca-certificates \
    iproute2 \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh

# Install conduit.deb:
COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/
RUN dpkg -i /srv/conduit/*.deb

# Improve security: Don't run stuff as root, that does not need to run as root
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
ARG USER_ID=1000
ARG GROUP_ID=1000
RUN set -x ; \
    groupadd -r -g ${GROUP_ID} conduit ; \
    useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1

# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable:
RUN chown -cR conduit:conduit /srv/conduit && \
    chmod +x /srv/conduit/healthcheck.sh && \
    mkdir -p ${DEFAULT_DB_PATH} && \
    chown -cR conduit:conduit ${DEFAULT_DB_PATH}

# Change user to conduit, no root permissions afterwards:
USER conduit
# Set container home directory
WORKDIR /srv/conduit

# Run Conduit and print backtraces on panics
ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/usr/sbin/matrix-conduit" ]
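The `builder-result` and `packager-result` stages in this deleted Dockerfile existed only so CI could extract artifacts, which is what the buildx `--output` invocations in the old `.gitlab-ci.yml` did. A single-platform local equivalent would have looked roughly like this (flags taken from the CI job above):

```bash
# Extract the compiled binary from the builder-result stage:
docker buildx build --target builder-result \
    --output type=local,dest=./build-output .

# Extract the .deb from the packager-result stage:
docker buildx build --target packager-result \
    --output type=local,dest=./build-output .
```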
README.md (28 changes)

@@ -31,8 +31,6 @@ There are still a few important features missing:

- E2EE emoji comparison over federation (E2EE chat works)
- Outgoing read receipts, typing, presence over federation (incoming works)

Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).

#### How can I deploy my own?

- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)

@@ -44,13 +42,21 @@ If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md]

#### How can I contribute?

1. Look for an issue you would like to work on and make sure it's not assigned
   to other users
2. Ask someone to assign the issue to you (comment on the issue or chat in
   [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org))
3. Fork the repo and work on the issue. [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) is happy to help :)
1. Look for an issue you would like to work on and make sure no one else is currently working on it.
2. Tell us that you are working on the issue (comment on the issue or chat in
   [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions.
3. Fork the repo, create a new branch and push commits.
4. Submit an MR

#### Contact

If you have any questions, feel free to
- Ask in `#conduit:fachschaften.org` on Matrix
- Write an E-Mail to `conduit@koesters.xyz`
- Send a direct message to `@timokoesters:fachschaften.org` on Matrix
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)

#### Thanks to

Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.

@@ -60,14 +66,6 @@ Thanks to the contributors to Conduit and all libraries we use, for example:

- Ruma: A clean library for the Matrix Spec in Rust
- axum: A modular web framework

#### Contact

If you run into any questions, feel free to
- Ask us in `#conduit:fachschaften.org` on Matrix
- Write an E-Mail to `conduit@koesters.xyz`
- Send a direct message to `timokoesters@fachschaften.org` on Matrix
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)

#### Donate

Liberapay: <https://liberapay.com/timokoesters/>\
bin/complement (new executable file, 37 lines)

@@ -0,0 +1,37 @@
#!/usr/bin/env bash

set -euo pipefail

# Path to Complement's source code
COMPLEMENT_SRC="$1"

# A `.jsonl` file to write test logs to
LOG_FILE="$2"

# A `.jsonl` file to write test results to
RESULTS_FILE="$3"

OCI_IMAGE="complement-conduit:dev"

env \
    -C "$(git rev-parse --show-toplevel)" \
    docker build \
        --tag "$OCI_IMAGE" \
        --file complement/Dockerfile \
        .

# It's okay (likely, even) that `go test` exits nonzero
set +o pipefail
env \
    -C "$COMPLEMENT_SRC" \
    COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
    go test -json ./tests | tee "$LOG_FILE"
set -o pipefail

# Post-process the results into an easy-to-compare format
cat "$LOG_FILE" | jq -c '
    select(
        (.Action == "pass" or .Action == "fail" or .Action == "skip")
        and .Test != null
    ) | {Action: .Action, Test: .Test}
' | sort > "$RESULTS_FILE"
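The script takes three positional arguments, so an invocation looks like this (the paths and file names are hypothetical):

```bash
./bin/complement ~/src/complement complement.log.jsonl complement-results.jsonl

# Because the results are normalized and sorted, two runs can be compared directly:
diff baseline-results.jsonl complement-results.jsonl
```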
bin/nix-build-and-cache (new executable file, 26 lines)

@@ -0,0 +1,26 @@
#!/usr/bin/env bash

set -euo pipefail

# The first argument must be the desired installable
INSTALLABLE="$1"

# Build the installable and forward any other arguments too
nix build "$@"

if [ ! -z ${ATTIC_TOKEN+x} ]; then
    nix run --inputs-from . attic -- \
        login \
        conduit \
        "${ATTIC_ENDPOINT:-https://nix.computer.surgery/conduit}" \
        "$ATTIC_TOKEN"

    # Push the target installable and its build dependencies
    nix run --inputs-from . attic -- \
        push \
        conduit \
        "$(nix path-info "$INSTALLABLE" --derivation)" \
        "$(nix path-info "$INSTALLABLE")"
else
    echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
fi
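This is the helper the `ci` and `artifacts` jobs call. Without `ATTIC_TOKEN` in the environment it degrades to a plain `nix build`, so it is safe to run locally:

```bash
# Build one flake output and, if ATTIC_TOKEN is set, push it to the cache:
./bin/nix-build-and-cache .#oci-image
```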
complement/Dockerfile

@@ -1,26 +1,30 @@
# For use in our CI only. This requires a build artifact created by a previous run pipeline stage to be placed in cached_target/release/conduit
FROM registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:commit-16a08e9b as builder
#FROM rust:latest as builder
FROM rust:1.75.0

WORKDIR /workdir

ARG RUSTC_WRAPPER
ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY
ARG SCCACHE_BUCKET
ARG SCCACHE_ENDPOINT
ARG SCCACHE_S3_USE_SSL
RUN apt-get update && apt-get install -y --no-install-recommends \
    libclang-dev

COPY . .
RUN mkdir -p target/release
RUN test -e cached_target/release/conduit && cp cached_target/release/conduit target/release/conduit || cargo build --release

## Actual image
FROM debian:bullseye
WORKDIR /workdir
COPY Cargo.toml Cargo.toml
COPY Cargo.lock Cargo.lock
COPY src src
RUN cargo build --release \
    && mv target/release/conduit conduit \
    && rm -rf target

# Install caddy
RUN apt-get update && apt-get install -y debian-keyring debian-archive-keyring apt-transport-https curl && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' | tee /etc/apt/sources.list.d/caddy-testing.list && apt-get update && apt-get install -y caddy
RUN apt-get update \
    && apt-get install -y \
        debian-keyring \
        debian-archive-keyring \
        apt-transport-https \
        curl \
    && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
        | gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
    && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
        | tee /etc/apt/sources.list.d/caddy-testing.list \
    && apt-get update \
    && apt-get install -y caddy

COPY conduit-example.toml conduit.toml
COPY complement/caddy.json caddy.json

@@ -29,16 +33,9 @@ ENV SERVER_NAME=localhost
ENV CONDUIT_CONFIG=/workdir/conduit.toml

RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
RUN echo "allow_federation = true" >> conduit.toml
RUN echo "allow_check_for_updates = true" >> conduit.toml
RUN echo "allow_encryption = true" >> conduit.toml
RUN echo "allow_registration = true" >> conduit.toml
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml

COPY --from=builder /workdir/target/release/conduit /workdir/conduit
RUN chmod +x /workdir/conduit

EXPOSE 8008 8448

CMD uname -a && \
complement/README.md

@@ -1,13 +1,11 @@
# Running Conduit on Complement
# Complement

This assumes that you're familiar with complement; if not, please read
[their readme](https://github.com/matrix-org/complement#running).
## What's that?

Complement works with "base images"; this directory (and Dockerfile) helps build the conduit complement-ready docker
image.
Have a look at [its repository](https://github.com/matrix-org/complement).

To build, `cd` to the base directory of the workspace, and run this:
## How do I use it with Conduit?

`docker build -t complement-conduit:dev -f complement/Dockerfile .`

Then use `complement-conduit:dev` as a base image for running complement tests.
The script at [`../bin/complement`](../bin/complement) has automation for this.
It takes a few command line arguments; you can read the script to find out what
those are.
conduit-example.toml

@@ -52,7 +52,11 @@ enable_lightning_bolt = true

# trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

# Controls the log verbosity. See also [here][0].
#
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
#log = "..."

address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
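The removed default shows what an EnvFilter directive string looks like in practice; re-enabling logging with a directive of the same shape would be, for example:

```toml
log = "warn,state_res=warn,_=off,sled=off"
```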
debian/postinst (vendored, 1 change)

@@ -78,7 +78,6 @@ allow_check_for_updates = true

trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
EOF
fi
;;
default.nix (new file, 10 lines)

@@ -0,0 +1,10 @@
(import
  (
    let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
    fetchTarball {
      url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
      sha256 = lock.nodes.flake-compat.locked.narHash;
    }
  )
  { src = ./.; }
).defaultNix
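This is the usual flake-compat shim: it fetches flake-compat at the revision pinned in `flake.lock` and re-exposes the flake's outputs to non-flake tooling. Assuming the flake exposes a default package per system (an assumption, since the flake outputs are not shown in this diff), a classic invocation would be:

```bash
nix-build -A packages.x86_64-linux.default
```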
docker/README.md

@@ -64,7 +64,6 @@ docker run -d -p 8448:6167 \
    -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
    -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
    -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
    -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \
    --name conduit <link>
```

@@ -76,7 +75,7 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible

If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.

## Docker-compose
### Docker-compose

If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.

@@ -161,3 +160,58 @@ So...step by step:

6. Run `docker-compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.

## Voice communication

In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place.

### Configuration

Create a configuration file called `coturn.conf` containing:

```conf
use-auth-secret
static-auth-secret=<a secret key>
realm=<your server domain>
```

A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`.

These same values need to be set in Conduit. You can either modify conduit.toml to include these lines:

```
turn_uris = ["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]
turn_secret = "<secret key from coturn configuration>"
```

or append the following to the docker environment variables depending on which configuration method you used earlier:

```yml
CONDUIT_TURN_URIS: '["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]'
CONDUIT_TURN_SECRET: "<secret key from coturn configuration>"
```

Restart Conduit to apply these changes.

### Run

Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using

```bash
docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
```

or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml`
and run `docker-compose up -d` in the same directory.

```yml
version: "3"
services:
  turn:
    container_name: coturn-server
    image: docker.io/coturn/coturn
    restart: unless-stopped
    network_mode: "host"
    volumes:
      - ./coturn.conf:/etc/coturn/turnserver.conf
```

To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md.
For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration).
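Coturn's `use-auth-secret` mode implements the TURN REST credential scheme: Conduit signs an expiry timestamp with the shared secret (per its Cargo.toml, this is what its `hmac` dependency is used for), and coturn verifies the signature. A sketch for checking by hand that a given secret produces valid credentials; the secret value is a placeholder:

```bash
SECRET='<secret key from coturn configuration>'  # placeholder
USERNAME="$(( $(date +%s) + 3600 ))"             # credentials valid for one hour
# password = base64(HMAC-SHA1(static-auth-secret, username))
PASSWORD="$(printf '%s' "$USERNAME" | openssl dgst -sha1 -hmac "$SECRET" -binary | base64)"
echo "user=$USERNAME pass=$PASSWORD"
```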
docker-compose files

@@ -32,7 +32,6 @@ services:

        CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
        CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
        #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
        #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
        CONDUIT_ADDRESS: 0.0.0.0
        CONDUIT_CONFIG: '' # Ignore this

@@ -33,7 +33,6 @@ services:

        # CONDUIT_PORT: 6167
        # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
        # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
        # CONDUIT_LOG: info # default is: "warn,_=off,sled=off"
        # CONDUIT_ALLOW_JAEGER: 'false'
        # CONDUIT_ALLOW_ENCRYPTION: 'true'
        # CONDUIT_ALLOW_FEDERATION: 'true'

@@ -95,4 +94,4 @@ volumes:

  acme:

networks:
  proxy:
  proxy:

@@ -32,7 +32,6 @@ services:

        CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
        CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
        #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
        #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
        CONDUIT_ADDRESS: 0.0.0.0
        CONDUIT_CONFIG: '' # Ignore this
        #
engage.toml (new file, 64 lines)

@@ -0,0 +1,64 @@
interpreter = ["bash", "-euo", "pipefail", "-c"]

[[task]]
name = "engage"
group = "versions"
script = "engage --version"

[[task]]
name = "rustc"
group = "versions"
script = "rustc --version"

[[task]]
name = "cargo"
group = "versions"
script = "cargo --version"

[[task]]
name = "cargo-fmt"
group = "versions"
script = "cargo fmt --version"

[[task]]
name = "rustdoc"
group = "versions"
script = "rustdoc --version"

[[task]]
name = "cargo-clippy"
group = "versions"
script = "cargo clippy -- --version"

[[task]]
name = "cargo-fmt"
group = "lints"
script = "cargo fmt --check -- --color=always"

[[task]]
name = "cargo-doc"
group = "lints"
script = """
RUSTDOCFLAGS="-D warnings" cargo doc \
--workspace \
--no-deps \
--document-private-items \
--color always
"""

[[task]]
name = "cargo-clippy"
group = "lints"
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"

[[task]]
name = "cargo"
group = "tests"
script = """
cargo test \
--workspace \
--all-targets \
--color=always \
-- \
--color=always
"""
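engage runs each `[[task]]` above grouped as versions, lints, and tests; the `ci` job drives it through `direnv exec . engage`. Locally, assuming the dev shell provides `engage`:

```bash
direnv exec . engage
```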
206
flake.lock
generated
206
flake.lock
generated
|
@ -1,22 +1,41 @@
|
|||
{
|
||||
"nodes": {
|
||||
"crane": {
|
||||
"attic": {
|
||||
"inputs": {
|
||||
"crane": "crane",
|
||||
"flake-compat": "flake-compat",
|
||||
"flake-utils": [
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-overlay": "rust-overlay"
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"nixpkgs-stable": "nixpkgs-stable"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1688772518,
|
||||
"narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=",
|
||||
"lastModified": 1707922053,
|
||||
"narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=",
|
||||
"owner": "zhaofengli",
|
||||
"repo": "attic",
|
||||
"rev": "6eabc3f02fae3683bffab483e614bebfcd476b21",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "zhaofengli",
|
||||
"ref": "main",
|
||||
"repo": "attic",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"attic",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1702918879,
|
||||
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e",
|
||||
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -25,6 +44,27 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane_2": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1707685877,
|
||||
"narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fenix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
|
@ -33,11 +73,11 @@
|
|||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1689488573,
|
||||
"narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=",
|
||||
"lastModified": 1709619709,
|
||||
"narHash": "sha256-l6EPVJfwfelWST7qWQeP6t/TDK3HHv5uUB1b2vw4mOQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "39096fe3f379036ff4a5fa198950b8e79defe939",
|
||||
"rev": "c8943ea9e98d41325ff57d4ec14736d330b321b2",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@@ -62,16 +102,29 @@
         "type": "github"
       }
     },
-    "flake-utils": {
-      "inputs": {
-        "systems": "systems"
-      },
+    "flake-compat_2": {
+      "flake": false,
       "locked": {
-        "lastModified": 1689068808,
-        "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
+        "lastModified": 1696426674,
+        "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
+        "type": "github"
+      },
+      "original": {
+        "owner": "edolstra",
+        "repo": "flake-compat",
+        "type": "github"
+      }
+    },
+    "flake-utils": {
+      "locked": {
+        "lastModified": 1667395993,
+        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
+        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
         "type": "github"
       },
       "original": {
@@ -80,13 +133,78 @@
         "type": "github"
       }
     },
+    "flake-utils_2": {
+      "inputs": {
+        "systems": "systems"
+      },
+      "locked": {
+        "lastModified": 1709126324,
+        "narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "d465f4819400de7c8d874d50b982301f28a84605",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
+    "nix-filter": {
+      "locked": {
+        "lastModified": 1705332318,
+        "narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
+        "owner": "numtide",
+        "repo": "nix-filter",
+        "rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "nix-filter",
+        "type": "github"
+      }
+    },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1689444953,
-        "narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=",
+        "lastModified": 1702539185,
+        "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "8acef304efe70152463a6399f73e636bcc363813",
+        "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
         "ref": "nixpkgs-unstable",
         "repo": "nixpkgs",
         "type": "github"
       }
     },
+    "nixpkgs-stable": {
+      "locked": {
+        "lastModified": 1702780907,
+        "narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-23.11",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs_2": {
+      "locked": {
+        "lastModified": 1709479366,
+        "narHash": "sha256-n6F0n8UV6lnTZbYPl1A9q1BS0p4hduAv1mGAP17CVd0=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "b8697e57f10292a6165a20f03d2f42920dfaf973",
+        "type": "github"
+      },
+      "original": {
@@ -98,20 +216,23 @@
     },
     "root": {
       "inputs": {
-        "crane": "crane",
+        "attic": "attic",
+        "crane": "crane_2",
         "fenix": "fenix",
-        "flake-utils": "flake-utils",
-        "nixpkgs": "nixpkgs"
+        "flake-compat": "flake-compat_2",
+        "flake-utils": "flake-utils_2",
+        "nix-filter": "nix-filter",
+        "nixpkgs": "nixpkgs_2"
       }
     },
     "rust-analyzer-src": {
       "flake": false,
       "locked": {
-        "lastModified": 1689441253,
-        "narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=",
+        "lastModified": 1709571018,
+        "narHash": "sha256-ISFrxHxE0J5g7lDAscbK88hwaT5uewvWoma9TlFmRzM=",
         "owner": "rust-lang",
         "repo": "rust-analyzer",
-        "rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8",
+        "rev": "9f14343f9ee24f53f17492c5f9b653427e2ad15e",
         "type": "github"
       },
       "original": {
@@ -121,31 +242,6 @@
         "type": "github"
       }
     },
-    "rust-overlay": {
-      "inputs": {
-        "flake-utils": [
-          "crane",
-          "flake-utils"
-        ],
-        "nixpkgs": [
-          "crane",
-          "nixpkgs"
-        ]
-      },
-      "locked": {
-        "lastModified": 1688351637,
-        "narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=",
-        "owner": "oxalica",
-        "repo": "rust-overlay",
-        "rev": "f9b92316727af9e6c7fee4a761242f7f46880329",
-        "type": "github"
-      },
-      "original": {
-        "owner": "oxalica",
-        "repo": "rust-overlay",
-        "type": "github"
-      }
-    },
     "systems": {
       "locked": {
         "lastModified": 1681028828,
288  flake.nix
@@ -2,92 +2,280 @@
   inputs = {
     nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
     flake-utils.url = "github:numtide/flake-utils";
+    nix-filter.url = "github:numtide/nix-filter";
+    flake-compat = {
+      url = "github:edolstra/flake-compat";
+      flake = false;
+    };

     fenix = {
       url = "github:nix-community/fenix";
       inputs.nixpkgs.follows = "nixpkgs";
     };
     crane = {
-      url = "github:ipetkov/crane";
+      # Pin latest crane that's not affected by the following bugs:
+      #
+      # * <https://github.com/ipetkov/crane/issues/527#issuecomment-1978079140>
+      # * <https://github.com/toml-rs/toml/issues/691>
+      # * <https://github.com/toml-rs/toml/issues/267>
+      url = "github:ipetkov/crane?rev=2c653e4478476a52c6aa3ac0495e4dea7449ea0e";
       inputs.nixpkgs.follows = "nixpkgs";
-      inputs.flake-utils.follows = "flake-utils";
     };
+    attic.url = "github:zhaofengli/attic?ref=main";
   };

   outputs =
     { self
     , nixpkgs
     , flake-utils
     , nix-filter

     , fenix
     , crane
+    , ...
     }: flake-utils.lib.eachDefaultSystem (system:
     let
-      pkgs = nixpkgs.legacyPackages.${system};
-
-      # Use mold on Linux
-      stdenv = if pkgs.stdenv.isLinux then
-        pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv
-      else
-        pkgs.stdenv;
+      pkgsHost = nixpkgs.legacyPackages.${system};

       # Nix-accessible `Cargo.toml`
       cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);

       # The Rust toolchain to use
-      toolchain = fenix.packages.${system}.toolchainOf {
-        # Use the Rust version defined in `Cargo.toml`
-        channel = cargoToml.package.rust-version;
+      toolchain = fenix.packages.${system}.fromToolchainFile {
+        file = ./rust-toolchain.toml;

-        # THE rust-version HASH
-        sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc=";
+        # See also `rust-toolchain.toml`
+        sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8=";
       };

-      # The system's RocksDB
-      ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include";
-      ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib";
+      builder = pkgs:
+        ((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;

-      # Shared between the package and the devShell
-      nativeBuildInputs = (with pkgs.rustPlatform; [
-        bindgenHook
-      ]);
+      nativeBuildInputs = pkgs: [
+        # bindgen needs the build platform's libclang. Apparently due to
+        # "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
+        # quite do the right thing here.
+        pkgs.pkgsBuildHost.rustPlatform.bindgenHook
+      ];

-      builder =
-        ((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage;
+      rocksdb' = pkgs:
+        let
+          version = "8.11.3";
+        in
+        pkgs.rocksdb.overrideAttrs (old: {
+          inherit version;
+          src = pkgs.fetchFromGitHub {
+            owner = "facebook";
+            repo = "rocksdb";
+            rev = "v${version}";
+            hash = "sha256-OpEiMwGxZuxb9o3RQuSrwZMQGLhe9xLT1aa3HpI4KPs=";
+          };
+        });
+
+      env = pkgs: {
+        ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include";
+        ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib";
+      }
+      // pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
+        ROCKSDB_STATIC = "";
+      }
+      // {
+        CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
+          lib.concatStringsSep " " ([]
+            ++ lib.optionals
+              # This disables PIE for static builds, which isn't great in terms
+              # of security. Unfortunately, my hand is forced because nixpkgs'
+              # `libstdc++.a` is built without `-fPIE`, which precludes us from
+              # leaving PIE enabled.
+              stdenv.hostPlatform.isStatic
+              ["-C" "relocation-model=static"]
+            ++ lib.optionals
+              (stdenv.buildPlatform.config != stdenv.hostPlatform.config)
+              ["-l" "c"]
+            ++ lib.optionals
+              # This check has to match the one [here][0]. We only need to set
+              # these flags when using a different linker. Don't ask me why,
+              # though, because I don't know. All I know is it breaks otherwise.
+              #
+              # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
+              (
+                # Nixpkgs doesn't check for x86_64 here but we do, because I
+                # observed a failure building statically for x86_64 without
+                # including it here. Linkers are weird.
+                (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
+                  && stdenv.hostPlatform.isStatic
+                  && !stdenv.isDarwin
+                  && !stdenv.cc.bintools.isLLVM
+              )
+              [
+                "-l"
+                "stdc++"
+                "-L"
+                "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
+              ]
+          );
+      }
+
+      # What follows is stolen from [here][0]. Its purpose is to properly
+      # configure compilers and linkers for various stages of the build, and
+      # even covers the case of build scripts that need native code compiled and
+      # run on the build platform (I think).
+      #
+      # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
+      // (
+        let
+          inherit (pkgs.rust.lib) envVars;
+        in
+        pkgs.lib.optionalAttrs
+          (pkgs.stdenv.targetPlatform.rust.rustcTarget
+            != pkgs.stdenv.hostPlatform.rust.rustcTarget)
+          (
+            let
+              inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
+            in
+            {
+              "CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
+              "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
+              "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
+                envVars.linkerForTarget;
+            }
+          )
+        // (
+          let
+            inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
+          in
+          {
+            "CC_${cargoEnvVarTarget}" = envVars.ccForHost;
+            "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
+            "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
+            CARGO_BUILD_TARGET = rustcTarget;
+          }
+        )
+        // (
+          let
+            inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
+          in
+          {
+            "CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
+            "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
+            "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
+            HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc";
+            HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++";
+          }
+        ));
+
+      package = pkgs: builder pkgs {
+        src = nix-filter {
+          root = ./.;
+          include = [
+            "src"
+            "Cargo.toml"
+            "Cargo.lock"
+          ];
+        };
+
+        # This is redundant with CI
+        doCheck = false;
+
+        env = env pkgs;
+        nativeBuildInputs = nativeBuildInputs pkgs;
+
+        meta.mainProgram = cargoToml.package.name;
+      };
+
+      mkOciImage = pkgs: package:
+        pkgs.dockerTools.buildImage {
+          name = package.pname;
+          tag = "next";
+          copyToRoot = [
+            pkgs.dockerTools.caCertificates
+          ];
+          config = {
+            # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
+            # are handled as expected
+            Entrypoint = [
+              "${pkgs.lib.getExe' pkgs.tini "tini"}"
+              "--"
+            ];
+            Cmd = [
+              "${pkgs.lib.getExe package}"
+            ];
+          };
+        };
     in
     {
-      packages.default = builder {
-        src = ./.;
-
-        inherit
-          stdenv
-          nativeBuildInputs
-          ROCKSDB_INCLUDE_DIR
-          ROCKSDB_LIB_DIR;
-      };
+      packages = {
+        default = package pkgsHost;
+        oci-image = mkOciImage pkgsHost self.packages.${system}.default;
+      }
+      //
+      builtins.listToAttrs
+        (builtins.concatLists
+          (builtins.map
+            (crossSystem:
+              let
+                binaryName = "static-${crossSystem}";
+                pkgsCrossStatic =
+                  (import nixpkgs {
+                    inherit system;
+                    crossSystem = {
+                      config = crossSystem;
+                    };
+                  }).pkgsStatic;
+              in
+              [
+                # An output for a statically-linked binary
+                {
+                  name = binaryName;
+                  value = package pkgsCrossStatic;
+                }
+
+                # An output for an OCI image based on that binary
+                {
+                  name = "oci-image-${crossSystem}";
+                  value = mkOciImage
+                    pkgsCrossStatic
+                    self.packages.${system}.${binaryName};
+                }
+              ]
+            )
+            [
+              "x86_64-unknown-linux-musl"
+              "aarch64-unknown-linux-musl"
+            ]
+          )
+        );

-      devShells.default = (pkgs.mkShell.override { inherit stdenv; }) {
-        # Rust Analyzer needs to be able to find the path to default crate
-        # sources, and it can read this environment variable to do so
-        RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library";
-
-        inherit
-          ROCKSDB_INCLUDE_DIR
-          ROCKSDB_LIB_DIR;
+      devShells.default = pkgsHost.mkShell {
+        env = env pkgsHost // {
+          # Rust Analyzer needs to be able to find the path to default crate
+          # sources, and it can read this environment variable to do so. The
+          # `rust-src` component is required in order for this to work.
+          RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
+        };

         # Development tools
-        nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [
-          cargo
-          clippy
-          rust-src
-          rustc
-          rustfmt
-        ]);
-      };
+        nativeBuildInputs = nativeBuildInputs pkgsHost ++ [
+          # Always use nightly rustfmt because most of its options are unstable
+          #
+          # This needs to come before `toolchain` in this list, otherwise
+          # `$PATH` will have stable rustfmt instead.
+          fenix.packages.${system}.latest.rustfmt

-      checks = {
-        packagesDefault = self.packages.${system}.default;
-        devShellsDefault = self.devShells.${system}.default;
+          toolchain
+        ] ++ (with pkgsHost; [
+          engage
+
+          # Needed for producing Debian packages
+          cargo-deb
+
+          # Needed for Complement
+          go
+          olm
+
+          # Needed for our script for Complement
+          jq
+        ]);
       };
     });
 }
22  rust-toolchain.toml  (new file)
@@ -0,0 +1,22 @@
+# This is the authoritative configuration of this project's Rust toolchain.
+#
+# Other files that need upkeep when this changes:
+#
+# * `.gitlab-ci.yml`
+# * `Cargo.toml`
+# * `flake.nix`
+#
+# Search in those files for `rust-toolchain.toml` to find the relevant places.
+# If you're having trouble making the relevant changes, bug a maintainer.
+
+[toolchain]
+channel = "1.75.0"
+components = [
+    # For rust-analyzer
+    "rust-src",
+]
+targets = [
+    "x86_64-unknown-linux-gnu",
+    "x86_64-unknown-linux-musl",
+    "aarch64-unknown-linux-musl",
+]
@@ -239,13 +239,22 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {

     // If this is the first real user, grant them admin privileges
     // Note: the server user, @conduit:servername, is generated first
-    if services().users.count()? == 2 {
-        services()
-            .admin
-            .make_user_admin(&user_id, displayname)
-            .await?;
+    if !is_guest {
+        if let Some(admin_room) = services().admin.get_admin_room()? {
+            if services()
+                .rooms
+                .state_cache
+                .room_joined_count(&admin_room)?
+                == Some(1)
+            {
+                services()
+                    .admin
+                    .make_user_admin(&user_id, displayname)
+                    .await?;

-        warn!("Granting {} admin privileges as the first user", user_id);
+                warn!("Granting {} admin privileges as the first user", user_id);
+            }
+        }
     }

     Ok(register::v3::Response {
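
Aside: the admin-promotion change above stops keying off a raw user count and instead checks membership of the admin room. A minimal sketch of the new condition, with simplified stand-in arguments rather than the real service calls:

    // Sketch only: the real check lives in register_route above.
    fn should_grant_admin(is_guest: bool, admin_room_joined_count: Option<u64>) -> bool {
        // Only a non-guest registering while the admin room holds exactly one
        // member (the @conduit server user) is promoted to admin.
        !is_guest && admin_room_joined_count == Some(1)
    }

Counting joined members of the admin room is more robust than `users.count()? == 2`, since deactivated or appservice users no longer skew the "first real user" decision.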
@@ -17,7 +17,11 @@ use ruma::{
     DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
 };
 use serde_json::json;
-use std::collections::{BTreeMap, HashMap, HashSet};
+use std::{
+    collections::{hash_map, BTreeMap, HashMap, HashSet},
+    time::{Duration, Instant},
+};
 use tracing::debug;

 /// # `POST /_matrix/client/r0/keys/upload`
 ///
@@ -335,31 +339,70 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(

     let mut failures = BTreeMap::new();

+    let back_off = |id| async {
+        match services()
+            .globals
+            .bad_query_ratelimiter
+            .write()
+            .await
+            .entry(id)
+        {
+            hash_map::Entry::Vacant(e) => {
+                e.insert((Instant::now(), 1));
+            }
+            hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+        }
+    };
+
     let mut futures: FuturesUnordered<_> = get_over_federation
         .into_iter()
         .map(|(server, vec)| async move {
+            if let Some((time, tries)) = services()
+                .globals
+                .bad_query_ratelimiter
+                .read()
+                .await
+                .get(server)
+            {
+                // Exponential backoff
+                let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
+                if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+                    min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+                }
+
+                if time.elapsed() < min_elapsed_duration {
+                    debug!("Backing off query from {:?}", server);
+                    return (
+                        server,
+                        Err(Error::BadServerResponse("bad query, still backing off")),
+                    );
+                }
+            }
+
             let mut device_keys_input_fed = BTreeMap::new();
             for (user_id, keys) in vec {
                 device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
             }
             (
                 server,
-                services()
-                    .sending
-                    .send_federation_request(
+                tokio::time::timeout(
+                    Duration::from_secs(25),
+                    services().sending.send_federation_request(
                         server,
                         federation::keys::get_keys::v1::Request {
                             device_keys: device_keys_input_fed,
                         },
-                    )
-                    .await,
+                    ),
+                )
+                .await
+                .map_err(|_e| Error::BadServerResponse("Query took too long")),
             )
         })
         .collect();

     while let Some((server, response)) = futures.next().await {
         match response {
-            Ok(response) => {
+            Ok(Ok(response)) => {
                 for (user, masterkey) in response.master_keys {
                     let (master_key_id, mut master_key) =
                         services().users.parse_master_key(&user, &masterkey)?;
@@ -386,7 +429,9 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
                 self_signing_keys.extend(response.self_signing_keys);
                 device_keys.extend(response.device_keys);
             }
-            Err(_e) => {
+            _ => {
+                back_off(server.to_owned()).await;
+
                 failures.insert(server.to_string(), json!({}));
             }
         }
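
The two behavioural additions in get_keys_helper above are a per-server exponential backoff and a 25-second bound on each federation query. A self-contained sketch of the backoff window, assuming the same 30-second base and 24-hour cap as the diff:

    use std::time::Duration;

    // Backoff window after `tries` failed queries: 30s * tries^2, capped at 24h.
    fn backoff_window(tries: u32) -> Duration {
        let window = Duration::from_secs(30) * tries * tries;
        window.min(Duration::from_secs(60 * 60 * 24))
    }

The timeout itself is the stock tokio::time::timeout combinator wrapped around the request future; on expiry it yields an error that the code maps to Error::BadServerResponse("Query took too long"), which is why the response is now matched as the nested Ok(Ok(response)).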
@@ -51,7 +51,7 @@ pub async fn create_content_route(
         .await?;

     Ok(create_content::v3::Response {
-        content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
+        content_uri: mxc.into(),
         blurhash: None,
     })
 }
|
@ -26,9 +26,10 @@ use ruma::{
|
|||
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
|
||||
use std::{
|
||||
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
|
||||
sync::{Arc, RwLock},
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use crate::{
|
||||
|
@@ -64,7 +65,12 @@ pub async fn join_room_by_id_route(
             .map(|user| user.server_name().to_owned()),
     );

-    servers.push(body.room_id.server_name().to_owned());
+    servers.push(
+        body.room_id
+            .server_name()
+            .expect("Room IDs should always have a server name")
+            .into(),
+    );

     join_room_by_id_helper(
         body.sender_user.as_deref(),
@@ -105,7 +111,12 @@ pub async fn join_room_by_id_or_alias_route(
                 .map(|user| user.server_name().to_owned()),
         );

-        servers.push(room_id.server_name().to_owned());
+        servers.push(
+            room_id
+                .server_name()
+                .expect("Room IDs should always have a server name")
+                .into(),
+        );

         (servers, room_id)
     }
@@ -202,24 +213,28 @@ pub async fn kick_user_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(body.room_id.clone())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;

-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomMember,
-            content: to_raw_value(&event).expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some(body.user_id.to_string()),
-            redacts: None,
-        },
-        sender_user,
-        &body.room_id,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomMember,
+                content: to_raw_value(&event).expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some(body.user_id.to_string()),
+                redacts: None,
+            },
+            sender_user,
+            &body.room_id,
+            &state_lock,
+        )
+        .await?;

     drop(state_lock);

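
This hunk is the first of many in this commit with the same mechanical shape: the per-room mutex map moved from std::sync::RwLock (locked with .write().unwrap()) to tokio::sync::RwLock (locked with .write().await), and build_and_append_pdu itself became async, so every call site gains an .await. A minimal sketch of the locking pattern, with a unit payload standing in for the real room state:

    use std::collections::HashMap;
    use std::sync::Arc;
    use tokio::sync::{Mutex, RwLock};

    // Grab (or lazily create) the per-room mutex; no unwrap(), no poisoning.
    async fn room_state_mutex(
        map: &RwLock<HashMap<String, Arc<Mutex<()>>>>,
        room_id: &str,
    ) -> Arc<Mutex<()>> {
        Arc::clone(
            map.write()
                .await
                .entry(room_id.to_owned())
                .or_default(),
        )
    }

One advantage of the tokio lock here is that a task holding it across an .await point no longer ties up a whole executor thread, and there is no poisoned-lock state to unwrap.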
@@ -266,24 +281,28 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_user::v3::Response> {
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(body.room_id.clone())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;

-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomMember,
-            content: to_raw_value(&event).expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some(body.user_id.to_string()),
-            redacts: None,
-        },
-        sender_user,
-        &body.room_id,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomMember,
+                content: to_raw_value(&event).expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some(body.user_id.to_string()),
+                redacts: None,
+            },
+            sender_user,
+            &body.room_id,
+            &state_lock,
+        )
+        .await?;

     drop(state_lock);

@@ -324,24 +343,28 @@ pub async fn unban_user_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(body.room_id.clone())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;

-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomMember,
-            content: to_raw_value(&event).expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some(body.user_id.to_string()),
-            redacts: None,
-        },
-        sender_user,
-        &body.room_id,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomMember,
+                content: to_raw_value(&event).expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some(body.user_id.to_string()),
+                redacts: None,
+            },
+            sender_user,
+            &body.room_id,
+            &state_lock,
+        )
+        .await?;

     drop(state_lock);

@@ -400,7 +423,7 @@ pub async fn get_member_events_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
@@ -435,7 +458,7 @@ pub async fn joined_members_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
@@ -479,7 +502,7 @@ async fn join_room_by_id_helper(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.to_owned())
             .or_default(),
     );
@@ -619,7 +642,7 @@ async fn join_room_by_id_helper(
         ));
     }

-    if let Ok(signature) = signed_value["signatures"]
+    match signed_value["signatures"]
         .as_object()
         .ok_or(Error::BadRequest(
             ErrorKind::InvalidParam,
@@ -630,18 +653,20 @@ async fn join_room_by_id_helper(
             ErrorKind::InvalidParam,
             "Server did not send its signature",
         ))
-    })
-    {
-        join_event
-            .get_mut("signatures")
-            .expect("we created a valid pdu")
-            .as_object_mut()
-            .expect("we created a valid pdu")
-            .insert(remote_server.to_string(), signature.clone());
-    } else {
-        warn!(
-            "Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}",
-        );
+    }) {
+        Ok(signature) => {
+            join_event
+                .get_mut("signatures")
+                .expect("we created a valid pdu")
+                .as_object_mut()
+                .expect("we created a valid pdu")
+                .insert(remote_server.to_string(), signature.clone());
+        }
+        Err(e) => {
+            warn!(
+                "Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}",
+            );
+        }
     }
 }

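
The refactor above trades `if let Ok(..) .. else ..` for a match purely so the error value can be logged; an if-let's else branch never sees the Err payload. A tiny sketch with hypothetical types:

    // With `if let Ok(sig) = result { .. } else { .. }` the error is dropped;
    // a match keeps it for the warn! call.
    fn handle(result: Result<String, String>) {
        match result {
            Ok(signature) => println!("merging signature {signature}"),
            Err(e) => eprintln!("server sent invalid signature: {e:?}"),
        }
    }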
@@ -668,7 +693,7 @@ async fn join_room_by_id_helper(
         .iter()
         .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
     {
-        let (event_id, value) = match result {
+        let (event_id, value) = match result.await {
             Ok(t) => t,
             Err(_) => continue,
         };
@@ -698,7 +723,7 @@ async fn join_room_by_id_helper(
         .iter()
         .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
     {
-        let (event_id, value) = match result {
+        let (event_id, value) = match result.await {
             Ok(t) => t,
             Err(_) => continue,
         };
@@ -710,7 +735,7 @@ async fn join_room_by_id_helper(
     }

     info!("Running send_join auth check");
-    if !state_res::event_auth::auth_check(
+    let authenticated = state_res::event_auth::auth_check(
         &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
         &parsed_join_pdu,
         None::<PduEvent>, // TODO: third party invite
@@ -733,7 +758,9 @@ async fn join_room_by_id_helper(
     .map_err(|e| {
         warn!("Auth check failed: {e}");
         Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
-    })? {
+    })?;
+
+    if !authenticated {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Auth check failed",
@@ -770,12 +797,16 @@ async fn join_room_by_id_helper(
     let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?;

     info!("Appending new room join event");
-    services().rooms.timeline.append_pdu(
-        &parsed_join_pdu,
-        join_event,
-        vec![(*parsed_join_pdu.event_id).to_owned()],
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .append_pdu(
+            &parsed_join_pdu,
+            join_event,
+            vec![(*parsed_join_pdu.event_id).to_owned()],
+            &state_lock,
+        )
+        .await?;

     info!("Setting final room state for new room");
     // We set the room state after inserting the pdu, so that we never have a moment in time
@@ -888,18 +919,23 @@ async fn join_room_by_id_helper(
         };

         // Try normal join first
-        let error = match services().rooms.timeline.build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomMember,
-                content: to_raw_value(&event).expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some(sender_user.to_string()),
-                redacts: None,
-            },
-            sender_user,
-            room_id,
-            &state_lock,
-        ) {
+        let error = match services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
+                PduBuilder {
+                    event_type: TimelineEventType::RoomMember,
+                    content: to_raw_value(&event).expect("event is valid, we just created it"),
+                    unsigned: None,
+                    state_key: Some(sender_user.to_string()),
+                    redacts: None,
+                },
+                sender_user,
+                room_id,
+                &state_lock,
+            )
+            .await
+        {
             Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())),
             Err(e) => e,
         };
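
Worth noting in the hunk above is the error-capturing match: on a successful local join the function returns early, otherwise the error is bound to `error` so the code that follows can fall back to a remote join. A minimal sketch of the shape (try_local and try_remote are hypothetical stand-ins):

    fn try_local() -> Result<u32, String> {
        Err("room not known locally".to_owned())
    }

    fn try_remote() -> Result<u32, String> {
        Ok(42)
    }

    fn join() -> Result<u32, String> {
        // Keep the local error around; only give up if the fallback also fails.
        let error = match try_local() {
            Ok(v) => return Ok(v),
            Err(e) => e,
        };
        eprintln!("local join failed ({error}), falling back to remote join");
        try_remote()
    }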
@@ -1095,7 +1131,7 @@ async fn make_join_request(
     make_join_response_and_server
 }

-fn validate_and_add_event_id(
+async fn validate_and_add_event_id(
     pdu: &RawJsonValue,
     room_version: &RoomVersionId,
     pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
@@ -1111,24 +1147,26 @@ fn validate_and_add_event_id(
     ))
     .expect("ruma's reference hashes are valid event ids");

-    let back_off = |id| match services()
-        .globals
-        .bad_event_ratelimiter
-        .write()
-        .unwrap()
-        .entry(id)
-    {
-        Entry::Vacant(e) => {
-            e.insert((Instant::now(), 1));
+    let back_off = |id| async {
+        match services()
+            .globals
+            .bad_event_ratelimiter
+            .write()
+            .await
+            .entry(id)
+        {
+            Entry::Vacant(e) => {
+                e.insert((Instant::now(), 1));
+            }
+            Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
         }
-        Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
     };

     if let Some((time, tries)) = services()
         .globals
         .bad_event_ratelimiter
         .read()
-        .unwrap()
+        .await
         .get(&event_id)
     {
         // Exponential backoff
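
Because the ratelimiter map is now behind a tokio RwLock, `back_off` can no longer be a plain closure; it becomes `|id| async { .. }`, a closure returning a future, and every call site gains an `.await`. A self-contained sketch of the bookkeeping it performs:

    use std::collections::{hash_map::Entry, HashMap};
    use std::time::Instant;
    use tokio::sync::RwLock;

    // Record one more failure for `id`: the first failure inserts (now, 1);
    // later failures reset the timestamp and bump the counter.
    async fn back_off(limiter: &RwLock<HashMap<String, (Instant, u32)>>, id: String) {
        match limiter.write().await.entry(id) {
            Entry::Vacant(e) => {
                e.insert((Instant::now(), 1));
            }
            Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
        }
    }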
@@ -1143,15 +1181,10 @@ fn validate_and_add_event_id(
         }
     }

-    if let Err(e) = ruma::signatures::verify_event(
-        &*pub_key_map
-            .read()
-            .map_err(|_| Error::bad_database("RwLock is poisoned."))?,
-        &value,
-        room_version,
-    ) {
+    if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version)
+    {
         warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
-        back_off(event_id);
+        back_off(event_id).await;
         return Err(Error::BadServerResponse("Event failed verification."));
     }

@@ -1177,7 +1210,7 @@ pub(crate) async fn invite_helper<'a>(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.to_owned())
             .or_default(),
     );
@@ -1298,34 +1331,38 @@ pub(crate) async fn invite_helper<'a>(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.to_owned())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;

-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomMember,
-            content: to_raw_value(&RoomMemberEventContent {
-                membership: MembershipState::Invite,
-                displayname: services().users.displayname(user_id)?,
-                avatar_url: services().users.avatar_url(user_id)?,
-                is_direct: Some(is_direct),
-                third_party_invite: None,
-                blurhash: services().users.blurhash(user_id)?,
-                reason,
-                join_authorized_via_users_server: None,
-            })
-            .expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some(user_id.to_string()),
-            redacts: None,
-        },
-        sender_user,
-        room_id,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomMember,
+                content: to_raw_value(&RoomMemberEventContent {
+                    membership: MembershipState::Invite,
+                    displayname: services().users.displayname(user_id)?,
+                    avatar_url: services().users.avatar_url(user_id)?,
+                    is_direct: Some(is_direct),
+                    third_party_invite: None,
+                    blurhash: services().users.blurhash(user_id)?,
+                    reason,
+                    join_authorized_via_users_server: None,
+                })
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some(user_id.to_string()),
+                redacts: None,
+            },
+            sender_user,
+            room_id,
+            &state_lock,
+        )
+        .await?;

     drop(state_lock);

@@ -1362,7 +1399,7 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> {
 pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
     // Ask a remote server if we don't have this room
     if !services().rooms.metadata.exists(room_id)?
-        && room_id.server_name() != services().globals.server_name()
+        && room_id.server_name() != Some(services().globals.server_name())
     {
         if let Err(e) = remote_leave_room(user_id, room_id).await {
             warn!("Failed to leave room {} remotely: {}", user_id, e);
@@ -1393,7 +1430,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.to_owned())
             .or_default(),
     );
@@ -1429,18 +1466,22 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<String>) -> Result<()> {
         event.membership = MembershipState::Leave;
         event.reason = reason;

-        services().rooms.timeline.build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomMember,
-                content: to_raw_value(&event).expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some(user_id.to_string()),
-                redacts: None,
-            },
-            user_id,
-            room_id,
-            &state_lock,
-        )?;
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
+                PduBuilder {
+                    event_type: TimelineEventType::RoomMember,
+                    content: to_raw_value(&event).expect("event is valid, we just created it"),
+                    unsigned: None,
+                    state_key: Some(user_id.to_string()),
+                    redacts: None,
+                },
+                user_id,
+                room_id,
+                &state_lock,
+            )
+            .await?;
     }

     Ok(())
@@ -32,7 +32,7 @@ pub async fn send_message_event_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(body.room_id.clone())
             .or_default(),
     );
@@ -73,19 +73,23 @@ pub async fn send_message_event_route(
     let mut unsigned = BTreeMap::new();
     unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());

-    let event_id = services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: body.event_type.to_string().into(),
-            content: serde_json::from_str(body.body.body.json().get())
-                .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
-            unsigned: Some(unsigned),
-            state_key: None,
-            redacts: None,
-        },
-        sender_user,
-        &body.room_id,
-        &state_lock,
-    )?;
+    let event_id = services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: body.event_type.to_string().into(),
+                content: serde_json::from_str(body.body.body.json().get())
+                    .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
+                unsigned: Some(unsigned),
+                state_key: None,
+                redacts: None,
+            },
+            sender_user,
+            &body.room_id,
+            &state_lock,
+        )
+        .await?;

     services().transaction_ids.add_txnid(
         sender_user,
@@ -124,14 +128,13 @@ pub async fn get_message_events_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());

-    services().rooms.lazy_loading.lazy_load_confirm_delivery(
-        sender_user,
-        sender_device,
-        &body.room_id,
-        from,
-    )?;
+    services()
+        .rooms
+        .lazy_loading
+        .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
+        .await?;

     let limit = u64::from(body.limit).min(100) as usize;

@@ -1,5 +1,8 @@
-use crate::{services, utils, Result, Ruma};
-use ruma::api::client::presence::{get_presence, set_presence};
+use crate::{services, utils, Error, Result, Ruma};
+use ruma::api::client::{
+    error::ErrorKind,
+    presence::{get_presence, set_presence},
+};
 use std::time::Duration;

 /// # `PUT /_matrix/client/r0/presence/{userId}/status`
@@ -79,6 +82,9 @@ pub async fn get_presence_route(
             presence: presence.content.presence,
         })
     } else {
-        todo!();
+        Err(Error::BadRequest(
+            ErrorKind::NotFound,
+            "Presence state for this user was not found",
+        ))
     }
 }
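
The presence hunk above is small but user-visible: the missing-presence branch used to hit todo!(), which panics the handler, and now returns a proper not-found client error instead. A sketch of the before/after shape with a plain Result:

    // Before: `None => todo!()` would panic at runtime.
    // After: an absent record becomes a well-formed error response.
    fn presence_for(found: Option<String>) -> Result<String, String> {
        match found {
            Some(presence) => Ok(presence),
            None => Err("M_NOT_FOUND: Presence state for this user was not found".to_owned()),
        }
    }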
@@ -77,18 +77,17 @@ pub async fn set_displayname_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.clone())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;

-    let _ = services().rooms.timeline.build_and_append_pdu(
-        pdu_builder,
-        sender_user,
-        &room_id,
-        &state_lock,
-    );
+    let _ = services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
+        .await;

     // Presence update
     services().rooms.edus.presence.update_presence(
@@ -212,18 +211,17 @@ pub async fn set_avatar_url_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.clone())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;

-    let _ = services().rooms.timeline.build_and_append_pdu(
-        pdu_builder,
-        sender_user,
-        &room_id,
-        &state_lock,
-    );
+    let _ = services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
+        .await;

     // Presence update
     services().rooms.edus.presence.update_presence(
@@ -24,27 +24,32 @@ pub async fn redact_event_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(body.room_id.clone())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;

-    let event_id = services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomRedaction,
-            content: to_raw_value(&RoomRedactionEventContent {
-                reason: body.reason.clone(),
-            })
-            .expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: None,
-            redacts: Some(body.event_id.into()),
-        },
-        sender_user,
-        &body.room_id,
-        &state_lock,
-    )?;
+    let event_id = services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomRedaction,
+                content: to_raw_value(&RoomRedactionEventContent {
+                    redacts: Some(body.event_id.clone()),
+                    reason: body.reason.clone(),
+                })
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: None,
+                redacts: Some(body.event_id.into()),
+            },
+            sender_user,
+            &body.room_id,
+            &state_lock,
+        )
+        .await?;

     drop(state_lock);

@@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());

     // Use limit or else 10, with maximum 100
     let limit = body
@@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route(
     let to = body
         .to
         .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());

     // Use limit or else 10, with maximum 100
     let limit = body
@@ -121,7 +121,7 @@ pub async fn get_relating_events_route(
     let to = body
        .to
        .as_ref()
-        .and_then(|t| PduCount::try_from_string(&t).ok());
+        .and_then(|t| PduCount::try_from_string(t).ok());

     // Use limit or else 10, with maximum 100
     let limit = body
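
All three hunks above are the same clippy needless-borrow fix: inside `.and_then(|t| ..)` the binding `t` is already a reference, so `&t` only adds a second layer that deref coercion then has to strip. A minimal illustration:

    fn parse_count(t: &str) -> Option<u64> {
        t.parse().ok()
    }

    fn demo(to: Option<String>) -> Option<u64> {
        // `t` is a &String here; `parse_count(&t)` would hand over a &&String.
        to.as_ref().and_then(|t| parse_count(t))
    }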
@@ -61,7 +61,7 @@ pub async fn create_room_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.clone())
             .or_default(),
     );
@@ -142,8 +142,9 @@ pub async fn create_room_route(
                 content
             }
             None => {
+                // TODO: Add correct value for v11
                 let mut content = serde_json::from_str::<CanonicalJsonObject>(
-                    to_raw_value(&RoomCreateEventContent::new(sender_user.clone()))
+                    to_raw_value(&RoomCreateEventContent::new_v1(sender_user.clone()))
                         .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?
                         .get(),
                 )
@@ -173,42 +174,50 @@ pub async fn create_room_route(
     }

     // 1. The room create event
-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomCreate,
-            content: to_raw_value(&content).expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some("".to_owned()),
-            redacts: None,
-        },
-        sender_user,
-        &room_id,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomCreate,
+                content: to_raw_value(&content).expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &room_id,
+            &state_lock,
+        )
+        .await?;

     // 2. Let the room creator join
-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomMember,
-            content: to_raw_value(&RoomMemberEventContent {
-                membership: MembershipState::Join,
-                displayname: services().users.displayname(sender_user)?,
-                avatar_url: services().users.avatar_url(sender_user)?,
-                is_direct: Some(body.is_direct),
-                third_party_invite: None,
-                blurhash: services().users.blurhash(sender_user)?,
-                reason: None,
-                join_authorized_via_users_server: None,
-            })
-            .expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some(sender_user.to_string()),
-            redacts: None,
-        },
-        sender_user,
-        &room_id,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomMember,
+                content: to_raw_value(&RoomMemberEventContent {
+                    membership: MembershipState::Join,
+                    displayname: services().users.displayname(sender_user)?,
+                    avatar_url: services().users.avatar_url(sender_user)?,
+                    is_direct: Some(body.is_direct),
+                    third_party_invite: None,
+                    blurhash: services().users.blurhash(sender_user)?,
+                    reason: None,
+                    join_authorized_via_users_server: None,
+                })
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some(sender_user.to_string()),
+                redacts: None,
+            },
+            sender_user,
+            &room_id,
+            &state_lock,
+        )
+        .await?;

     // 3. Power levels

@@ -245,30 +254,14 @@ pub async fn create_room_route(
         }
     }

-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomPowerLevels,
-            content: to_raw_value(&power_levels_content)
-                .expect("to_raw_value always works on serde_json::Value"),
-            unsigned: None,
-            state_key: Some("".to_owned()),
-            redacts: None,
-        },
-        sender_user,
-        &room_id,
-        &state_lock,
-    )?;
-
-    // 4. Canonical room alias
-    if let Some(room_alias_id) = &alias {
-        services().rooms.timeline.build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomCanonicalAlias,
-                content: to_raw_value(&RoomCanonicalAliasEventContent {
-                    alias: Some(room_alias_id.to_owned()),
-                    alt_aliases: vec![],
-                })
-                .expect("We checked that alias earlier, it must be fine"),
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomPowerLevels,
+                content: to_raw_value(&power_levels_content)
+                    .expect("to_raw_value always works on serde_json::Value"),
                 unsigned: None,
                 state_key: Some("".to_owned()),
                 redacts: None,
@@ -276,64 +269,100 @@ pub async fn create_room_route(
             },
             sender_user,
             &room_id,
             &state_lock,
-    )?;
+        )
+        .await?;
+
+    // 4. Canonical room alias
+    if let Some(room_alias_id) = &alias {
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
+                PduBuilder {
+                    event_type: TimelineEventType::RoomCanonicalAlias,
+                    content: to_raw_value(&RoomCanonicalAliasEventContent {
+                        alias: Some(room_alias_id.to_owned()),
+                        alt_aliases: vec![],
+                    })
+                    .expect("We checked that alias earlier, it must be fine"),
+                    unsigned: None,
+                    state_key: Some("".to_owned()),
+                    redacts: None,
+                },
+                sender_user,
+                &room_id,
+                &state_lock,
+            )
+            .await?;
     }

     // 5. Events set by preset

     // 5.1 Join Rules
-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomJoinRules,
-            content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
-                RoomPreset::PublicChat => JoinRule::Public,
-                // according to spec "invite" is the default
-                _ => JoinRule::Invite,
-            }))
-            .expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some("".to_owned()),
-            redacts: None,
-        },
-        sender_user,
-        &room_id,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomJoinRules,
+                content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
+                    RoomPreset::PublicChat => JoinRule::Public,
+                    // according to spec "invite" is the default
+                    _ => JoinRule::Invite,
+                }))
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &room_id,
+            &state_lock,
+        )
+        .await?;

     // 5.2 History Visibility
-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomHistoryVisibility,
-            content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
-                HistoryVisibility::Shared,
-            ))
-            .expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some("".to_owned()),
-            redacts: None,
-        },
-        sender_user,
-        &room_id,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomHistoryVisibility,
+                content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
+                    HistoryVisibility::Shared,
+                ))
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &room_id,
+            &state_lock,
+        )
+        .await?;

     // 5.3 Guest Access
-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomGuestAccess,
-            content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
-                RoomPreset::PublicChat => GuestAccess::Forbidden,
-                _ => GuestAccess::CanJoin,
-            }))
-            .expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some("".to_owned()),
-            redacts: None,
-        },
-        sender_user,
-        &room_id,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomGuestAccess,
+                content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
+                    RoomPreset::PublicChat => GuestAccess::Forbidden,
+                    _ => GuestAccess::CanJoin,
+                }))
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &room_id,
+            &state_lock,
+        )
+        .await?;

     // 6. Events listed in initial_state
     for event in &body.initial_state {
@@ -352,47 +381,54 @@ pub async fn create_room_route(
             continue;
         }

-        services().rooms.timeline.build_and_append_pdu(
-            pdu_builder,
-            sender_user,
-            &room_id,
-            &state_lock,
-        )?;
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
+            .await?;
     }

     // 7. Events implied by name and topic
     if let Some(name) = &body.name {
-        services().rooms.timeline.build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomName,
-                content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
-                    .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-            },
-            sender_user,
-            &room_id,
-            &state_lock,
-        )?;
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
+                PduBuilder {
+                    event_type: TimelineEventType::RoomName,
+                    content: to_raw_value(&RoomNameEventContent::new(name.clone()))
+                        .expect("event is valid, we just created it"),
+                    unsigned: None,
+                    state_key: Some("".to_owned()),
+                    redacts: None,
+                },
+                sender_user,
+                &room_id,
+                &state_lock,
+            )
+            .await?;
     }

     if let Some(topic) = &body.topic {
-        services().rooms.timeline.build_and_append_pdu(
-            PduBuilder {
-                event_type: TimelineEventType::RoomTopic,
-                content: to_raw_value(&RoomTopicEventContent {
-                    topic: topic.clone(),
-                })
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-            },
-            sender_user,
-            &room_id,
-            &state_lock,
-        )?;
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
+                PduBuilder {
+                    event_type: TimelineEventType::RoomTopic,
+                    content: to_raw_value(&RoomTopicEventContent {
+                        topic: topic.clone(),
+                    })
+                    .expect("event is valid, we just created it"),
+                    unsigned: None,
+                    state_key: Some("".to_owned()),
+                    redacts: None,
+                },
+                sender_user,
+                &room_id,
+                &state_lock,
+            )
+            .await?;
     }

     // 8. Events implied by invite (and TODO: invite_3pid)
@@ -522,7 +558,7 @@ pub async fn upgrade_room_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(body.room_id.clone())
             .or_default(),
     );
@@ -530,22 +566,26 @@ pub async fn upgrade_room_route(

     // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
     // Fail if the sender does not have the required permissions
-    let tombstone_event_id = services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomTombstone,
-            content: to_raw_value(&RoomTombstoneEventContent {
-                body: "This room has been replaced".to_owned(),
-                replacement_room: replacement_room.clone(),
-            })
-            .expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some("".to_owned()),
-            redacts: None,
-        },
-        sender_user,
-        &body.room_id,
-        &state_lock,
-    )?;
+    let tombstone_event_id = services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomTombstone,
+                content: to_raw_value(&RoomTombstoneEventContent {
+                    body: "This room has been replaced".to_owned(),
+                    replacement_room: replacement_room.clone(),
+                })
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &body.room_id,
+            &state_lock,
+        )
+        .await?;

     // Change lock to replacement room
     drop(state_lock);
@@ -554,7 +594,7 @@ pub async fn upgrade_room_route(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(replacement_room.clone())
             .or_default(),
     );
@@ -612,43 +652,51 @@ pub async fn upgrade_room_route(
         ));
     }

-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomCreate,
-            content: to_raw_value(&create_event_content)
-                .expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some("".to_owned()),
-            redacts: None,
-        },
-        sender_user,
-        &replacement_room,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomCreate,
+                content: to_raw_value(&create_event_content)
+                    .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some("".to_owned()),
+                redacts: None,
+            },
+            sender_user,
+            &replacement_room,
+            &state_lock,
+        )
+        .await?;

     // Join the new room
-    services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: TimelineEventType::RoomMember,
-            content: to_raw_value(&RoomMemberEventContent {
-                membership: MembershipState::Join,
-                displayname: services().users.displayname(sender_user)?,
-                avatar_url: services().users.avatar_url(sender_user)?,
-                is_direct: None,
-                third_party_invite: None,
-                blurhash: services().users.blurhash(sender_user)?,
-                reason: None,
-                join_authorized_via_users_server: None,
-            })
-            .expect("event is valid, we just created it"),
-            unsigned: None,
-            state_key: Some(sender_user.to_string()),
-            redacts: None,
-        },
-        sender_user,
-        &replacement_room,
-        &state_lock,
-    )?;
+    services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: TimelineEventType::RoomMember,
+                content: to_raw_value(&RoomMemberEventContent {
+                    membership: MembershipState::Join,
+                    displayname: services().users.displayname(sender_user)?,
+                    avatar_url: services().users.avatar_url(sender_user)?,
+                    is_direct: None,
+                    third_party_invite: None,
+                    blurhash: services().users.blurhash(sender_user)?,
+                    reason: None,
+                    join_authorized_via_users_server: None,
+                })
+                .expect("event is valid, we just created it"),
+                unsigned: None,
+                state_key: Some(sender_user.to_string()),
+                redacts: None,
+            },
+            sender_user,
+            &replacement_room,
+            &state_lock,
+        )
+        .await?;

     // Recommended transferable state events list from the specs
     let transferable_state_events = vec
||||
 /// supported login types.
 pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
     // To allow deprecated login methods
     #![allow(deprecated)]
     // Validate login method
     // TODO: Other login methods
     let user_id = match &body.login_info {
         login::v3::LoginInfo::Password(login::v3::Password {
             identifier,
             password,
             user,
             address: _,
             medium: _,
         }) => {
-            let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
-                user_id.to_lowercase()
+            let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
+                UserId::parse_with_server_name(
+                    user_id.to_lowercase(),
+                    services().globals.server_name(),
+                )
+            } else if let Some(user) = user {
+                UserId::parse(user)
             } else {
                 warn!("Bad login type: {:?}", &body.login_info);
                 return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
-            };
-            let user_id =
-                UserId::parse_with_server_name(username, services().globals.server_name())
-                    .map_err(|_| {
-                        Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
-                    })?;
+            }
+            .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
 
             let hash = services()
                 .users
                 .password_hash(&user_id)?
@@ -105,24 +112,28 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
             ));
             }
         }
-        login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { identifier }) => {
+        login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService {
+            identifier,
+            user,
+        }) => {
             if !body.from_appservice {
                 return Err(Error::BadRequest(
                     ErrorKind::Forbidden,
                     "Forbidden login type.",
                 ));
             };
-            let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
-                user_id.to_lowercase()
+            if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
+                UserId::parse_with_server_name(
+                    user_id.to_lowercase(),
+                    services().globals.server_name(),
+                )
+            } else if let Some(user) = user {
+                UserId::parse(user)
             } else {
                 warn!("Bad login type: {:?}", &body.login_info);
                 return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
-            };
-            let user_id =
-                UserId::parse_with_server_name(username, services().globals.server_name())
-                    .map_err(|_| {
-                        Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
-                    })?;
-            user_id
-        }
+            }
+            .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?
+        }
         _ => {
             warn!("Unsupported or unknown login type: {:?}", &body.login_info);
@@ -163,6 +174,8 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
 
     info!("{} logged in", user_id);
 
+    // Homeservers are still required to send the `home_server` field
+    #[allow(deprecated)]
     Ok(login::v3::Response {
         user_id,
         access_token: token,
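Both login arms follow the same new shape: ruma now makes `identifier` an `Option<UserIdentifier>`, so the code falls back to the deprecated `user` field before rejecting the request, and the old two-step "lowercase, then parse, then map_err" dance collapses into one `if let` chain whose `Result` gets a single `map_err`. A reduced sketch of the control flow, with `parse_user_id` as a hypothetical stand-in for `UserId::parse_with_server_name`:

    // Reduced sketch of the identifier fallback; `parse_user_id` is a
    // hypothetical stand-in for UserId::parse_with_server_name.
    enum UserIdentifier { UserIdOrLocalpart(String) }

    fn parse_user_id(s: String) -> Result<String, ()> {
        if s.is_empty() { Err(()) } else { Ok(s) }
    }

    fn resolve(identifier: Option<UserIdentifier>, user: Option<String>) -> Result<String, String> {
        if let Some(UserIdentifier::UserIdOrLocalpart(id)) = identifier {
            parse_user_id(id.to_lowercase())
        } else if let Some(user) = user {
            parse_user_id(user)
        } else {
            return Err("Bad login type.".to_owned());
        }
        .map_err(|_| "Username is invalid.".to_owned())
    }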
@@ -85,7 +85,7 @@ pub async fn get_state_events_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
@@ -118,7 +118,7 @@ pub async fn get_state_events_for_key_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
@@ -157,7 +157,7 @@ pub async fn get_state_events_for_empty_key_route(
     if !services()
         .rooms
         .state_accessor
-        .user_can_see_state_events(&sender_user, &body.room_id)?
+        .user_can_see_state_events(sender_user, &body.room_id)?
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
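These three hunks, and dozens like them below, are pure `clippy::needless_borrow` fixes: `sender_user` is already a `&UserId`, so `&sender_user` creates a `&&UserId` that the compiler immediately auto-derefs. In miniature:

    // clippy::needless_borrow in miniature.
    fn can_see(_user: &str) -> bool { true }

    fn demo(sender_user: &str) {
        let _ = can_see(&sender_user); // passes a &&str that is auto-deref'd; clippy warns
        let _ = can_see(sender_user);  // what the diff changes it to
    }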
@@ -227,24 +227,28 @@ async fn send_state_event_for_key_helper(
             .globals
             .roomid_mutex_state
             .write()
-            .unwrap()
+            .await
             .entry(room_id.to_owned())
             .or_default(),
     );
     let state_lock = mutex_state.lock().await;
 
-    let event_id = services().rooms.timeline.build_and_append_pdu(
-        PduBuilder {
-            event_type: event_type.to_string().into(),
-            content: serde_json::from_str(json.json().get()).expect("content is valid json"),
-            unsigned: None,
-            state_key: Some(state_key),
-            redacts: None,
-        },
-        sender_user,
-        room_id,
-        &state_lock,
-    )?;
+    let event_id = services()
+        .rooms
+        .timeline
+        .build_and_append_pdu(
+            PduBuilder {
+                event_type: event_type.to_string().into(),
+                content: serde_json::from_str(json.json().get()).expect("content is valid json"),
+                unsigned: None,
+                state_key: Some(state_key),
+                redacts: None,
+            },
+            sender_user,
+            room_id,
+            &state_lock,
+        )
+        .await?;
 
     Ok(event_id)
 }
 
@@ -1,6 +1,7 @@
 use crate::{
     service::rooms::timeline::PduCount, services, Error, PduEvent, Result, Ruma, RumaResponse,
 };
+
 use ruma::{
     api::client::{
         filter::{FilterDefinition, LazyLoadOptions},
@@ -20,7 +21,7 @@ use ruma::{
         StateEventType, TimelineEventType,
     },
     serde::Raw,
-    uint, DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
+    uint, DeviceId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId,
 };
 use std::{
     collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
@@ -75,7 +76,7 @@ pub async fn sync_events_route(
         .globals
         .sync_receivers
         .write()
-        .unwrap()
+        .await
         .entry((sender_user.clone(), sender_device.clone()))
     {
         Entry::Vacant(v) => {
@@ -147,7 +148,7 @@ async fn sync_helper_wrapper(
         .globals
         .sync_receivers
         .write()
-        .unwrap()
+        .await
         .entry((sender_user, sender_device))
     {
         Entry::Occupied(o) => {
@@ -302,11 +303,11 @@ async fn sync_helper(
             .globals
             .roomid_mutex_insert
             .write()
-            .unwrap()
+            .await
             .entry(room_id.clone())
             .or_default(),
     );
-    let insert_lock = mutex_insert.lock().unwrap();
+    let insert_lock = mutex_insert.lock().await;
     drop(insert_lock);
 }
 
@@ -434,11 +435,11 @@ async fn sync_helper(
             .globals
             .roomid_mutex_insert
             .write()
-            .unwrap()
+            .await
             .entry(room_id.clone())
             .or_default(),
     );
-    let insert_lock = mutex_insert.lock().unwrap();
+    let insert_lock = mutex_insert.lock().await;
     drop(insert_lock);
 }
 
@@ -554,6 +555,7 @@ async fn sync_helper(
     }
 }
 
+#[allow(clippy::too_many_arguments)]
 async fn load_joined_room(
     sender_user: &UserId,
     sender_device: &DeviceId,
@@ -576,11 +578,11 @@ async fn load_joined_room(
             .globals
             .roomid_mutex_insert
             .write()
-            .unwrap()
+            .await
            .entry(room_id.to_owned())
             .or_default(),
     );
-    let insert_lock = mutex_insert.lock().unwrap();
+    let insert_lock = mutex_insert.lock().await;
     drop(insert_lock);
 }
 
@@ -590,7 +592,7 @@ async fn load_joined_room(
         || services()
             .rooms
             .user
-            .last_notification_read(&sender_user, &room_id)?
+            .last_notification_read(sender_user, room_id)?
             > since;
 
     let mut timeline_users = HashSet::new();
@@ -598,17 +600,16 @@ async fn load_joined_room(
         timeline_users.insert(event.sender.as_str().to_owned());
     }
 
-    services().rooms.lazy_loading.lazy_load_confirm_delivery(
-        &sender_user,
-        &sender_device,
-        &room_id,
-        sincecount,
-    )?;
+    services()
+        .rooms
+        .lazy_loading
+        .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount)
+        .await?;
 
     // Database queries:
 
     let current_shortstatehash =
-        if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
+        if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
             s
         } else {
             error!("Room {} has no state", room_id);
@@ -618,7 +619,7 @@ async fn load_joined_room(
     let since_shortstatehash = services()
         .rooms
         .user
-        .get_token_shortstatehash(&room_id, since)?;
+        .get_token_shortstatehash(room_id, since)?;
 
     let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
         if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
@@ -630,12 +631,12 @@ async fn load_joined_room(
             let joined_member_count = services()
                 .rooms
                 .state_cache
-                .room_joined_count(&room_id)?
+                .room_joined_count(room_id)?
                 .unwrap_or(0);
             let invited_member_count = services()
                 .rooms
                 .state_cache
-                .room_invited_count(&room_id)?
+                .room_invited_count(room_id)?
                 .unwrap_or(0);
 
             // Recalculate heroes (first 5 members)
@@ -648,7 +649,7 @@ async fn load_joined_room(
             for hero in services()
                 .rooms
                 .timeline
-                .all_pdus(&sender_user, &room_id)?
+                .all_pdus(sender_user, room_id)?
                 .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
                 .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
                 .map(|(_, pdu)| {
@@ -669,11 +670,11 @@ async fn load_joined_room(
                 ) && (services()
                     .rooms
                     .state_cache
-                    .is_joined(&user_id, &room_id)?
+                    .is_joined(&user_id, room_id)?
                     || services()
                         .rooms
                         .state_cache
-                        .is_invited(&user_id, &room_id)?)
+                        .is_invited(&user_id, room_id)?)
                 {
                     Ok::<_, Error>(Some(state_key.clone()))
                 } else {
@@ -789,20 +790,24 @@ async fn load_joined_room(
 
             // Reset lazy loading because this is an initial sync
             services().rooms.lazy_loading.lazy_load_reset(
-                &sender_user,
-                &sender_device,
-                &room_id,
+                sender_user,
+                sender_device,
+                room_id,
             )?;
 
             // The state_events above should contain all timeline_users, let's mark them as lazy
             // loaded.
-            services().rooms.lazy_loading.lazy_load_mark_sent(
-                &sender_user,
-                &sender_device,
-                &room_id,
-                lazy_loaded,
-                next_batchcount,
-            );
+            services()
+                .rooms
+                .lazy_loading
+                .lazy_load_mark_sent(
+                    sender_user,
+                    sender_device,
+                    room_id,
+                    lazy_loaded,
+                    next_batchcount,
+                )
+                .await;
 
             (
                 heroes,
@@ -866,14 +871,14 @@ async fn load_joined_room(
                 }
 
                 if !services().rooms.lazy_loading.lazy_load_was_sent_before(
-                    &sender_user,
-                    &sender_device,
-                    &room_id,
+                    sender_user,
+                    sender_device,
+                    room_id,
                     &event.sender,
                 )? || lazy_load_send_redundant
                 {
                     if let Some(member_event) = services().rooms.state_accessor.room_state_get(
-                        &room_id,
+                        room_id,
                         &StateEventType::RoomMember,
                         event.sender.as_str(),
                     )? {
@@ -883,13 +888,17 @@ async fn load_joined_room(
                 }
             }
 
-            services().rooms.lazy_loading.lazy_load_mark_sent(
-                &sender_user,
-                &sender_device,
-                &room_id,
-                lazy_loaded,
-                next_batchcount,
-            );
+            services()
+                .rooms
+                .lazy_loading
+                .lazy_load_mark_sent(
+                    sender_user,
+                    sender_device,
+                    room_id,
+                    lazy_loaded,
+                    next_batchcount,
+                )
+                .await;
 
             let encrypted_room = services()
                 .rooms
@@ -934,7 +943,7 @@ async fn load_joined_room(
                     match new_membership {
                         MembershipState::Join => {
                             // A new user joined an encrypted room
-                            if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
+                            if !share_encrypted_room(sender_user, &user_id, room_id)? {
                                 device_list_updates.insert(user_id);
                             }
                         }
@@ -954,15 +963,15 @@ async fn load_joined_room(
                 services()
                     .rooms
                     .state_cache
-                    .room_members(&room_id)
+                    .room_members(room_id)
                     .flatten()
                     .filter(|user_id| {
                         // Don't send key updates from the sender to the sender
-                        &sender_user != user_id
+                        sender_user != user_id
                     })
                     .filter(|user_id| {
                         // Only send keys if the sender doesn't share an encrypted room with the target already
-                        !share_encrypted_room(&sender_user, user_id, &room_id)
+                        !share_encrypted_room(sender_user, user_id, room_id)
                             .unwrap_or(false)
                     }),
             );
@@ -997,7 +1006,7 @@ async fn load_joined_room(
             services()
                 .rooms
                 .user
-                .notification_count(&sender_user, &room_id)?
+                .notification_count(sender_user, room_id)?
                 .try_into()
                 .expect("notification count can't go that high"),
         )
@@ -1010,7 +1019,7 @@ async fn load_joined_room(
             services()
                 .rooms
                 .user
-                .highlight_count(&sender_user, &room_id)?
+                .highlight_count(sender_user, room_id)?
                 .try_into()
                 .expect("highlight count can't go that high"),
         )
@@ -1039,15 +1048,15 @@ async fn load_joined_room(
         .rooms
         .edus
         .read_receipt
-        .readreceipts_since(&room_id, since)
+        .readreceipts_since(room_id, since)
         .filter_map(|r| r.ok()) // Filter out buggy events
         .map(|(_, _, v)| v)
         .collect();
 
-    if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
+    if services().rooms.edus.typing.last_typing_update(room_id)? > since {
         edus.push(
             serde_json::from_str(
-                &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?)
+                &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id)?)
                     .expect("event is valid, we just created it"),
             )
             .expect("event is valid, we just created it"),
@@ -1056,7 +1065,7 @@ async fn load_joined_room(
 
     // Save the state after this sync so we can send the correct state diff next sync
     services().rooms.user.associate_token_shortstatehash(
-        &room_id,
+        room_id,
         next_batch,
         current_shortstatehash,
     )?;
@@ -1065,7 +1074,7 @@ async fn load_joined_room(
         account_data: RoomAccountData {
             events: services()
                 .account_data
-                .changes_since(Some(&room_id), &sender_user, since)?
+                .changes_since(Some(room_id), sender_user, since)?
                 .into_iter()
                 .filter_map(|(_, v)| {
                     serde_json::from_str(v.json().get())
@@ -1102,7 +1111,7 @@ async fn load_joined_room(
 fn load_timeline(
     sender_user: &UserId,
     room_id: &RoomId,
-    sincecount: PduCount,
+    roomsincecount: PduCount,
     limit: u64,
 ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> {
     let timeline_pdus;
@@ -1110,13 +1119,13 @@ fn load_timeline(
     if services()
         .rooms
         .timeline
-        .last_timeline_count(&sender_user, &room_id)?
-        > sincecount
+        .last_timeline_count(sender_user, room_id)?
+        > roomsincecount
     {
         let mut non_timeline_pdus = services()
             .rooms
             .timeline
-            .pdus_until(&sender_user, &room_id, PduCount::max())?
+            .pdus_until(sender_user, room_id, PduCount::max())?
             .filter_map(|r| {
                 // Filter out buggy events
                 if r.is_err() {
@@ -1124,7 +1133,7 @@ fn load_timeline(
                 }
                 r.ok()
             })
-            .take_while(|(pducount, _)| pducount > &sincecount);
+            .take_while(|(pducount, _)| pducount > &roomsincecount);
 
         // Take the last events for the timeline
         timeline_pdus = non_timeline_pdus
@@ -1178,16 +1187,15 @@ pub async fn sync_events_v4_route(
     // Setup watchers, so if there's no response, we can wait for them
     let watcher = services().globals.watch(&sender_user, &sender_device);
 
-    let next_batch = services().globals.current_count()?;
+    let next_batch = services().globals.next_count()?;
 
-    let since = body
+    let globalsince = body
         .pos
         .as_ref()
         .and_then(|string| string.parse().ok())
         .unwrap_or(0);
-    let sincecount = PduCount::Normal(since);
 
-    if since == 0 {
+    if globalsince == 0 {
         if let Some(conn_id) = &body.conn_id {
             services().users.forget_sync_request_connection(
                 sender_user.clone(),
@@ -1214,7 +1222,7 @@ pub async fn sync_events_v4_route(
     if body.extensions.to_device.enabled.unwrap_or(false) {
         services()
             .users
-            .remove_to_device_events(&sender_user, &sender_device, since)?;
+            .remove_to_device_events(&sender_user, &sender_device, globalsince)?;
     }
 
     let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
@@ -1226,13 +1234,13 @@ pub async fn sync_events_v4_route(
     device_list_changes.extend(
         services()
             .users
-            .keys_changed(sender_user.as_ref(), since, None)
+            .keys_changed(sender_user.as_ref(), globalsince, None)
             .filter_map(|r| r.ok()),
     );
 
     for room_id in &all_joined_rooms {
         let current_shortstatehash =
-            if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
+            if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? {
                 s
             } else {
                 error!("Room {} has no state", room_id);
@@ -1242,7 +1250,7 @@ pub async fn sync_events_v4_route(
         let since_shortstatehash = services()
             .rooms
             .user
-            .get_token_shortstatehash(&room_id, since)?;
+            .get_token_shortstatehash(room_id, globalsince)?;
 
         let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
             .and_then(|shortstatehash| {
@@ -1331,7 +1339,7 @@ pub async fn sync_events_v4_route(
                         if !share_encrypted_room(
                             &sender_user,
                             &user_id,
-                            &room_id,
+                            room_id,
                         )? {
                             device_list_changes.insert(user_id);
                         }
@@ -1352,7 +1360,7 @@ pub async fn sync_events_v4_route(
                 services()
                     .rooms
                     .state_cache
-                    .room_members(&room_id)
+                    .room_members(room_id)
                     .flatten()
                     .filter(|user_id| {
                         // Don't send key updates from the sender to the sender
@@ -1360,7 +1368,7 @@ pub async fn sync_events_v4_route(
                     })
                     .filter(|user_id| {
                         // Only send keys if the sender doesn't share an encrypted room with the target already
-                        !share_encrypted_room(&sender_user, user_id, &room_id)
+                        !share_encrypted_room(&sender_user, user_id, room_id)
                             .unwrap_or(false)
                     }),
             );
@@ -1371,7 +1379,7 @@ pub async fn sync_events_v4_route(
         device_list_changes.extend(
             services()
                 .users
-                .keys_changed(room_id.as_ref(), since, None)
+                .keys_changed(room_id.as_ref(), globalsince, None)
                 .filter_map(|r| r.ok()),
         );
     }
@@ -1408,7 +1416,7 @@ pub async fn sync_events_v4_route(
             continue;
         }
 
-        let mut new_known_rooms = BTreeMap::new();
+        let mut new_known_rooms = BTreeSet::new();
 
         lists.insert(
             list_id.clone(),
@@ -1424,12 +1432,12 @@ pub async fn sync_events_v4_route(
                     let room_ids = all_joined_rooms
                         [(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)]
                         .to_vec();
-                    new_known_rooms.extend(room_ids.iter().cloned().map(|r| (r, true)));
+                    new_known_rooms.extend(room_ids.iter().cloned());
                     for room_id in &room_ids {
                         let todo_room = todo_rooms.entry(room_id.clone()).or_insert((
                             BTreeSet::new(),
                             0,
-                            true,
+                            u64::MAX,
                         ));
                         let limit = list
                             .room_details
@@ -1440,14 +1448,18 @@ pub async fn sync_events_v4_route(
                             .0
                             .extend(list.room_details.required_state.iter().cloned());
                         todo_room.1 = todo_room.1.max(limit);
-                        if known_rooms.get(&list_id).and_then(|k| k.get(room_id)) != Some(&true)
-                        {
-                            todo_room.2 = false;
-                        }
+                        // 0 means unknown because it got out of date
+                        todo_room.2 = todo_room.2.min(
+                            known_rooms
+                                .get(&list_id)
+                                .and_then(|k| k.get(room_id))
+                                .copied()
+                                .unwrap_or(0),
+                        );
                     }
                     sync_events::v4::SyncOp {
                         op: SlidingOp::Sync,
-                        range: Some(r.clone()),
+                        range: Some(r),
                         index: None,
                         room_ids,
                         room_id: None,
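The sliding-sync bookkeeping changes shape here: instead of remembering a boolean "this room is known" per list, the server now remembers the sync position (`u64`) at which each room was last sent, with `0` meaning "unknown because it got out of date"; `todo_room.2` starts at `u64::MAX` and is min'd down across lists, which later lets the response pick a per-room `roomsince` token. A sketch of the watermark merge under those assumptions:

    // Sketch of the known-rooms watermark: track the oldest sync position at
    // which each room was sent, instead of a bool.
    use std::collections::BTreeMap;

    fn merge_watermark(
        known_rooms: &BTreeMap<String, BTreeMap<String, u64>>,
        list_id: &str,
        room_id: &str,
        current: u64, // starts at u64::MAX for a fresh todo_room
    ) -> u64 {
        // 0 means unknown because it got out of date
        current.min(
            known_rooms
                .get(list_id)
                .and_then(|k| k.get(room_id))
                .copied()
                .unwrap_or(0),
        )
    }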
@@ -1465,26 +1477,31 @@ pub async fn sync_events_v4_route(
                     conn_id.clone(),
                     list_id,
                     new_known_rooms,
+                    globalsince,
                 );
             }
         }
 
-        let mut known_subscription_rooms = BTreeMap::new();
+        let mut known_subscription_rooms = BTreeSet::new();
         for (room_id, room) in &body.room_subscriptions {
             if !services().rooms.metadata.exists(room_id)? {
                 continue;
             }
             let todo_room = todo_rooms
                 .entry(room_id.clone())
-                .or_insert((BTreeSet::new(), 0, true));
+                .or_insert((BTreeSet::new(), 0, u64::MAX));
             let limit = room.timeline_limit.map_or(10, u64::from).min(100);
             todo_room.0.extend(room.required_state.iter().cloned());
             todo_room.1 = todo_room.1.max(limit);
-            if known_rooms
-                .get("subscriptions")
-                .and_then(|k| k.get(room_id))
-                != Some(&true)
-            {
-                todo_room.2 = false;
-            }
-            known_subscription_rooms.insert(room_id.clone(), true);
+            // 0 means unknown because it got out of date
+            todo_room.2 = todo_room.2.min(
+                known_rooms
+                    .get("subscriptions")
+                    .and_then(|k| k.get(room_id))
+                    .copied()
+                    .unwrap_or(0),
+            );
+            known_subscription_rooms.insert(room_id.clone());
         }
 
         for r in body.unsubscribe_rooms {
@@ -1499,6 +1516,7 @@ pub async fn sync_events_v4_route(
             conn_id.clone(),
             "subscriptions".to_owned(),
             known_subscription_rooms,
+            globalsince,
         );
     }
 
@@ -1512,12 +1530,13 @@ pub async fn sync_events_v4_route(
     }
 
     let mut rooms = BTreeMap::new();
-    for (room_id, (required_state_request, timeline_limit, known)) in &todo_rooms {
-        // TODO: per-room sync tokens
-        let (timeline_pdus, limited) =
-            load_timeline(&sender_user, &room_id, sincecount, *timeline_limit)?;
+    for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms {
+        let roomsincecount = PduCount::Normal(*roomsince);
 
-        if *known && timeline_pdus.is_empty() {
+        let (timeline_pdus, limited) =
+            load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?;
+
+        if roomsince != &0 && timeline_pdus.is_empty() {
             continue;
         }
 
@@ -1533,8 +1552,8 @@ pub async fn sync_events_v4_route(
                 }))
             })?
             .or_else(|| {
-                if since != 0 {
-                    Some(since.to_string())
+                if roomsince != &0 {
+                    Some(roomsince.to_string())
                 } else {
                     None
                 }
@@ -1547,63 +1566,58 @@ pub async fn sync_events_v4_route(
 
         let required_state = required_state_request
             .iter()
-            .map(|state| {
+            .flat_map(|state| {
                 services()
                     .rooms
                     .state_accessor
-                    .room_state_get(&room_id, &state.0, &state.1)
+                    .room_state_get(room_id, &state.0, &state.1)
+                    .ok()
+                    .flatten()
+                    .map(|state| state.to_sync_state_event())
             })
-            .filter_map(|r| r.ok())
-            .filter_map(|o| o)
-            .map(|state| state.to_sync_state_event())
             .collect();
 
         // Heroes
         let heroes = services()
             .rooms
            .state_cache
-            .room_members(&room_id)
+            .room_members(room_id)
            .filter_map(|r| r.ok())
             .filter(|member| member != &sender_user)
-            .map(|member| {
-                Ok::<_, Error>(
-                    services()
-                        .rooms
-                        .state_accessor
-                        .get_member(&room_id, &member)?
-                        .map(|memberevent| {
-                            (
-                                memberevent
-                                    .displayname
-                                    .unwrap_or_else(|| member.to_string()),
-                                memberevent.avatar_url,
-                            )
-                        }),
-                )
+            .flat_map(|member| {
+                services()
+                    .rooms
+                    .state_accessor
+                    .get_member(room_id, &member)
+                    .ok()
+                    .flatten()
+                    .map(|memberevent| {
+                        (
+                            memberevent
+                                .displayname
+                                .unwrap_or_else(|| member.to_string()),
+                            memberevent.avatar_url,
+                        )
+                    })
             })
-            .filter_map(|r| r.ok())
-            .filter_map(|o| o)
             .take(5)
             .collect::<Vec<_>>();
-        let name = if heroes.len() > 1 {
-            let last = heroes[0].0.clone();
-            Some(
-                heroes[1..]
+        let name = match &heroes[..] {
+            [] => None,
+            [only] => Some(only.0.clone()),
+            [firsts @ .., last] => Some(
+                firsts
                     .iter()
                     .map(|h| h.0.clone())
                     .collect::<Vec<_>>()
                     .join(", ")
                     + " and "
-                    + &last,
-            )
-        } else if heroes.len() == 1 {
-            Some(heroes[0].0.clone())
-        } else {
-            None
+                    + &last.0,
+            ),
         };
 
-        let avatar = if heroes.len() == 1 {
-            heroes[0].1.clone()
+        let avatar = if let [only] = &heroes[..] {
+            only.1.clone()
         } else {
             None
         };
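The hero-name computation above is rewritten from index arithmetic (`heroes.len()`, `heroes[0]`, `heroes[1..]`) into a slice-pattern match, which removes bounds-checked indexing and makes the empty/single/many cases explicit. The same logic as a standalone sketch:

    // Slice-pattern version of the room-name heuristic used above.
    fn room_name(heroes: &[(String, Option<String>)]) -> Option<String> {
        match heroes {
            [] => None,
            [only] => Some(only.0.clone()),
            [firsts @ .., last] => Some(
                firsts
                    .iter()
                    .map(|h| h.0.clone())
                    .collect::<Vec<_>>()
                    .join(", ")
                    + " and "
                    + &last.0,
            ),
        }
    }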
@@ -1611,17 +1625,17 @@ pub async fn sync_events_v4_route(
         rooms.insert(
             room_id.clone(),
             sync_events::v4::SlidingSyncRoom {
-                name: services()
-                    .rooms
-                    .state_accessor
-                    .get_name(&room_id)?
-                    .or_else(|| name),
-                avatar: services()
-                    .rooms
-                    .state_accessor
-                    .get_avatar(&room_id)?
-                    .map_or(avatar, |a| a.url),
-                initial: Some(!known),
+                name: services().rooms.state_accessor.get_name(room_id)?.or(name),
+                avatar: if let Some(avatar) = avatar {
+                    JsOption::Some(avatar)
+                } else {
+                    match services().rooms.state_accessor.get_avatar(room_id)? {
+                        JsOption::Some(avatar) => JsOption::from_option(avatar.url),
+                        JsOption::Null => JsOption::Null,
+                        JsOption::Undefined => JsOption::Undefined,
+                    }
+                },
+                initial: Some(roomsince == &0),
                 is_dm: None,
                 invite_state: None,
                 unread_notifications: UnreadNotificationsCount {
@@ -1629,7 +1643,7 @@ pub async fn sync_events_v4_route(
                     services()
                         .rooms
                         .user
-                        .highlight_count(&sender_user, &room_id)?
+                        .highlight_count(&sender_user, room_id)?
                         .try_into()
                         .expect("notification count can't go that high"),
                 ),
@@ -1637,7 +1651,7 @@ pub async fn sync_events_v4_route(
                     services()
                         .rooms
                         .user
-                        .notification_count(&sender_user, &room_id)?
+                        .notification_count(&sender_user, room_id)?
                         .try_into()
                         .expect("notification count can't go that high"),
                 ),
@@ -1650,7 +1664,7 @@ pub async fn sync_events_v4_route(
                     (services()
                         .rooms
                         .state_cache
-                        .room_joined_count(&room_id)?
+                        .room_joined_count(room_id)?
                         .unwrap_or(0) as u32)
                         .into(),
                 ),
@@ -1658,11 +1672,12 @@ pub async fn sync_events_v4_route(
                     (services()
                         .rooms
                         .state_cache
-                        .room_invited_count(&room_id)?
+                        .room_invited_count(room_id)?
                         .unwrap_or(0) as u32)
                         .into(),
                 ),
+                num_live: None, // Count events in timeline greater than global sync counter
                 timestamp: None,
             },
         );
     }
@@ -1681,7 +1696,7 @@ pub async fn sync_events_v4_route(
     }
 
     Ok(sync_events::v4::Response {
-        initial: since == 0,
+        initial: globalsince == 0,
         txn_id: body.txn_id.clone(),
         pos: next_batch.to_string(),
         lists,
@@ -1712,7 +1727,7 @@ pub async fn sync_events_v4_route(
         global: if body.extensions.account_data.enabled.unwrap_or(false) {
             services()
                 .account_data
-                .changes_since(None, &sender_user, since)?
+                .changes_since(None, &sender_user, globalsince)?
                 .into_iter()
                 .filter_map(|(_, v)| {
                     serde_json::from_str(v.json().get())
 
@@ -26,6 +26,7 @@ pub async fn get_supported_versions_route(
             "v1.2".to_owned(),
             "v1.3".to_owned(),
             "v1.4".to_owned(),
+            "v1.5".to_owned(),
         ],
         unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
     };
 
@@ -51,11 +51,12 @@ use std::{
     fmt::Debug,
     mem,
     net::{IpAddr, SocketAddr},
-    sync::{Arc, RwLock},
+    sync::Arc,
     time::{Duration, Instant, SystemTime},
 };
+use tokio::sync::RwLock;
 
-use tracing::{debug, error, trace, warn};
+use tracing::{debug, error, warn};
 
 /// Wraps either an literal IP address plus port, or a hostname plus complement
 /// (colon-plus-port if it was specified).
@@ -137,7 +138,7 @@ where
         .globals
         .actual_destination_cache
         .read()
-        .unwrap()
+        .await
         .get(destination)
         .cloned();
 
@@ -290,7 +291,7 @@ where
         .globals
         .actual_destination_cache
         .write()
-        .unwrap()
+        .await
         .insert(
             OwnedServerName::from(destination),
             (actual_destination, host),
@@ -341,7 +342,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest {
 }
 
 /// Returns: actual_destination, host header
-/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names
+/// Implemented according to the specification at <https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names>
 /// Numbers in comments below refer to bullet points in linked section of specification
 async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) {
     debug!("Finding actual destination for {destination}");
@@ -666,7 +667,7 @@ pub fn parse_incoming_pdu(
 
     let room_version_id = services().rooms.state.get_room_version(&room_id)?;
 
-    let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) {
+    let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) {
         Ok(t) => t,
         Err(_) => {
             // Event could not be converted to canonical json
@@ -724,7 +725,7 @@ pub async fn send_transaction_message_route(
             continue;
         }
 
-        let r = parse_incoming_pdu(&pdu);
+        let r = parse_incoming_pdu(pdu);
         let (event_id, value, room_id) = match r {
             Ok(t) => t,
             Err(e) => {
@@ -735,17 +736,12 @@ pub async fn send_transaction_message_route(
         };
         // We do not add the event_id field to the pdu here because of signature and hashes checks
 
-        services()
-            .rooms
-            .event_handler
-            .acl_check(sender_servername, &room_id)?;
-
         let mutex = Arc::clone(
             services()
                 .globals
                 .roomid_mutex_federation
                 .write()
-                .unwrap()
+                .await
                 .entry(room_id.to_owned())
                 .or_default(),
         );
@@ -997,7 +993,7 @@ pub async fn get_event_route(
 
     if !services().rooms.state_accessor.server_can_see_event(
         sender_servername,
-        &room_id,
+        room_id,
         &body.event_id,
     )? {
         return Err(Error::BadRequest(
@@ -1063,7 +1059,7 @@ pub async fn get_backfill_route(
     let all_events = services()
         .rooms
         .timeline
-        .pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
+        .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)?
         .take(limit.try_into().unwrap());
 
     let events = all_events
@@ -1080,7 +1076,7 @@ pub async fn get_backfill_route(
         })
         .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id))
         .filter_map(|r| r.ok().flatten())
-        .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu))
+        .map(PduEvent::convert_to_outgoing_federation_event)
        .collect();
 
     Ok(get_backfill::v1::Response {
@@ -1414,7 +1410,7 @@ pub async fn create_join_event_template_route(
         .globals
         .roomid_mutex_state
         .write()
-        .unwrap()
+        .await
         .entry(body.room_id.to_owned())
         .or_default(),
 );
@@ -1584,7 +1580,7 @@ async fn create_join_event(
         .globals
         .roomid_mutex_federation
         .write()
-        .unwrap()
+        .await
         .entry(room_id.to_owned())
         .or_default(),
 );
@@ -1804,6 +1800,13 @@ pub async fn get_devices_route(
         return Err(Error::bad_config("Federation is disabled."));
     }
 
+    if body.user_id.server_name() != services().globals.server_name() {
+        return Err(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Tried to access user from other server.",
+        ));
+    }
+
     let sender_servername = body
         .sender_servername
         .as_ref()
@@ -1878,6 +1881,13 @@ pub async fn get_profile_information_route(
         return Err(Error::bad_config("Federation is disabled."));
     }
 
+    if body.user_id.server_name() != services().globals.server_name() {
+        return Err(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Tried to access user from other server.",
+        ));
+    }
+
     let mut displayname = None;
     let mut avatar_url = None;
     let mut blurhash = None;
@@ -1914,6 +1924,17 @@ pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_key
         return Err(Error::bad_config("Federation is disabled."));
     }
 
+    if body
+        .device_keys
+        .iter()
+        .any(|(u, _)| u.server_name() != services().globals.server_name())
+    {
+        return Err(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Tried to access user from other server.",
+        ));
+    }
+
     let result = get_keys_helper(None, &body.device_keys, |u| {
         Some(u.server_name()) == body.sender_servername.as_deref()
     })
@@ -1936,6 +1957,17 @@ pub async fn claim_keys_route(
         return Err(Error::bad_config("Federation is disabled."));
     }
 
+    if body
+        .one_time_keys
+        .iter()
+        .any(|(u, _)| u.server_name() != services().globals.server_name())
+    {
+        return Err(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Tried to access user from other server.",
+        ));
+    }
+
     let result = claim_keys_helper(&body.one_time_keys).await?;
 
     Ok(claim_keys::v1::Response {
 
13	src/clap.rs (new file)
@@ -0,0 +1,13 @@
+//! Integration with `clap`
+
+use clap::Parser;
+
+/// Command line arguments
+#[derive(Parser)]
+#[clap(about, version)]
+pub struct Args {}
+
+/// Parse command line arguments into structured data
+pub fn parse() -> Args {
+    Args::parse()
+}
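The new module is tiny on purpose: even with an empty `Args` struct, `#[clap(about, version)]` gives the binary working `--help` and `--version` handlers for free, pulling the description and version from Cargo.toml at compile time, and `parse()` aborts with a usage message on unknown flags. A minimal sketch of the same pattern outside conduit:

    // Minimal sketch of the clap derive pattern used in src/clap.rs.
    use clap::Parser;

    /// Command line arguments
    #[derive(Parser)]
    #[clap(about = "demo binary", version)]
    struct Args {}

    fn main() {
        let _args = Args::parse(); // `--help`/`--version` handled by clap; unknown flags abort
    }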
@@ -264,7 +264,7 @@ fn default_trusted_servers() -> Vec<OwnedServerName> {
 }
 
 fn default_log() -> String {
-    "warn,state_res=warn,_=off,sled=off".to_owned()
+    "warn,state_res=warn,_=off".to_owned()
 }
 
 fn default_turn_ttl() -> u64 {
 
@@ -29,7 +29,9 @@ use crate::Result;
 /// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`.
 #[derive(Clone, Debug, Deserialize)]
 #[serde(rename_all = "snake_case")]
+#[derive(Default)]
 pub enum ProxyConfig {
+    #[default]
     None,
     Global {
         #[serde(deserialize_with = "crate::utils::deserialize_from_str")]
@@ -48,11 +50,6 @@ impl ProxyConfig {
         })
     }
 }
-impl Default for ProxyConfig {
-    fn default() -> Self {
-        ProxyConfig::None
-    }
-}
 
 #[derive(Clone, Debug, Deserialize)]
 pub struct PartialProxyConfig {
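This swaps the hand-written `impl Default for ProxyConfig` for the derived form: since Rust 1.62, `#[derive(Default)]` works on enums when exactly one variant carries the `#[default]` attribute. In isolation:

    // Derived enum Default (Rust 1.62+), the same shape the diff gives ProxyConfig.
    #[allow(dead_code)]
    #[derive(Debug, Default)]
    enum ProxyConfig {
        #[default]
        None,
        Global { url: String },
    }

    fn main() {
        assert!(matches!(ProxyConfig::default(), ProxyConfig::None));
    }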
@@ -116,7 +116,7 @@ impl KvTree for PersyTree {
         match iter {
             Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
                 v.into_iter()
-                    .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
+                    .map(|val| ((*k).to_owned(), (*val).to_owned()))
                     .next()
             })),
             Err(e) => {
@@ -142,7 +142,7 @@ impl KvTree for PersyTree {
             Ok(iter) => {
                 let map = iter.filter_map(|(k, v)| {
                     v.into_iter()
-                        .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
+                        .map(|val| ((*k).to_owned(), (*val).to_owned()))
                         .next()
                 });
                 if backwards {
@@ -179,7 +179,7 @@ impl KvTree for PersyTree {
                 iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
                     .filter_map(|(k, v)| {
                         v.into_iter()
-                            .map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
+                            .map(|val| ((*k).to_owned(), (*val).to_owned()))
                            .next()
                     }),
             )
 
@@ -49,6 +49,9 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
     db_opts.set_max_background_jobs(6);
     db_opts.set_bytes_per_sync(1048576);
 
+    // https://github.com/facebook/rocksdb/issues/849
+    db_opts.set_keep_log_file_num(100);
+
     // https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords
     //
     // Unclean shutdowns of a Matrix homeserver are likely to be fine when
 
@@ -33,7 +33,7 @@ impl Iterator for PreparedStatementIterator<'_> {
 struct NonAliasingBox<T>(*mut T);
 impl<T> Drop for NonAliasingBox<T> {
     fn drop(&mut self) {
-        unsafe { Box::from_raw(self.0) };
+        drop(unsafe { Box::from_raw(self.0) });
     }
 }
 
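Both forms of the sqlite change free the allocation at the same point; the old bare expression statement already dropped the reconstructed `Box` at the end of the statement, but wrapping it in `drop(...)` spells out the intent and, presumably, quiets newer compiler/clippy lints that flag a value constructed and immediately discarded. Standalone:

    // Why the explicit drop: both statements free the allocation at the same
    // point, but the second one states what happens to the reconstructed Box.
    fn free_raw(ptr: *mut String) {
        // unsafe { Box::from_raw(ptr) };          // implicit drop at end of statement
        drop(unsafe { Box::from_raw(ptr) }); // explicit: reclaim ownership and free here
    }

    fn main() {
        let ptr = Box::into_raw(Box::new("owned".to_owned()));
        free_raw(ptr);
    }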
@@ -8,6 +8,7 @@ use tokio::sync::watch;
 
 #[derive(Default)]
 pub(super) struct Watchers {
+    #[allow(clippy::type_complexity)]
     watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
 }
 
@@ -123,13 +123,12 @@ impl service::account_data::Data for KeyValueDatabase {
             .take_while(move |(k, _)| k.starts_with(&prefix))
             .map(|(k, v)| {
                 Ok::<_, Error>((
-                    RoomAccountDataEventType::try_from(
+                    RoomAccountDataEventType::from(
                         utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else(
                             || Error::bad_database("RoomUserData ID in db is invalid."),
                         )?)
                         .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
-                    )
-                    .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
+                    ),
                     serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
                         Error::bad_database("Database contains invalid account data.")
                     })?,
@ -256,8 +256,8 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
|
|||
..
|
||||
} = new_keys;
|
||||
|
||||
keys.verify_keys.extend(verify_keys.into_iter());
|
||||
keys.old_verify_keys.extend(old_verify_keys.into_iter());
|
||||
keys.verify_keys.extend(verify_keys);
|
||||
keys.old_verify_keys.extend(old_verify_keys);
|
||||
|
||||
self.server_signingkeys.insert(
|
||||
origin.as_bytes(),
|
||||
|
|
|
@@ -157,10 +157,9 @@ impl service::rooms::short::Data for KeyValueDatabase {
             .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;
 
         let event_type =
-            StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
+            StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
                 Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
-            })?)
-            .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;
+            })?);
 
         let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| {
             Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
 
@@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
             let parsed = services()
                 .rooms
                 .state_compressor
-                .parse_compressed_state_event(&compressed)?;
+                .parse_compressed_state_event(compressed)?;
             result.insert(parsed.0, parsed.1);
 
             i += 1;
@@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
             let (_, eventid) = services()
                 .rooms
                 .state_compressor
-                .parse_compressed_state_event(&compressed)?;
+                .parse_compressed_state_event(compressed)?;
             if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
                 result.insert(
                     (
@@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
                 services()
                     .rooms
                     .state_compressor
-                    .parse_compressed_state_event(&compressed)
+                    .parse_compressed_state_event(compressed)
                     .ok()
                     .map(|(_, id)| id)
             }))
 
@@ -471,6 +471,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
     }
 
     /// Returns an iterator over all rooms a user was invited to.
+    #[allow(clippy::type_complexity)]
     #[tracing::instrument(skip(self))]
     fn rooms_invited<'a>(
         &'a self,
@@ -549,6 +550,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
     }
 
     /// Returns an iterator over all rooms a user left.
+    #[allow(clippy::type_complexity)]
     #[tracing::instrument(skip(self))]
     fn rooms_left<'a>(
         &'a self,
 
@@ -10,7 +10,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
         user_id: &'a UserId,
         room_id: &'a RoomId,
         until: u64,
-        include: &'a IncludeThreads,
+        _include: &'a IncludeThreads,
     ) -> Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>> {
         let prefix = services()
             .rooms
@@ -27,7 +27,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
         self.threadid_userids
             .iter_from(&current, true)
             .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(move |(pduid, users)| {
+            .map(move |(pduid, _users)| {
                 let count = utils::u64_from_bytes(&pduid[(mem::size_of::<u64>())..])
                     .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?;
                 let mut pdu = services()
@@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase {
             .collect::<Vec<_>>()
             .join(&[0xff][..]);
 
-        self.threadid_userids.insert(&root_id, &users)?;
+        self.threadid_userids.insert(root_id, &users)?;
 
         Ok(())
     }
 
     fn get_participants(&self, root_id: &[u8]) -> Result<Option<Vec<OwnedUserId>>> {
-        if let Some(users) = self.threadid_userids.get(&root_id)? {
+        if let Some(users) = self.threadid_userids.get(root_id)? {
             Ok(Some(
                 users
                     .split(|b| *b == 0xff)
 
@@ -39,11 +39,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
 
     /// Returns the `count` of this pdu's id.
     fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
-        Ok(self
-            .eventid_pduid
+        self.eventid_pduid
             .get(event_id.as_bytes())?
             .map(|pdu_id| pdu_count(&pdu_id))
-            .transpose()?)
+            .transpose()
     }
 
     /// Returns the json of a pdu.
@@ -80,12 +79,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
 
     /// Returns the pdu's id.
     fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
-        Ok(self.eventid_pduid.get(event_id.as_bytes())?)
+        self.eventid_pduid.get(event_id.as_bytes())
     }
 
     /// Returns the pdu.
     ///
     /// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
     fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
         self.eventid_pduid
             .get(event_id.as_bytes())?
@@ -232,7 +229,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         room_id: &RoomId,
         until: PduCount,
     ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
-        let (prefix, current) = count_to_id(&room_id, until, 1, true)?;
+        let (prefix, current) = count_to_id(room_id, until, 1, true)?;
 
         let user_id = user_id.to_owned();
 
@@ -259,7 +256,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         room_id: &RoomId,
         from: PduCount,
     ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
-        let (prefix, current) = count_to_id(&room_id, from, 1, false)?;
+        let (prefix, current) = count_to_id(room_id, from, 1, false)?;
 
         let user_id = user_id.to_owned();
 
@@ -334,7 +331,7 @@ fn count_to_id(
         .rooms
         .short
         .get_shortroomid(room_id)?
-        .expect("room exists")
+        .ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))?
         .to_be_bytes()
         .to_vec();
     let mut pdu_id = prefix.clone();
 
@@ -146,10 +146,9 @@ impl service::users::Data for KeyValueDatabase {
         self.userid_avatarurl
             .get(user_id.as_bytes())?
             .map(|bytes| {
-                let s = utils::string_from_bytes(&bytes)
-                    .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?;
-                s.try_into()
+                utils::string_from_bytes(&bytes)
                     .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))
+                    .map(Into::into)
             })
             .transpose()
     }
 
@@ -852,7 +852,9 @@ impl KeyValueDatabase {
                     if rule.is_some() {
                         let mut rule = rule.unwrap().clone();
                         rule.rule_id = content_rule_transformation[1].to_owned();
-                        rules_list.content.remove(content_rule_transformation[0]);
+                        rules_list
+                            .content
+                            .shift_remove(content_rule_transformation[0]);
                         rules_list.content.insert(rule);
                     }
                 }
@@ -875,7 +877,7 @@ impl KeyValueDatabase {
                 if let Some(rule) = rule {
                     let mut rule = rule.clone();
                     rule.rule_id = transformation[1].to_owned();
-                    rules_list.underride.remove(transformation[0]);
+                    rules_list.underride.shift_remove(transformation[0]);
                     rules_list.underride.insert(rule);
                 }
             }
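The push-rule sets behind `rules_list.content` and `.underride` are, as far as ruma exposes them, order-preserving index sets from the `indexmap` crate, where plain `remove` was deprecated because the name hid its behavior. The diff opts into `shift_remove`, which keeps the remaining rules in order (the property that matters for push-rule evaluation) at O(n) cost, rather than `swap_remove`, which is O(1) but reorders. Assuming `indexmap` directly:

    // shift_remove vs swap_remove on an IndexSet (indexmap crate, assumed here).
    use indexmap::IndexSet;

    fn main() {
        let mut rules: IndexSet<&str> = ["a", "b", "c", "d"].into_iter().collect();
        rules.shift_remove("b"); // order kept: a, c, d
        assert!(rules.iter().eq(["a", "c", "d"].iter()));

        let mut rules2: IndexSet<&str> = ["a", "b", "c", "d"].into_iter().collect();
        rules2.swap_remove("b"); // last element swapped into the hole: a, d, c
        assert!(rules2.iter().eq(["a", "d", "c"].iter()));
    }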
13	src/lib.rs
@@ -1,18 +1,13 @@
-#![warn(
-    rust_2018_idioms,
-    unused_qualifications,
-    clippy::cloned_instead_of_copied,
-    clippy::str_to_string
-)]
-#![allow(clippy::suspicious_else_formatting)]
-#![deny(clippy::dbg_macro)]
-
 pub mod api;
+pub mod clap;
 mod config;
 mod database;
 mod service;
 mod utils;
 
+// Not async due to services() being used in many closures, and async closures are not stable as of writing
+// This is the case for every other occurence of sync Mutex/RwLock, except for database related ones, where
+// the current maintainer (Timo) has asked to not modify those
+use std::sync::RwLock;
+
 pub use api::ruma_wrapper::{Ruma, RumaResponse};
20	src/main.rs
@@ -1,13 +1,3 @@
-#![warn(
-    rust_2018_idioms,
-    unused_qualifications,
-    clippy::cloned_instead_of_copied,
-    clippy::str_to_string,
-    clippy::future_not_send
-)]
-#![allow(clippy::suspicious_else_formatting)]
-#![deny(clippy::dbg_macro)]
-
 use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration};
 
 use axum::{
@@ -54,6 +44,8 @@ static GLOBAL: Jemalloc = Jemalloc;
 
 #[tokio::main]
 async fn main() {
+    clap::parse();
+
     // Initialize config
     let raw_config =
         Figment::new()
@@ -75,8 +67,6 @@ async fn main() {
 
     config.warn_deprecated();
 
-    let log = format!("{},ruma_state_res=error,_=off,sled=off", config.log);
-
     if config.allow_jaeger {
         opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
         let tracer = opentelemetry_jaeger::new_agent_pipeline()
@@ -86,7 +76,7 @@ async fn main() {
             .unwrap();
         let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
 
-        let filter_layer = match EnvFilter::try_new(&log) {
+        let filter_layer = match EnvFilter::try_new(&config.log) {
             Ok(s) => s,
             Err(e) => {
                 eprintln!(
@@ -113,7 +103,7 @@ async fn main() {
     } else {
         let registry = tracing_subscriber::Registry::default();
         let fmt_layer = tracing_subscriber::fmt::Layer::new();
-        let filter_layer = match EnvFilter::try_new(&log) {
+        let filter_layer = match EnvFilter::try_new(&config.log) {
             Ok(s) => s,
             Err(e) => {
                 eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}");
@@ -238,7 +228,7 @@ async fn spawn_task<B: Send + 'static>(
         .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
 }
 
-async fn unrecognized_method<B>(
+async fn unrecognized_method<B: Send>(
     req: axum::http::Request<B>,
     next: axum::middleware::Next<B>,
 ) -> std::result::Result<axum::response::Response, StatusCode> {
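With `default_log()` no longer embedding `sled=off` and main.rs dropping the hard-coded `ruma_state_res=error` suffix, the tracing filter now comes straight from `config.log`; `EnvFilter::try_new` parses the same directive syntax as `RUST_LOG`. A sketch of that wiring in isolation:

    // Sketch: building the subscriber filter from a config string, as main.rs
    // now does with `config.log`. Requires tracing-subscriber's "env-filter" feature.
    use tracing_subscriber::EnvFilter;

    fn main() {
        let log = "warn,state_res=warn,_=off".to_owned(); // the new default_log()
        let filter = match EnvFilter::try_new(&log) {
            Ok(f) => f,
            Err(e) => {
                eprintln!("invalid log directives: {e}");
                EnvFilter::new("warn") // fall back to a known-good filter
            }
        };
        tracing_subscriber::fmt().with_env_filter(filter).init();
    }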
@@ -1,7 +1,7 @@
 use std::{
     collections::BTreeMap,
     convert::{TryFrom, TryInto},
-    sync::{Arc, RwLock},
+    sync::Arc,
     time::Instant,
 };
 
@@ -23,10 +23,10 @@ use ruma::{
         },
         TimelineEventType,
     },
-    EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
+    EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
 };
 use serde_json::value::to_raw_value;
-use tokio::sync::{mpsc, Mutex, MutexGuard};
+use tokio::sync::{mpsc, Mutex, RwLock};
 
 use crate::{
     api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH},
@@ -50,7 +50,7 @@ enum AdminCommand {
     /// Registering a new bridge using the ID of an existing bridge will replace
     /// the old one.
     ///
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # yaml content here
     /// # ```
@@ -96,7 +96,7 @@ enum AdminCommand {
     /// Removing a mass amount of users from a room may cause a significant amount of leave events.
     /// The time to leave rooms may depend significantly on joined rooms and servers.
     ///
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # User list here
     /// # ```
@@ -121,7 +121,7 @@ enum AdminCommand {
     /// The PDU event is only checked for validity and is not added to the
     /// database.
     ///
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # PDU json content here
     /// # ```
@@ -165,14 +165,14 @@ enum AdminCommand {
     EnableRoom { room_id: Box<RoomId> },
 
     /// Verify json signatures
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # json here
     /// # ```
     SignJson,
 
     /// Verify json signatures
-    /// [commandbody]
+    /// [commandbody]()
     /// # ```
     /// # json here
     /// # ```
@@ -214,60 +214,44 @@ impl Service {
         let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
             .expect("@conduit:server_name is valid");
 
-        let conduit_room = services()
-            .rooms
-            .alias
-            .resolve_local_alias(
-                format!("#admins:{}", services().globals.server_name())
-                    .as_str()
-                    .try_into()
-                    .expect("#admins:server_name is a valid room alias"),
-            )
-            .expect("Database data for admin room alias must be valid")
-            .expect("Admin room must exist");
-
-        loop {
-            tokio::select! {
-                Some(event) = receiver.recv() => {
-                    let message_content = match event {
-                        AdminRoomEvent::SendMessage(content) => content,
-                        AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await
-                    };
-
-                    let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| {
-                        services()
-                            .rooms
-                            .timeline
-                            .build_and_append_pdu(
-                                PduBuilder {
-                                    event_type: TimelineEventType::RoomMessage,
-                                    content: to_raw_value(&message)
-                                        .expect("event is valid, we just created it"),
-                                    unsigned: None,
-                                    state_key: None,
-                                    redacts: None,
-                                },
-                                &conduit_user,
-                                &conduit_room,
-                                mutex_lock,
-                            )
-                            .unwrap();
-                    };
-
-                    let mutex_state = Arc::clone(
-                        services().globals
-                            .roomid_mutex_state
-                            .write()
-                            .unwrap()
-                            .entry(conduit_room.to_owned())
-                            .or_default(),
-                    );
-
-                    let state_lock = mutex_state.lock().await;
-
-                    send_message(message_content, &state_lock);
-
-                    drop(state_lock);
+        if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
+            let mutex_state = Arc::clone(
+                services().globals
+                    .roomid_mutex_state
+                    .write()
+                    .await
+                    .entry(conduit_room.to_owned())
+                    .or_default(),
+            );
+
+            loop {
+                tokio::select! {
+                    Some(event) = receiver.recv() => {
+                        let message_content = match event {
+                            AdminRoomEvent::SendMessage(content) => content,
+                            AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await
+                        };
+
+                        let state_lock = mutex_state.lock().await;
+
+                        services()
+                            .rooms
+                            .timeline
+                            .build_and_append_pdu(
+                                PduBuilder {
+                                    event_type: TimelineEventType::RoomMessage,
+                                    content: to_raw_value(&message_content)
+                                        .expect("event is valid, we just created it"),
+                                    unsigned: None,
+                                    state_key: None,
+                                    redacts: None,
+                                },
+                                &conduit_user,
+                                &conduit_room,
+                                &state_lock,
+                            )
+                            .await.unwrap();
+                    }
                 }
             }
         }
     }
@@ -434,11 +418,7 @@ impl Service {
             Err(e) => RoomMessageEventContent::text_plain(e.to_string()),
         },
         AdminCommand::IncomingFederation => {
-            let map = services()
-                .globals
-                .roomid_federationhandletime
-                .read()
-                .unwrap();
+            let map = services().globals.roomid_federationhandletime.read().await;
             let mut msg: String = format!("Handling {} incoming pdus:\n", map.len());
 
             for (r, (e, i)) in map.iter() {
@@ -552,7 +532,7 @@ impl Service {
             }
         }
         AdminCommand::MemoryUsage => {
-            let response1 = services().memory_usage();
+            let response1 = services().memory_usage().await;
             let response2 = services().globals.db.memory_usage();
 
             RoomMessageEventContent::text_plain(format!(
@@ -565,7 +545,7 @@ impl Service {
             RoomMessageEventContent::text_plain("Done.")
         }
         AdminCommand::ClearServiceCaches { amount } => {
-            services().clear_caches(amount);
+            services().clear_caches(amount).await;
 
             RoomMessageEventContent::text_plain("Done.")
         }
@ -806,7 +786,7 @@ impl Service {
|
|||
.fetch_required_signing_keys(&value, &pub_key_map)
|
||||
.await?;
|
||||
|
||||
let pub_key_map = pub_key_map.read().unwrap();
|
||||
let pub_key_map = pub_key_map.read().await;
|
||||
match ruma::signatures::verify_json(&pub_key_map, &value) {
|
||||
Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
|
||||
Err(e) => RoomMessageEventContent::text_plain(format!(
|
||||
|
@@ -858,12 +838,15 @@ impl Service {
.expect("Regex compilation should not fail");
let text = re.replace_all(&text, "<code>$1</code>: $4");

// Look for a `[commandbody]` tag. If it exists, use all lines below it that
// Look for a `[commandbody]()` tag. If it exists, use all lines below it that
// start with a `#` in the USAGE section.
let mut text_lines: Vec<&str> = text.lines().collect();
let mut command_body = String::new();

if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") {
if let Some(line_index) = text_lines
.iter()
.position(|line| *line == "[commandbody]()")
{
text_lines.remove(line_index);

while text_lines

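As the comment explains, the help text is post-processed by locating a line that is exactly `[commandbody]()` and collecting the following `#`-prefixed lines as the command body. A standalone sketch of that scan (simplified; the real code also rewrites the USAGE section):

    fn extract_command_body(text: &str) -> (Vec<&str>, String) {
        let mut lines: Vec<&str> = text.lines().collect();
        let mut command_body = String::new();
        if let Some(i) = lines.iter().position(|line| *line == "[commandbody]()") {
            lines.remove(i);
            // Lines after the tag that start with `#` belong to the body.
            while i < lines.len() && lines[i].starts_with('#') {
                command_body.push_str(lines.remove(i).trim_start_matches('#').trim());
                command_body.push('\n');
            }
        }
        (lines, command_body)
    }
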
@@ -919,7 +902,7 @@ impl Service {
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.clone())
.or_default(),
);

@@ -932,176 +915,229 @@ impl Service {

services().users.create(&conduit_user, None)?;

let mut content = RoomCreateEventContent::new(conduit_user.clone());
let mut content = RoomCreateEventContent::new_v1(conduit_user.clone());
content.federate = true;
content.predecessor = None;
content.room_version = services().globals.default_room_version();

// 1. The room create event
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCreate,
content: to_raw_value(&content).expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

// 2. Make conduit bot join
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(conduit_user.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(conduit_user.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

// 3. Power levels
let mut users = BTreeMap::new();
users.insert(conduit_user.clone(), 100.into());

services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

// 4.1 Join Rules
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomJoinRules,
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomJoinRules,
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

// 4.2 History Visibility
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomHistoryVisibility,
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
HistoryVisibility::Shared,
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomHistoryVisibility,
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
HistoryVisibility::Shared,
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

// 4.3 Guest Access
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomGuestAccess,
content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomGuestAccess,
content: to_raw_value(&RoomGuestAccessEventContent::new(
GuestAccess::Forbidden,
))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

// 5. Events implied by name and topic
let room_name = format!("{} Admin Room", services().globals.server_name());
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomName,
content: to_raw_value(&RoomNameEventContent::new(room_name))
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomTopic,
content: to_raw_value(&RoomTopicEventContent {
topic: format!("Manage {}", services().globals.server_name()),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomTopic,
content: to_raw_value(&RoomTopicEventContent {
topic: format!("Manage {}", services().globals.server_name()),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

// 6. Room alias
let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");

services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent {
alias: Some(alias.clone()),
alt_aliases: Vec::new(),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomCanonicalAlias,
content: to_raw_value(&RoomCanonicalAliasEventContent {
alias: Some(alias.clone()),
alt_aliases: Vec::new(),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

services().rooms.alias.set_alias(&alias, &room_id)?;

Ok(())
}

/// Gets the room ID of the admin room
///
/// Errors are propagated from the database; the result is None if there is no admin room
pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
let admin_room_alias: Box<RoomAliasId> =
format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");

services()
.rooms
.alias
.resolve_local_alias(&admin_room_alias)
}

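A hypothetical call site: because get_admin_room returns Result<Option<OwnedRoomId>>, callers now branch instead of expecting the room to exist:

    if let Some(admin_room) = services().admin.get_admin_room()? {
        // the admin room exists and admin_room is its OwnedRoomId
    } else {
        // this server was set up without an admin room
    }
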
/// Invite the user to the conduit admin room.
///
/// In conduit, this is equivalent to granting admin privileges.

@@ -1110,102 +1146,105 @@ impl Service {
user_id: &UserId,
displayname: String,
) -> Result<()> {
let admin_room_alias: Box<RoomAliasId> =
format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
let room_id = services()
.rooms
.alias
.resolve_local_alias(&admin_room_alias)?
.expect("Admin room must exist");
if let Some(room_id) = services().admin.get_admin_room()? {
let mutex_state = Arc::clone(
services()
.globals
.roomid_mutex_state
.write()
.await
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;

let mutex_state = Arc::clone(
// Use the server user to grant the new admin's power level
let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");

// Invite and join the real user
services()
.globals
.roomid_mutex_state
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Invite,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: Some(displayname),
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
user_id,
&room_id,
&state_lock,
)
.await?;

// Use the server user to grant the new admin's power level
let conduit_user =
UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
// Set power level
let mut users = BTreeMap::new();
users.insert(conduit_user.to_owned(), 100.into());
users.insert(user_id.to_owned(), 100.into());

// Invite and join the real user
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Invite,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: Some(displayname),
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
user_id,
&room_id,
&state_lock,
)?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;

// Set power level
let mut users = BTreeMap::new();
users.insert(conduit_user.to_owned(), 100.into());
users.insert(user_id.to_owned(), 100.into());

services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)?;

// Send welcome message
services().rooms.timeline.build_and_append_pdu(
// Send welcome message
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMessage,
content: to_raw_value(&RoomMessageEventContent::text_html(

@@ -1220,8 +1259,8 @@ impl Service {
&conduit_user,
&room_id,
&state_lock,
)?;

).await?;
}
Ok(())
}
}

@@ -8,6 +8,12 @@ use ruma::{
use crate::api::server_server::FedDest;

use crate::{services, Config, Error, Result};
use futures_util::FutureExt;
use hyper::{
client::connect::dns::{GaiResolver, Name},
service::Service as HyperService,
};
use reqwest::dns::{Addrs, Resolve, Resolving};
use ruma::{
api::{
client::sync::sync_events,

@@ -17,17 +23,19 @@ use ruma::{
};
use std::{
collections::{BTreeMap, HashMap},
error::Error as StdError,
fs,
future::Future,
future::{self, Future},
iter,
net::{IpAddr, SocketAddr},
path::PathBuf,
sync::{
atomic::{self, AtomicBool},
Arc, Mutex, RwLock,
Arc, RwLock as StdRwLock,
},
time::{Duration, Instant},
};
use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore};
use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore};
use tracing::{error, info};
use trust_dns_resolver::TokioAsyncResolver;

@@ -45,7 +53,7 @@ pub struct Service {
pub db: &'static dyn Data,

pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
pub tls_name_override: Arc<RwLock<TlsNameMap>>,
pub tls_name_override: Arc<StdRwLock<TlsNameMap>>,
pub config: Config,
keypair: Arc<ruma::signatures::Ed25519KeyPair>,
dns_resolver: TokioAsyncResolver,

@@ -56,11 +64,12 @@ pub struct Service {
pub unstable_room_versions: Vec<RoomVersionId>,
pub bad_event_ratelimiter: Arc<RwLock<HashMap<OwnedEventId, RateLimitState>>>,
pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
pub bad_query_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, RateLimitState>>>,
pub servername_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, Arc<Semaphore>>>>,
pub sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>, // this lock will be held longer
pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
pub stateres_mutex: Arc<Mutex<()>>,
pub rotate: RotationHandler,

@@ -98,6 +107,45 @@ impl Default for RotationHandler {
}
}

pub struct Resolver {
inner: GaiResolver,
overrides: Arc<StdRwLock<TlsNameMap>>,
}

impl Resolver {
pub fn new(overrides: Arc<StdRwLock<TlsNameMap>>) -> Self {
Resolver {
inner: GaiResolver::new(),
overrides,
}
}
}

impl Resolve for Resolver {
fn resolve(&self, name: Name) -> Resolving {
self.overrides
.read()
.unwrap()
.get(name.as_str())
.and_then(|(override_name, port)| {
override_name.first().map(|first_name| {
let x: Box<dyn Iterator<Item = SocketAddr> + Send> =
Box::new(iter::once(SocketAddr::new(*first_name, *port)));
let x: Resolving = Box::pin(future::ready(Ok(x)));
x
})
})
.unwrap_or_else(|| {
let this = &mut self.inner.clone();
Box::pin(HyperService::<Name>::call(this, name).map(|result| {
result
.map(|addrs| -> Addrs { Box::new(addrs) })
.map_err(|err| -> Box<dyn StdError + Send + Sync> { Box::new(err) })
}))
})
}
}

impl Service {
pub fn load(db: &'static dyn Data, config: Config) -> Result<Self> {
let keypair = db.load_keypair();

@@ -111,7 +159,7 @@ impl Service {
}
};

let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new()));
let tls_name_override = Arc::new(StdRwLock::new(TlsNameMap::new()));

let jwt_decoding_key = config
.jwt_secret

@@ -119,14 +167,8 @@ impl Service {
.map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()));

let default_client = reqwest_client_builder(&config)?.build()?;
let name_override = Arc::clone(&tls_name_override);
let federation_client = reqwest_client_builder(&config)?
.resolve_fn(move |domain| {
let read_guard = name_override.read().unwrap();
let (override_name, port) = read_guard.get(&domain)?;
let first_name = override_name.get(0)?;
Some(SocketAddr::new(*first_name, *port))
})
.dns_resolver(Arc::new(Resolver::new(tls_name_override.clone())))
.build()?;

// Supported and stable room versions

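The builder hunk above swaps the closure-based resolve_fn hook for the Resolve implementation defined earlier, handed to reqwest via dns_resolver. Condensed wiring, assuming the same TlsNameMap override table from this file:

    // Sketch only: Resolver and TlsNameMap are the types defined above.
    let tls_name_override = Arc::new(StdRwLock::new(TlsNameMap::new()));
    let federation_client = reqwest::Client::builder()
        .dns_resolver(Arc::new(Resolver::new(Arc::clone(&tls_name_override))))
        .build()?;
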
@@ -160,6 +202,7 @@ impl Service {
unstable_room_versions,
bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
roomid_mutex_state: RwLock::new(HashMap::new()),
roomid_mutex_insert: RwLock::new(HashMap::new()),

@@ -1,9 +1,10 @@
use std::{
collections::{BTreeMap, HashMap},
sync::{Arc, Mutex},
sync::{Arc, Mutex as StdMutex},
};

use lru_cache::LruCache;
use tokio::sync::Mutex;

use crate::{Config, Result};

@@ -79,17 +80,17 @@ impl Services {
state: rooms::state::Service { db },
state_accessor: rooms::state_accessor::Service {
db,
server_visibility_cache: Mutex::new(LruCache::new(
server_visibility_cache: StdMutex::new(LruCache::new(
(100.0 * config.conduit_cache_capacity_modifier) as usize,
)),
user_visibility_cache: Mutex::new(LruCache::new(
user_visibility_cache: StdMutex::new(LruCache::new(
(100.0 * config.conduit_cache_capacity_modifier) as usize,
)),
},
state_cache: rooms::state_cache::Service { db },
state_compressor: rooms::state_compressor::Service {
db,
stateinfo_cache: Mutex::new(LruCache::new(
stateinfo_cache: StdMutex::new(LruCache::new(
(100.0 * config.conduit_cache_capacity_modifier) as usize,
)),
},

@@ -107,7 +108,7 @@ impl Services {
uiaa: uiaa::Service { db },
users: users::Service {
db,
connections: Mutex::new(BTreeMap::new()),
connections: StdMutex::new(BTreeMap::new()),
},
account_data: account_data::Service { db },
admin: admin::Service::build(),

@@ -118,14 +119,8 @@ impl Services {
globals: globals::Service::load(db, config)?,
})
}
fn memory_usage(&self) -> String {
let lazy_load_waiting = self
.rooms
.lazy_loading
.lazy_load_waiting
.lock()
.unwrap()
.len();
async fn memory_usage(&self) -> String {
let lazy_load_waiting = self.rooms.lazy_loading.lazy_load_waiting.lock().await.len();
let server_visibility_cache = self
.rooms
.state_accessor

@@ -152,15 +147,9 @@ impl Services {
.timeline
.lasttimelinecount_cache
.lock()
.unwrap()
.len();
let roomid_spacechunk_cache = self
.rooms
.spaces
.roomid_spacechunk_cache
.lock()
.unwrap()
.await
.len();
let roomid_spacechunk_cache = self.rooms.spaces.roomid_spacechunk_cache.lock().await.len();

format!(
"\

@@ -173,13 +162,13 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
"
)
}
fn clear_caches(&self, amount: u32) {
async fn clear_caches(&self, amount: u32) {
if amount > 0 {
self.rooms
.lazy_loading
.lazy_load_waiting
.lock()
.unwrap()
.await
.clear();
}
if amount > 1 {

@@ -211,7 +200,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
.timeline
.lasttimelinecount_cache
.lock()
.unwrap()
.await
.clear();
}
if amount > 5 {

@@ -219,7 +208,7 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}\
.spaces
.roomid_spacechunk_cache
.lock()
.unwrap()
.await
.clear();
}
}

@@ -385,7 +385,7 @@ impl PartialEq for PduEvent {
}
impl PartialOrd for PduEvent {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.event_id.partial_cmp(&other.event_id)
Some(self.cmp(other))
}
}
impl Ord for PduEvent {

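Delegating partial_cmp to the Ord implementation keeps the partial and total orderings consistent, which is the shape clippy recommends for types implementing both traits. In general:

    use std::cmp::Ordering;

    #[derive(PartialEq, Eq)]
    struct Event { id: u64 }

    impl Ord for Event {
        fn cmp(&self, other: &Self) -> Ordering {
            self.id.cmp(&other.id)
        }
    }

    impl PartialOrd for Event {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other)) // single source of truth: Ord
        }
    }
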
@@ -1,6 +1,6 @@
mod data;
pub use data::Data;
use ruma::events::AnySyncTimelineEvent;
use ruma::{events::AnySyncTimelineEvent, push::PushConditionPowerLevelsCtx};

use crate::{services, Error, PduEvent, Result};
use bytes::BytesMut;

@@ -193,6 +193,12 @@ impl Service {
pdu: &Raw<AnySyncTimelineEvent>,
room_id: &RoomId,
) -> Result<&'a [Action]> {
let power_levels = PushConditionPowerLevelsCtx {
users: power_levels.users.clone(),
users_default: power_levels.users_default,
notifications: power_levels.notifications.clone(),
};

let ctx = PushConditionRoomCtx {
room_id: room_id.to_owned(),
member_count: 10_u32.into(), // TODO: get member count efficiently

@@ -201,9 +207,7 @@ impl Service {
.users
.displayname(user)?
.unwrap_or_else(|| user.localpart().to_owned()),
users_power_levels: power_levels.users.clone(),
default_power_level: power_levels.users_default,
notification_power_levels: power_levels.notifications.clone(),
power_levels: Some(power_levels),
};

Ok(ruleset.get_actions(pdu, &ctx))

@@ -17,29 +17,32 @@ impl Service {
/// make sure users outside these rooms can't see them.
pub fn update_presence(
&self,
user_id: &UserId,
room_id: &RoomId,
presence: PresenceEvent,
_user_id: &UserId,
_room_id: &RoomId,
_presence: PresenceEvent,
) -> Result<()> {
self.db.update_presence(user_id, room_id, presence)
// self.db.update_presence(user_id, room_id, presence)
Ok(())
}

/// Resets the presence timeout, so the user will stay in their current presence state.
pub fn ping_presence(&self, user_id: &UserId) -> Result<()> {
self.db.ping_presence(user_id)
pub fn ping_presence(&self, _user_id: &UserId) -> Result<()> {
// self.db.ping_presence(user_id)
Ok(())
}

pub fn get_last_presence_event(
&self,
user_id: &UserId,
room_id: &RoomId,
_user_id: &UserId,
_room_id: &RoomId,
) -> Result<Option<PresenceEvent>> {
let last_update = match self.db.last_presence_update(user_id)? {
Some(last) => last,
None => return Ok(None),
};
// let last_update = match self.db.last_presence_update(user_id)? {
// Some(last) => last,
// None => return Ok(None),
// };

self.db.get_presence_event(room_id, user_id, last_update)
// self.db.get_presence_event(room_id, user_id, last_update)
Ok(None)
}

/* TODO

@@ -111,12 +114,12 @@ impl Service {
}*/

/// Returns the most recent presence updates that happened after the event with id `since`.
#[tracing::instrument(skip(self, since, room_id))]
pub fn presence_since(
&self,
room_id: &RoomId,
since: u64,
_room_id: &RoomId,
_since: u64,
) -> Result<HashMap<OwnedUserId, PresenceEvent>> {
self.db.presence_since(room_id, since)
// self.db.presence_since(room_id, since)
Ok(HashMap::new())
}
}

@@ -11,6 +11,7 @@ pub trait Data: Send + Sync {
) -> Result<()>;

/// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
#[allow(clippy::type_complexity)]
fn readreceipts_since<'a>(
&'a self,
room_id: &RoomId,

@@ -1,25 +1,23 @@
/// An async function that can recursively call itself.
type AsyncRecursiveType<'a, T> = Pin<Box<dyn Future<Output = T> + 'a + Send>>;

use ruma::{
api::federation::discovery::{get_remote_server_keys, get_server_keys},
CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, OwnedServerSigningKeyId,
RoomVersionId,
};
use std::{
collections::{hash_map, BTreeMap, HashMap, HashSet},
pin::Pin,
sync::{Arc, RwLock, RwLockWriteGuard},
sync::Arc,
time::{Duration, Instant, SystemTime},
};
use tokio::sync::Semaphore;

use futures_util::{stream::FuturesUnordered, Future, StreamExt};
use ruma::{
api::{
client::error::ErrorKind,
federation::{
discovery::get_remote_server_keys_batch::{self, v2::QueryCriteria},
discovery::{
get_remote_server_keys,
get_remote_server_keys_batch::{self, v2::QueryCriteria},
get_server_keys,
},
event::{get_event, get_room_state_ids},
membership::create_join_event,
},

@@ -31,9 +29,11 @@ use ruma::{
int,
serde::Base64,
state_res::{self, RoomVersion, StateMap},
uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName,
uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch,
OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName,
};
use serde_json::value::RawValue as RawJsonValue;
use tokio::sync::{RwLock, RwLockWriteGuard, Semaphore};
use tracing::{debug, error, info, trace, warn};

use crate::{service::*, services, Error, PduEvent, Result};

@@ -92,6 +92,8 @@ impl Service {
));
}

services().rooms.event_handler.acl_check(origin, room_id)?;

// 1. Skip the PDU if we already have it as a timeline event
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
return Ok(Some(pdu_id.to_vec()));

@@ -117,7 +119,15 @@ impl Service {
.ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?;

let (incoming_pdu, val) = self
.handle_outlier_pdu(origin, &create_event, event_id, room_id, value, pub_key_map)
.handle_outlier_pdu(
origin,
&create_event,
event_id,
room_id,
value,
false,
pub_key_map,
)
.await?;
self.check_room_id(room_id, &incoming_pdu)?;

@@ -158,7 +168,7 @@ impl Service {
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.await
.get(&*prev_id)
{
// Exponential backoff

@@ -174,7 +184,22 @@ impl Service {
}

if errors >= 5 {
break;
// Timeout other events
match services()
.globals
.bad_event_ratelimiter
.write()
.await
.entry((*prev_id).to_owned())
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1)
}
}
continue;
}

if let Some((pdu, json)) = eventid_info.remove(&*prev_id) {

@@ -188,7 +213,7 @@ impl Service {
.globals
.roomid_federationhandletime
.write()
.unwrap()
.await
.insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));

if let Err(e) = self

@@ -208,7 +233,7 @@ impl Service {
.globals
.bad_event_ratelimiter
.write()
.unwrap()
.await
.entry((*prev_id).to_owned())
{
hash_map::Entry::Vacant(e) => {

@@ -224,7 +249,7 @@ impl Service {
.globals
.roomid_federationhandletime
.write()
.unwrap()
.await
.remove(&room_id.to_owned());
debug!(
"Handling prev event {} took {}m{}s",

@@ -242,7 +267,7 @@ impl Service {
.globals
.roomid_federationhandletime
.write()
.unwrap()
.await
.insert(room_id.to_owned(), (event_id.to_owned(), start_time));
let r = services()
.rooms

@@ -260,12 +285,13 @@ impl Service {
.globals
.roomid_federationhandletime
.write()
.unwrap()
.await
.remove(&room_id.to_owned());

r
}

#[allow(clippy::type_complexity, clippy::too_many_arguments)]
#[tracing::instrument(skip(self, create_event, value, pub_key_map))]
fn handle_outlier_pdu<'a>(
&'a self,

@@ -274,6 +300,7 @@ impl Service {
event_id: &'a EventId,
room_id: &'a RoomId,
mut value: BTreeMap<String, CanonicalJsonValue>,
auth_events_known: bool,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>> {
Box::pin(async move {

@@ -299,11 +326,8 @@ impl Service {
let room_version =
RoomVersion::new(room_version_id).expect("room version is supported");

let mut val = match ruma::signatures::verify_event(
&pub_key_map.read().expect("RwLock is poisoned."),
&value,
room_version_id,
) {
let guard = pub_key_map.read().await;
let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) {
Err(e) => {
// Drop
warn!("Dropping bad event {}: {}", event_id, e,);

@@ -315,7 +339,7 @@ impl Service {
Ok(ruma::signatures::Verified::Signatures) => {
// Redact
warn!("Calculated hash does not match: {}", event_id);
match ruma::canonical_json::redact(value, room_version_id, None) {
let obj = match ruma::canonical_json::redact(value, room_version_id, None) {
Ok(obj) => obj,
Err(_) => {
return Err(Error::BadRequest(

@@ -323,11 +347,23 @@ impl Service {
"Redaction failed",
))
}
};

// Skip the PDU if it is redacted and we already have it as an outlier event
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Event was redacted and we already knew about it",
));
}

obj
}
Ok(ruma::signatures::Verified::All) => value,
};

drop(guard);

// Now that we have checked the signature and hashes we can add the eventID and convert
// to our PduEvent type
val.insert(

@@ -341,23 +377,25 @@ impl Service {

self.check_room_id(room_id, &incoming_pdu)?;

// 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
// NOTE: Step 5 is not applied anymore because it failed too often
debug!(event_id = ?incoming_pdu.event_id, "Fetching auth events");
self.fetch_and_handle_outliers(
origin,
&incoming_pdu
.auth_events
.iter()
.map(|x| Arc::from(&**x))
.collect::<Vec<_>>(),
create_event,
room_id,
room_version_id,
pub_key_map,
)
.await;
if !auth_events_known {
// 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
// NOTE: Step 5 is not applied anymore because it failed too often
debug!(event_id = ?incoming_pdu.event_id, "Fetching auth events");
self.fetch_and_handle_outliers(
origin,
&incoming_pdu
.auth_events
.iter()
.map(|x| Arc::from(&**x))
.collect::<Vec<_>>(),
create_event,
room_id,
room_version_id,
pub_key_map,
)
.await;
}

// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
debug!(

@@ -653,13 +691,15 @@ impl Service {
{
Ok(res) => {
debug!("Fetching state events at event.");
let collect = res
.pdu_ids
.iter()
.map(|x| Arc::from(&**x))
.collect::<Vec<_>>();
let state_vec = self
.fetch_and_handle_outliers(
origin,
&res.pdu_ids
.iter()
.map(|x| Arc::from(&**x))
.collect::<Vec<_>>(),
&collect,
create_event,
room_id,
room_version_id,

@@ -766,7 +806,7 @@ impl Service {
.globals
.roomid_mutex_state
.write()
.unwrap()
.await
.entry(room_id.to_owned())
.or_default(),
);

@@ -845,14 +885,18 @@ impl Service {
debug!("Starting soft fail auth check");

if soft_fail {
services().rooms.timeline.append_incoming_pdu(
&incoming_pdu,
val,
extremities.iter().map(|e| (**e).to_owned()).collect(),
state_ids_compressed,
soft_fail,
&state_lock,
)?;
services()
.rooms
.timeline
.append_incoming_pdu(
&incoming_pdu,
val,
extremities.iter().map(|e| (**e).to_owned()).collect(),
state_ids_compressed,
soft_fail,
&state_lock,
)
.await?;

// Soft fail, we keep the event as an outlier but don't add it to the timeline
warn!("Event was soft failed: {:?}", incoming_pdu);

@@ -873,14 +917,18 @@ impl Service {
// We use the `state_at_event` instead of `state_after` so we accurately
// represent the state for this event.

let pdu_id = services().rooms.timeline.append_incoming_pdu(
&incoming_pdu,
val,
extremities.iter().map(|e| (**e).to_owned()).collect(),
state_ids_compressed,
soft_fail,
&state_lock,
)?;
let pdu_id = services()
.rooms
.timeline
.append_incoming_pdu(
&incoming_pdu,
val,
extremities.iter().map(|e| (**e).to_owned()).collect(),
state_ids_compressed,
soft_fail,
&state_lock,
)
.await?;

debug!("Appended incoming pdu");

@@ -942,14 +990,21 @@ impl Service {

debug!("Resolving state");

let lock = services().globals.stateres_mutex.lock();
let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
let fetch_event = |id: &_| {
let res = services().rooms.timeline.get_pdu(id);
if let Err(e) = &res {
error!("LOOK AT ME Failed to fetch event: {}", e);
}
res.ok().flatten()
}) {
};

let lock = services().globals.stateres_mutex.lock();
let state = match state_res::resolve(
room_version_id,
&fork_states,
auth_chain_sets,
fetch_event,
) {
Ok(new_state) => new_state,
Err(_) => {
return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));

@@ -986,6 +1041,7 @@ impl Service {
/// b. Look at outlier pdu tree
/// c. Ask origin server over federation
/// d. TODO: Ask other servers over federation?
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip_all)]
pub(crate) fn fetch_and_handle_outliers<'a>(
&'a self,

@@ -998,41 +1054,25 @@ impl Service {
) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>>
{
Box::pin(async move {
let back_off = |id| match services()
.globals
.bad_event_ratelimiter
.write()
.unwrap()
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
let back_off = |id| async move {
match services()
.globals
.bad_event_ratelimiter
.write()
.await
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1)
}
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
};

let mut pdus = vec![];
for id in events {
if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.get(&**id)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}

if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", id);
continue;
}
}

// a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree
// (get_pdu_json checks both)

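The ratelimiter entries above drive a quadratic backoff: a retry is skipped until 5·60·tries² seconds have elapsed, capped at one day. As a standalone helper (hypothetical; the diff inlines this arithmetic at each site):

    use std::time::Duration;

    /// 5 minutes times tries squared, capped at 24 hours.
    fn backoff_delay(tries: u32) -> Duration {
        let delay = Duration::from_secs(5 * 60) * tries * tries;
        delay.min(Duration::from_secs(60 * 60 * 24))
    }
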
@@ -1050,6 +1090,26 @@ impl Service {
let mut events_all = HashSet::new();
let mut i = 0;
while let Some(next_id) = todo_auth_events.pop() {
if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.await
.get(&*next_id)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}

if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", next_id);
continue;
}
}

if events_all.contains(&next_id) {
continue;
}

@@ -1060,7 +1120,7 @@ impl Service {
}

if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) {
trace!("Found {} in db", id);
trace!("Found {} in db", next_id);
continue;
}

@@ -1081,7 +1141,7 @@ impl Service {
match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) {
Ok(t) => t,
Err(_) => {
back_off((*next_id).to_owned());
back_off((*next_id).to_owned()).await;
continue;
}
};

@@ -1113,12 +1173,32 @@ impl Service {
}
Err(_) => {
warn!("Failed to fetch event: {}", next_id);
back_off((*next_id).to_owned());
back_off((*next_id).to_owned()).await;
}
}
}

for (next_id, value) in events_in_reverse_order.iter().rev() {
if let Some((time, tries)) = services()
.globals
.bad_event_ratelimiter
.read()
.await
.get(&**next_id)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(5 * 60) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}

if time.elapsed() < min_elapsed_duration {
info!("Backing off from {}", next_id);
continue;
}
}

match self
.handle_outlier_pdu(
origin,

@@ -1126,6 +1206,7 @@ impl Service {
next_id,
room_id,
value.clone(),
true,
pub_key_map,
)
.await

@@ -1137,7 +1218,7 @@ impl Service {
}
Err(e) => {
warn!("Authentication of event {} failed: {:?}", next_id, e);
back_off((**next_id).to_owned());
back_off((**next_id).to_owned()).await;
}
}
}

@@ -1292,7 +1373,7 @@ impl Service {

pub_key_map
.write()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?
.await
.insert(signature_server.clone(), keys);
}

@@ -1301,7 +1382,7 @@ impl Service {

// Gets a list of servers for which we don't have the signing key yet. We go over
// the PDUs and either cache the key or add it to the list that needs to be retrieved.
fn get_server_keys_from_cache(
async fn get_server_keys_from_cache(
&self,
pdu: &RawJsonValue,
servers: &mut BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,

@@ -1325,7 +1406,7 @@ impl Service {
.globals
.bad_event_ratelimiter
.read()
.unwrap()
.await
.get(event_id)
{
// Exponential backoff

@@ -1401,17 +1482,19 @@ impl Service {
> = BTreeMap::new();

{
let mut pkm = pub_key_map
.write()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?;
let mut pkm = pub_key_map.write().await;

// Try to fetch keys, failure is okay
// Servers we couldn't find in the cache will be added to `servers`
for pdu in &event.room_state.state {
let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
let _ = self
.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
.await;
}
for pdu in &event.room_state.auth_chain {
let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm);
let _ = self
.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
.await;
}

drop(pkm);

@@ -1435,9 +1518,7 @@ impl Service {
.await
{
trace!("Got signing keys: {:?}", keys);
let mut pkm = pub_key_map
.write()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?;
let mut pkm = pub_key_map.write().await;
for k in keys.server_keys {
let k = match k.deserialize() {
Ok(key) => key,

@@ -1496,10 +1577,7 @@ impl Service {
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
pub_key_map
.write()
.map_err(|_| Error::bad_database("RwLock is poisoned."))?
.insert(origin.to_string(), result);
pub_key_map.write().await.insert(origin.to_string(), result);
}
}
info!("Done handling result");

@@ -1530,6 +1608,11 @@ impl Service {
}
};

if acl_event_content.allow.is_empty() {
// Ignore broken acl events
return Ok(());
}

if acl_event_content.is_allowed(server_name) {
Ok(())
} else {

@@ -1559,14 +1642,14 @@ impl Service {
.globals
.servername_ratelimiter
.read()
.unwrap()
.await
.get(origin)
.map(|s| Arc::clone(s).acquire_owned());

let permit = match permit {
Some(p) => p,
None => {
let mut write = services().globals.servername_ratelimiter.write().unwrap();
let mut write = services().globals.servername_ratelimiter.write().await;
let s = Arc::clone(
write
.entry(origin.to_owned())

@@ -1578,24 +1661,26 @@ impl Service {
}
.await;

let back_off = |id| match services()
.globals
.bad_signature_ratelimiter
.write()
.unwrap()
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
let back_off = |id| async {
match services()
.globals
.bad_signature_ratelimiter
.write()
.await
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
};

if let Some((time, tries)) = services()
.globals
.bad_signature_ratelimiter
.read()
.unwrap()
.await
.get(&signature_ids)
{
// Exponential backoff

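back_off can no longer mutate the map inline from a plain closure, because the map now sits behind an async RwLock; the rewrite has the closure return an async block, and every call site awaits it. The pattern in isolation:

    use std::{collections::HashMap, time::Instant};
    use tokio::sync::RwLock;

    async fn demo(ratelimiter: &RwLock<HashMap<String, (Instant, u32)>>) {
        // A closure returning a future: invoked as back_off(id).await.
        let back_off = |id: String| async move {
            let mut map = ratelimiter.write().await;
            let entry = map.entry(id).or_insert((Instant::now(), 0));
            *entry = (Instant::now(), entry.1 + 1);
        };
        back_off("$event_id".to_owned()).await;
    }
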
@@ -1702,7 +1787,7 @@ impl Service {

drop(permit);

back_off(signature_ids);
back_off(signature_ids).await;

warn!("Failed to find public key for server: {}", origin);
Err(Error::BadServerResponse(

@@ -1,11 +1,9 @@
mod data;
use std::{
collections::{HashMap, HashSet},
sync::Mutex,
};
use std::collections::{HashMap, HashSet};

pub use data::Data;
use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId};
use tokio::sync::Mutex;

use crate::Result;

@@ -14,6 +12,7 @@ use super::timeline::PduCount;
pub struct Service {
pub db: &'static dyn Data,

#[allow(clippy::type_complexity)]
pub lazy_load_waiting:
Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
}

@@ -32,7 +31,7 @@ impl Service {
}

#[tracing::instrument(skip(self))]
pub fn lazy_load_mark_sent(
pub async fn lazy_load_mark_sent(
&self,
user_id: &UserId,
device_id: &DeviceId,

@@ -40,7 +39,7 @@ impl Service {
lazy_load: HashSet<OwnedUserId>,
count: PduCount,
) {
self.lazy_load_waiting.lock().unwrap().insert(
self.lazy_load_waiting.lock().await.insert(
(
user_id.to_owned(),
device_id.to_owned(),

@@ -52,14 +51,14 @@ impl Service {
}

#[tracing::instrument(skip(self))]
pub fn lazy_load_confirm_delivery(
pub async fn lazy_load_confirm_delivery(
&self,
user_id: &UserId,
device_id: &DeviceId,
room_id: &RoomId,
since: PduCount,
) -> Result<()> {
if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&(
if let Some(user_ids) = self.lazy_load_waiting.lock().await.remove(&(
user_id.to_owned(),
device_id.to_owned(),
room_id.to_owned(),

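Both lazy-loading methods become async here, so every call site must now await them; a hypothetical caller:

    // Sketch of a call site after the change.
    services()
        .rooms
        .lazy_loading
        .lazy_load_confirm_delivery(&user_id, &device_id, &room_id, since)
        .await?;
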
@@ -5,6 +5,7 @@ use ruma::{EventId, RoomId, UserId};

pub trait Data: Send + Sync {
fn add_relation(&self, from: u64, to: u64) -> Result<()>;
#[allow(clippy::type_complexity)]
fn relations_until<'a>(
&'a self,
user_id: &'a UserId,

@@ -40,6 +40,7 @@ impl Service {
}
}

#[allow(clippy::too_many_arguments)]
pub fn paginate_relations_with_filter(
&self,
sender_user: &UserId,

@@ -82,7 +83,7 @@ impl Service {
services()
.rooms
.state_accessor
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`

@@ -106,7 +107,7 @@ impl Service {
let events_before: Vec<_> = services()
.rooms
.pdu_metadata
.relations_until(sender_user, &room_id, target, from)?
.relations_until(sender_user, room_id, target, from)?
.filter(|r| {
r.as_ref().map_or(true, |(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)

@@ -129,7 +130,7 @@ impl Service {
services()
.rooms
.state_accessor
.user_can_see_event(sender_user, &room_id, &pdu.event_id)
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`

@@ -4,6 +4,7 @@ use ruma::RoomId;

pub trait Data: Send + Sync {
fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;

#[allow(clippy::type_complexity)]
fn search_pdus<'a>(
&'a self,
room_id: &RoomId,

@@ -1,4 +1,4 @@
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 
 use lru_cache::LruCache;
 use ruma::{
@@ -25,6 +25,7 @@ use ruma::{
     space::SpaceRoomJoinRule,
     OwnedRoomId, RoomId, UserId,
 };
+use tokio::sync::Mutex;
 
 use tracing::{debug, error, warn};
 
@@ -79,7 +80,7 @@ impl Service {
             if let Some(cached) = self
                 .roomid_spacechunk_cache
                 .lock()
-                .unwrap()
+                .await
                 .get_mut(&current_room.to_owned())
                 .as_ref()
             {
@@ -134,7 +135,7 @@ impl Service {
 
                     if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
                         .ok()
-                        .and_then(|c| c.via)
+                        .map(|c| c.via)
                         .map_or(true, |v| v.is_empty())
                     {
                         continue;
@@ -171,7 +172,7 @@ impl Service {
                     .transpose()?
                     .unwrap_or(JoinRule::Invite);
 
-                self.roomid_spacechunk_cache.lock().unwrap().insert(
+                self.roomid_spacechunk_cache.lock().await.insert(
                     current_room.clone(),
                     Some(CachedSpaceChunk {
                         chunk,
@@ -185,7 +186,9 @@ impl Service {
                     stack.push(children_ids);
                 }
             } else {
-                let server = current_room.server_name();
+                let server = current_room
+                    .server_name()
+                    .expect("Room IDs should always have a server name");
                 if server == services().globals.server_name() {
                     continue;
                 }
@@ -193,11 +196,11 @@ impl Service {
                     // Early return so the client can see some data already
                     break;
                 }
-                warn!("Asking {server} for /hierarchy");
+                debug!("Asking {server} for /hierarchy");
                 if let Ok(response) = services()
                     .sending
                     .send_federation_request(
-                        &server,
+                        server,
                         federation::space::get_hierarchy::v1::Request {
                             room_id: current_room.to_owned(),
                             suggested_only,
@@ -235,7 +238,7 @@ impl Service {
                             .room
                             .allowed_room_ids
                             .into_iter()
-                            .map(|room| AllowRule::room_membership(room))
+                            .map(AllowRule::room_membership)
                            .collect(),
                     })
                 }
@@ -245,7 +248,7 @@ impl Service {
                             .room
                             .allowed_room_ids
                             .into_iter()
-                            .map(|room| AllowRule::room_membership(room))
+                            .map(AllowRule::room_membership)
                            .collect(),
                     })
                 }
@@ -263,7 +266,7 @@ impl Service {
                     }
                 }
 
-                self.roomid_spacechunk_cache.lock().unwrap().insert(
+                self.roomid_spacechunk_cache.lock().await.insert(
                     current_room.clone(),
                     Some(CachedSpaceChunk {
                         chunk,
@@ -287,7 +290,7 @@ impl Service {
             } else {
                 self.roomid_spacechunk_cache
                     .lock()
-                    .unwrap()
+                    .await
                     .insert(current_room.clone(), None);
             }
         }
@@ -313,7 +316,7 @@ impl Service {
             canonical_alias: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
+                .room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")?
                 .map_or(Ok(None), |s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomCanonicalAliasEventContent| c.alias)
@@ -321,11 +324,11 @@ impl Service {
                             Error::bad_database("Invalid canonical alias event in database.")
                         })
                 })?,
-            name: services().rooms.state_accessor.get_name(&room_id)?,
+            name: services().rooms.state_accessor.get_name(room_id)?,
             num_joined_members: services()
                 .rooms
                 .state_cache
-                .room_joined_count(&room_id)?
+                .room_joined_count(room_id)?
                 .unwrap_or_else(|| {
                     warn!("Room {} has no member count", room_id);
                     0
@@ -336,7 +339,7 @@ impl Service {
             topic: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomTopic, "")?
+                .room_state_get(room_id, &StateEventType::RoomTopic, "")?
                 .map_or(Ok(None), |s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomTopicEventContent| Some(c.topic))
@@ -348,7 +351,7 @@ impl Service {
             world_readable: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+                .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
                 .map_or(Ok(false), |s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomHistoryVisibilityEventContent| {
@@ -363,7 +366,7 @@ impl Service {
             guest_can_join: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
+                .room_state_get(room_id, &StateEventType::RoomGuestAccess, "")?
                 .map_or(Ok(false), |s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomGuestAccessEventContent| {
@@ -376,7 +379,7 @@ impl Service {
             avatar_url: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
+                .room_state_get(room_id, &StateEventType::RoomAvatar, "")?
                 .map(|s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomAvatarEventContent| c.url)
@@ -389,7 +392,7 @@ impl Service {
         let join_rule = services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
+            .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?
             .map(|s| {
                 serde_json::from_str(s.content.get())
                     .map(|c: RoomJoinRulesEventContent| c.join_rule)
@@ -415,7 +418,7 @@ impl Service {
             room_type: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomCreate, "")?
+                .room_state_get(room_id, &StateEventType::RoomCreate, "")?
                 .map(|s| {
                     serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(|e| {
                         error!("Invalid room create event in database: {}", e);
@@ -455,7 +458,7 @@ impl Service {
             SpaceRoomJoinRule::Invite => services()
                 .rooms
                 .state_cache
-                .is_joined(sender_user, &room_id)?,
+                .is_joined(sender_user, room_id)?,
             _ => false,
         };
 
@@ -479,17 +482,14 @@ impl Service {
         match join_rule {
             JoinRule::Restricted(r) => {
                 for rule in &r.allow {
-                    match rule {
-                        join_rules::AllowRule::RoomMembership(rm) => {
-                            if let Ok(true) = services()
-                                .rooms
-                                .state_cache
-                                .is_joined(sender_user, &rm.room_id)
-                            {
-                                return Ok(true);
-                            }
-                        }
-                        _ => {}
+                    if let join_rules::AllowRule::RoomMembership(rm) = rule {
+                        if let Ok(true) = services()
+                            .rooms
+                            .state_cache
+                            .is_joined(sender_user, &rm.room_id)
+                        {
+                            return Ok(true);
+                        }
                     }
                 }
 
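Reviewer note: two clippy-driven cleanups repeat throughout this file: passing `room_id` instead of `&room_id` where the binding is already a reference (`clippy::needless_borrow`), and replacing `.map(|room| AllowRule::room_membership(room))` with `.map(AllowRule::room_membership)` (`clippy::redundant_closure`). A standalone illustration of why both forms are equivalent; the types below are stand-ins, not ruma's:

```rust
#[derive(Debug)]
struct AllowRule(String);

impl AllowRule {
    fn room_membership(room: String) -> AllowRule {
        AllowRule(room)
    }
}

fn is_joined(room_id: &str) -> bool {
    // Stand-in for the state-cache lookup.
    room_id == "!space:example.org"
}

fn main() {
    let room_id: &str = "!space:example.org";
    // `room_id` is already a `&str`; writing `&room_id` would create a `&&str`
    // that the compiler auto-derefs anyway, which is what clippy flags.
    assert!(is_joined(room_id));

    let rooms = vec!["!a:x".to_owned(), "!b:x".to_owned()];
    // The closure `|room| AllowRule::room_membership(room)` only forwards its
    // argument, so the function path can be passed to `map` directly.
    let rules: Vec<AllowRule> = rooms.into_iter().map(AllowRule::room_membership).collect();
    assert_eq!(rules.len(), 2);
}
```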
@@ -41,7 +41,7 @@ impl Service {
                     services()
                         .rooms
                         .state_compressor
-                        .parse_compressed_state_event(&new)
+                        .parse_compressed_state_event(new)
                         .ok()
                         .map(|(_, id)| id)
                 }) {
@@ -95,7 +95,7 @@ impl Service {
                         .spaces
                         .roomid_spacechunk_cache
                         .lock()
-                        .unwrap()
+                        .await
                         .remove(&pdu.room_id);
                 }
                 _ => continue,
@@ -412,7 +412,7 @@ impl Service {
                 services()
                     .rooms
                     .state_compressor
-                    .parse_compressed_state_event(&compressed)
+                    .parse_compressed_state_event(compressed)
                     .ok()
             })
             .filter_map(|(shortstatekey, event_id)| {
@@ -16,7 +16,7 @@ use ruma::{
         },
         StateEventType,
     },
-    EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
+    EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
 };
 use tracing::error;
 
@@ -180,7 +180,7 @@ impl Service {
             return Ok(*visibility);
         }
 
-        let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
+        let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
 
         let history_visibility = self
             .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
@@ -197,11 +197,11 @@ impl Service {
             HistoryVisibility::Shared => currently_member,
             HistoryVisibility::Invited => {
                 // Allow if any member on requesting server was AT LEAST invited, else deny
-                self.user_was_invited(shortstatehash, &user_id)
+                self.user_was_invited(shortstatehash, user_id)
             }
             HistoryVisibility::Joined => {
                 // Allow if any member on requested server was joined, else deny
-                self.user_was_joined(shortstatehash, &user_id)
+                self.user_was_joined(shortstatehash, user_id)
             }
             _ => {
                 error!("Unknown history visibility {history_visibility}");
@@ -221,10 +221,10 @@ impl Service {
     /// the room's history_visibility at that event's state.
     #[tracing::instrument(skip(self, user_id, room_id))]
     pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
-        let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
+        let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
 
         let history_visibility = self
-            .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+            .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
             .map_or(Ok(HistoryVisibility::Shared), |s| {
                 serde_json::from_str(s.content.get())
                     .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility)
@@ -276,20 +276,26 @@ impl Service {
         services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomName, "")?
+            .room_state_get(room_id, &StateEventType::RoomName, "")?
             .map_or(Ok(None), |s| {
                 serde_json::from_str(s.content.get())
-                    .map(|c: RoomNameEventContent| c.name)
-                    .map_err(|_| Error::bad_database("Invalid room name event in database."))
+                    .map(|c: RoomNameEventContent| Some(c.name))
+                    .map_err(|e| {
+                        error!(
+                            "Invalid room name event in database for room {}. {}",
+                            room_id, e
+                        );
+                        Error::bad_database("Invalid room name event in database.")
+                    })
             })
     }
 
-    pub fn get_avatar(&self, room_id: &RoomId) -> Result<Option<RoomAvatarEventContent>> {
+    pub fn get_avatar(&self, room_id: &RoomId) -> Result<JsOption<RoomAvatarEventContent>> {
         services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
-            .map_or(Ok(None), |s| {
+            .room_state_get(room_id, &StateEventType::RoomAvatar, "")?
+            .map_or(Ok(JsOption::Undefined), |s| {
                 serde_json::from_str(s.content.get())
                     .map_err(|_| Error::bad_database("Invalid room avatar event in database."))
             })
@@ -303,7 +309,7 @@ impl Service {
         services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())?
+            .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
            .map_or(Ok(None), |s| {
                 serde_json::from_str(s.content.get())
                     .map_err(|_| Error::bad_database("Invalid room member event in database."))
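Reviewer note: `get_avatar` now returns `JsOption<RoomAvatarEventContent>` instead of `Option<...>`; ruma re-exports `JsOption` from the `js_option` crate. It is a three-state option that distinguishes a field that is absent (`Undefined`) from one that is explicitly `null`, which matters when serializing sync responses. A hedged sketch of the semantics, re-implemented locally so the example is self-contained rather than depending on the crate's exact API:

```rust
// A minimal re-implementation of the three-state option behind ruma's
// `JsOption`, shown only to illustrate the distinction it encodes.
#[derive(Debug, PartialEq)]
enum JsOption<T> {
    Some(T),   // field present with a value
    Null,      // field present but explicitly `null` (e.g. avatar removed)
    Undefined, // field absent entirely (no avatar event in room state)
}

fn get_avatar(state_event: Option<Option<String>>) -> JsOption<String> {
    match state_event {
        None => JsOption::Undefined,           // no m.room.avatar event at all
        Some(None) => JsOption::Null,          // event exists, url was cleared
        Some(Some(url)) => JsOption::Some(url),
    }
}

fn main() {
    assert_eq!(get_avatar(None), JsOption::Undefined);
    assert_eq!(get_avatar(Some(None)), JsOption::Null);
    assert_eq!(
        get_avatar(Some(Some("mxc://example.org/abc".into()))),
        JsOption::Some("mxc://example.org/abc".into())
    );
}
```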
@@ -78,6 +78,7 @@ pub trait Data: Send + Sync {
     ) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>;
 
     /// Returns an iterator over all rooms a user was invited to.
+    #[allow(clippy::type_complexity)]
     fn rooms_invited<'a>(
         &'a self,
         user_id: &UserId,
@@ -96,6 +97,7 @@ pub trait Data: Send + Sync {
     ) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>>;
 
     /// Returns an iterator over all rooms a user left.
+    #[allow(clippy::type_complexity)]
     fn rooms_left<'a>(
         &'a self,
         user_id: &UserId,
@@ -16,6 +16,7 @@ use self::data::StateDiff;
 pub struct Service {
     pub db: &'static dyn Data,
 
+    #[allow(clippy::type_complexity)]
     pub stateinfo_cache: Mutex<
         LruCache<
             u64,
@@ -33,6 +34,7 @@ pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];
 
 impl Service {
     /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer.
+    #[allow(clippy::type_complexity)]
     #[tracing::instrument(skip(self))]
     pub fn load_shortstatehash_info(
         &self,
@@ -131,6 +133,7 @@ impl Service {
     /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid
     /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer
     /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer
+    #[allow(clippy::type_complexity)]
     #[tracing::instrument(skip(
         self,
         statediffnew,
@@ -164,7 +167,7 @@ impl Service {
             for removed in statediffremoved.iter() {
                 if !parent_new.remove(removed) {
                     // It was not added in the parent and we removed it
-                    parent_removed.insert(removed.clone());
+                    parent_removed.insert(*removed);
                 }
                 // Else it was added in the parent and we removed it again. We can forget this change
             }
@@ -172,7 +175,7 @@ impl Service {
             for new in statediffnew.iter() {
                 if !parent_removed.remove(new) {
                     // It was not touched in the parent and we added it
-                    parent_new.insert(new.clone());
+                    parent_new.insert(*new);
                 }
                 // Else it was removed in the parent and we added it again. We can forget this change
             }
@@ -217,7 +220,7 @@ impl Service {
             for removed in statediffremoved.iter() {
                 if !parent_new.remove(removed) {
                     // It was not added in the parent and we removed it
-                    parent_removed.insert(removed.clone());
+                    parent_removed.insert(*removed);
                 }
                 // Else it was added in the parent and we removed it again. We can forget this change
             }
@@ -225,7 +228,7 @@ impl Service {
             for new in statediffnew.iter() {
                 if !parent_removed.remove(new) {
                     // It was not touched in the parent and we added it
-                    parent_new.insert(new.clone());
+                    parent_new.insert(*new);
                 }
                 // Else it was removed in the parent and we added it again. We can forget this change
             }
@@ -253,6 +256,7 @@ impl Service {
     }
 
     /// Returns the new shortstatehash, and the state diff from the previous room state
+    #[allow(clippy::type_complexity)]
     pub fn save_state(
         &self,
         room_id: &RoomId,
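Reviewer note: the `removed.clone()` → `*removed` changes work because `CompressedStateEvent` is declared above as `[u8; 2 * size_of::<u64>()]`, a fixed-size byte array that is `Copy`; dereferencing copies the 16 bytes directly and satisfies `clippy::clone_on_copy`. A small self-contained sketch:

```rust
use std::collections::HashSet;
use std::mem::size_of;

// Same shape as the service's compressed state event: 16 plain bytes,
// which makes the type `Copy`.
type CompressedStateEvent = [u8; 2 * size_of::<u64>()];

fn main() {
    let statediffremoved: Vec<CompressedStateEvent> = vec![[1u8; 16], [2u8; 16]];
    let mut parent_removed: HashSet<CompressedStateEvent> = HashSet::new();

    for removed in statediffremoved.iter() {
        // `*removed` copies the array out of the reference; `.clone()` would
        // do exactly the same work, so clippy prefers the explicit copy.
        parent_removed.insert(*removed);
    }
    assert_eq!(parent_removed.len(), 2);
}
```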
@@ -2,6 +2,7 @@ use crate::{PduEvent, Result};
 use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId};
 
 pub trait Data: Send + Sync {
+    #[allow(clippy::type_complexity)]
     fn threads_until<'a>(
         &'a self,
         user_id: &'a UserId,
@@ -26,7 +26,7 @@ impl Service {
         self.db.threads_until(user_id, room_id, until, include)
     }
 
-    pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
+    pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
         let root_id = &services()
             .rooms
             .timeline
@@ -103,7 +103,7 @@ impl Service {
         }
 
         let mut users = Vec::new();
-        if let Some(userids) = self.db.get_participants(&root_id)? {
+        if let Some(userids) = self.db.get_participants(root_id)? {
             users.extend_from_slice(&userids);
             users.push(pdu.sender.clone());
         } else {
@@ -66,6 +66,7 @@ pub trait Data: Send + Sync {
 
     /// Returns an iterator over all events and their tokens in a room that happened before the
     /// event with id `until` in reverse-chronological order.
+    #[allow(clippy::type_complexity)]
     fn pdus_until<'a>(
         &'a self,
         user_id: &UserId,
@@ -75,6 +76,7 @@ pub trait Data: Send + Sync {
 
     /// Returns an iterator over all events in a room that happened after the event with id `from`
     /// in chronological order.
+    #[allow(clippy::type_complexity)]
     fn pdus_after<'a>(
         &'a self,
         user_id: &UserId,
@@ -2,12 +2,8 @@ mod data;
 
 use std::{
     cmp::Ordering,
-    collections::{BTreeMap, HashMap},
-};
-
-use std::{
-    collections::HashSet,
-    sync::{Arc, Mutex, RwLock},
+    collections::{BTreeMap, HashMap, HashSet},
+    sync::Arc,
 };
 
 pub use data::Data;
@@ -28,11 +24,11 @@ use ruma::{
     state_res,
     state_res::{Event, RoomVersion},
     uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
-    OwnedServerName, RoomAliasId, RoomId, ServerName, UserId,
+    OwnedServerName, RoomId, ServerName, UserId,
 };
 use serde::Deserialize;
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
-use tokio::sync::MutexGuard;
+use tokio::sync::{Mutex, MutexGuard, RwLock};
 use tracing::{error, info, warn};
 
 use crate::{
@@ -58,8 +54,8 @@ impl PduCount {
     }
 
     pub fn try_from_string(token: &str) -> Result<Self> {
-        if token.starts_with('-') {
-            token[1..].parse().map(PduCount::Backfilled)
+        if let Some(stripped) = token.strip_prefix('-') {
+            stripped.parse().map(PduCount::Backfilled)
         } else {
             token.parse().map(PduCount::Normal)
         }
@@ -90,18 +86,6 @@ impl Ord for PduCount {
         }
     }
 }
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn comparisons() {
-        assert!(PduCount::Normal(1) < PduCount::Normal(2));
-        assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
-        assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
-        assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
-    }
-}
 
 pub struct Service {
     pub db: &'static dyn Data,
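Reviewer note: `try_from_string` moves from `starts_with` plus manual slicing to `str::strip_prefix`, which returns the remainder only when the prefix actually matched and so cannot panic on a bad index (`clippy::manual_strip`). The `comparisons` test removed here reappears at the bottom of the file and pins down the ordering: backfilled counts sort before normal ones, and in reverse among themselves. A reduced sketch of the same parse:

```rust
#[derive(Debug, PartialEq)]
enum PduCount {
    Backfilled(u64),
    Normal(u64),
}

fn try_from_string(token: &str) -> Result<PduCount, std::num::ParseIntError> {
    // `strip_prefix` hands back the rest of the string only if the leading
    // '-' was actually there, replacing the `token[1..]` slice.
    if let Some(stripped) = token.strip_prefix('-') {
        stripped.parse().map(PduCount::Backfilled)
    } else {
        token.parse().map(PduCount::Normal)
    }
}

fn main() {
    assert_eq!(try_from_string("42"), Ok(PduCount::Normal(42)));
    assert_eq!(try_from_string("-7"), Ok(PduCount::Backfilled(7)));
    assert!(try_from_string("x").is_err());
}
```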
@@ -112,7 +96,7 @@ pub struct Service {
 impl Service {
     #[tracing::instrument(skip(self))]
     pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
-        self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
+        self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
             .next()
             .map(|o| o.map(|(_, p)| Arc::new(p)))
             .transpose()
@@ -213,7 +197,7 @@ impl Service {
     ///
     /// Returns pdu id
     #[tracing::instrument(skip(self, pdu, pdu_json, leaves))]
-    pub fn append_pdu<'a>(
+    pub async fn append_pdu<'a>(
         &self,
         pdu: &PduEvent,
         mut pdu_json: CanonicalJsonObject,
@@ -275,11 +259,11 @@ impl Service {
                 .globals
                 .roomid_mutex_insert
                 .write()
-                .unwrap()
+                .await
                 .entry(pdu.room_id.clone())
                 .or_default(),
         );
-        let insert_lock = mutex_insert.lock().unwrap();
+        let insert_lock = mutex_insert.lock().await;
 
         let count1 = services().globals.next_count()?;
         // Mark as read first so the sending client doesn't get a notification even if appending
@@ -320,12 +304,25 @@ impl Service {
         let mut notifies = Vec::new();
         let mut highlights = Vec::new();
 
-        for user in services()
+        let mut push_target = services()
             .rooms
             .state_cache
-            .get_our_real_users(&pdu.room_id)?
-            .iter()
-        {
+            .get_our_real_users(&pdu.room_id)?;
+
+        if pdu.kind == TimelineEventType::RoomMember {
+            if let Some(state_key) = &pdu.state_key {
+                let target_user_id = UserId::parse(state_key.clone())
+                    .expect("This state_key was previously validated");
+
+                if !push_target.contains(&target_user_id) {
+                    let mut target = push_target.as_ref().clone();
+                    target.insert(target_user_id);
+                    push_target = Arc::new(target);
+                }
+            }
+        }
+
+        for user in push_target.iter() {
             // Don't notify the user of their own events
             if user == &pdu.sender {
                 continue;
@@ -394,7 +391,7 @@ impl Service {
                 .spaces
                 .roomid_spacechunk_cache
                 .lock()
-                .unwrap()
+                .await
                 .remove(&pdu.room_id);
             }
         }
@@ -447,26 +444,22 @@ impl Service {
                         .search
                         .index_pdu(shortroomid, &pdu_id, &body)?;
 
-                    let admin_room = services().rooms.alias.resolve_local_alias(
-                        <&RoomAliasId>::try_from(
-                            format!("#admins:{}", services().globals.server_name()).as_str(),
-                        )
-                        .expect("#admins:server_name is a valid room alias"),
-                    )?;
                     let server_user = format!("@conduit:{}", services().globals.server_name());
 
                     let to_conduit = body.starts_with(&format!("{server_user}: "))
                         || body.starts_with(&format!("{server_user} "))
                         || body == format!("{server_user}:")
-                        || body == format!("{server_user}");
+                        || body == server_user;
 
                     // This will evaluate to false if the emergency password is set up so that
                     // the administrator can execute commands as conduit
                     let from_conduit = pdu.sender == server_user
                         && services().globals.emergency_password().is_none();
 
-                    if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) {
-                        services().admin.process_message(body);
+                    if let Some(admin_room) = services().admin.get_admin_room()? {
+                        if to_conduit && !from_conduit && admin_room == pdu.room_id {
+                            services().admin.process_message(body);
+                        }
                     }
                 }
             }
@@ -809,7 +802,7 @@ impl Service {
     /// Creates a new persisted data unit and adds it to a room. This function takes a
     /// roomid_mutex_state, meaning that only this function is able to mutate the room state.
     #[tracing::instrument(skip(self, state_lock))]
-    pub fn build_and_append_pdu(
+    pub async fn build_and_append_pdu(
         &self,
         pdu_builder: PduBuilder,
         sender: &UserId,
@@ -819,89 +812,85 @@ impl Service {
         let (pdu, pdu_json) =
             self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
 
-        let admin_room = services().rooms.alias.resolve_local_alias(
-            <&RoomAliasId>::try_from(
-                format!("#admins:{}", services().globals.server_name()).as_str(),
-            )
-            .expect("#admins:server_name is a valid room alias"),
-        )?;
-        if admin_room.filter(|v| v == room_id).is_some() {
-            match pdu.event_type() {
-                TimelineEventType::RoomEncryption => {
-                    warn!("Encryption is not allowed in the admins room");
-                    return Err(Error::BadRequest(
-                        ErrorKind::Forbidden,
-                        "Encryption is not allowed in the admins room.",
-                    ));
-                }
-                TimelineEventType::RoomMember => {
-                    #[derive(Deserialize)]
-                    struct ExtractMembership {
-                        membership: MembershipState,
-                    }
-
-                    let target = pdu
-                        .state_key()
-                        .filter(|v| v.starts_with("@"))
-                        .unwrap_or(sender.as_str());
-                    let server_name = services().globals.server_name();
-                    let server_user = format!("@conduit:{}", server_name);
-                    let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
-                        .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
-
-                    if content.membership == MembershipState::Leave {
-                        if target == &server_user {
-                            warn!("Conduit user cannot leave from admins room");
-                            return Err(Error::BadRequest(
-                                ErrorKind::Forbidden,
-                                "Conduit user cannot leave from admins room.",
-                            ));
-                        }
-
-                        let count = services()
-                            .rooms
-                            .state_cache
-                            .room_members(room_id)
-                            .filter_map(|m| m.ok())
-                            .filter(|m| m.server_name() == server_name)
-                            .filter(|m| m != target)
-                            .count();
-                        if count < 2 {
-                            warn!("Last admin cannot leave from admins room");
-                            return Err(Error::BadRequest(
-                                ErrorKind::Forbidden,
-                                "Last admin cannot leave from admins room.",
-                            ));
-                        }
-                    }
-
-                    if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
-                        if target == &server_user {
-                            warn!("Conduit user cannot be banned in admins room");
-                            return Err(Error::BadRequest(
-                                ErrorKind::Forbidden,
-                                "Conduit user cannot be banned in admins room.",
-                            ));
-                        }
-
-                        let count = services()
-                            .rooms
-                            .state_cache
-                            .room_members(room_id)
-                            .filter_map(|m| m.ok())
-                            .filter(|m| m.server_name() == server_name)
-                            .filter(|m| m != target)
-                            .count();
-                        if count < 2 {
-                            warn!("Last admin cannot be banned in admins room");
-                            return Err(Error::BadRequest(
-                                ErrorKind::Forbidden,
-                                "Last admin cannot be banned in admins room.",
-                            ));
-                        }
-                    }
-                }
-                _ => {}
-            }
-        }
+        if let Some(admin_room) = services().admin.get_admin_room()? {
+            if admin_room == room_id {
+                match pdu.event_type() {
+                    TimelineEventType::RoomEncryption => {
+                        warn!("Encryption is not allowed in the admins room");
+                        return Err(Error::BadRequest(
+                            ErrorKind::Forbidden,
+                            "Encryption is not allowed in the admins room.",
+                        ));
+                    }
+                    TimelineEventType::RoomMember => {
+                        #[derive(Deserialize)]
+                        struct ExtractMembership {
+                            membership: MembershipState,
+                        }
+
+                        let target = pdu
+                            .state_key()
+                            .filter(|v| v.starts_with('@'))
+                            .unwrap_or(sender.as_str());
+                        let server_name = services().globals.server_name();
+                        let server_user = format!("@conduit:{}", server_name);
+                        let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
+                            .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
+
+                        if content.membership == MembershipState::Leave {
+                            if target == server_user {
+                                warn!("Conduit user cannot leave from admins room");
+                                return Err(Error::BadRequest(
+                                    ErrorKind::Forbidden,
+                                    "Conduit user cannot leave from admins room.",
+                                ));
+                            }
+
+                            let count = services()
+                                .rooms
+                                .state_cache
+                                .room_members(room_id)
+                                .filter_map(|m| m.ok())
+                                .filter(|m| m.server_name() == server_name)
+                                .filter(|m| m != target)
+                                .count();
+                            if count < 2 {
+                                warn!("Last admin cannot leave from admins room");
+                                return Err(Error::BadRequest(
+                                    ErrorKind::Forbidden,
+                                    "Last admin cannot leave from admins room.",
+                                ));
+                            }
+                        }
+
+                        if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
+                            if target == server_user {
+                                warn!("Conduit user cannot be banned in admins room");
+                                return Err(Error::BadRequest(
+                                    ErrorKind::Forbidden,
+                                    "Conduit user cannot be banned in admins room.",
+                                ));
+                            }
+
+                            let count = services()
+                                .rooms
+                                .state_cache
+                                .room_members(room_id)
+                                .filter_map(|m| m.ok())
+                                .filter(|m| m.server_name() == server_name)
+                                .filter(|m| m != target)
+                                .count();
+                            if count < 2 {
+                                warn!("Last admin cannot be banned in admins room");
+                                return Err(Error::BadRequest(
+                                    ErrorKind::Forbidden,
+                                    "Last admin cannot be banned in admins room.",
+                                ));
+                            }
+                        }
+                    }
+                    _ => {}
+                }
+            }
+        }
 
@@ -909,14 +898,16 @@ impl Service {
         // pdu without it's state. This is okay because append_pdu can't fail.
         let statehashid = services().rooms.state.append_to_state(&pdu)?;
 
-        let pdu_id = self.append_pdu(
-            &pdu,
-            pdu_json,
-            // Since this PDU references all pdu_leaves we can update the leaves
-            // of the room
-            vec![(*pdu.event_id).to_owned()],
-            state_lock,
-        )?;
+        let pdu_id = self
+            .append_pdu(
+                &pdu,
+                pdu_json,
+                // Since this PDU references all pdu_leaves we can update the leaves
+                // of the room
+                vec![(*pdu.event_id).to_owned()],
+                state_lock,
+            )
+            .await?;
 
         // We set the room state after inserting the pdu, so that we never have a moment in time
         // where events in the current room state do not exist
@@ -954,7 +945,7 @@ impl Service {
     /// Append the incoming event setting the state snapshot to the state from the
     /// server that sent the event.
     #[tracing::instrument(skip_all)]
-    pub fn append_incoming_pdu<'a>(
+    pub async fn append_incoming_pdu<'a>(
         &self,
         pdu: &PduEvent,
         pdu_json: CanonicalJsonObject,
@@ -984,11 +975,11 @@ impl Service {
             return Ok(None);
         }
 
-        let pdu_id =
-            services()
-                .rooms
-                .timeline
-                .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?;
+        let pdu_id = services()
+            .rooms
+            .timeline
+            .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)
+            .await?;
 
         Ok(Some(pdu_id))
     }
@@ -1048,7 +1039,7 @@ impl Service {
     #[tracing::instrument(skip(self, room_id))]
     pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
         let first_pdu = self
-            .all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
+            .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
             .next()
             .expect("Room is not empty")?;
 
@@ -1060,7 +1051,7 @@ impl Service {
         let power_levels: RoomPowerLevelsEventContent = services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
+            .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
             .map(|ev| {
                 serde_json::from_str(ev.content.get())
                     .map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
@@ -1091,11 +1082,9 @@ impl Service {
             .await;
         match response {
             Ok(response) => {
-                let mut pub_key_map = RwLock::new(BTreeMap::new());
+                let pub_key_map = RwLock::new(BTreeMap::new());
                 for pdu in response.pdus {
-                    if let Err(e) = self
-                        .backfill_pdu(backfill_server, pdu, &mut pub_key_map)
-                        .await
+                    if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await
                     {
                         warn!("Failed to add backfilled pdu: {e}");
                     }
@@ -1127,7 +1116,7 @@ impl Service {
                 .globals
                 .roomid_mutex_federation
                 .write()
-                .unwrap()
+                .await
                 .entry(room_id.to_owned())
                 .or_default(),
         );
@@ -1142,7 +1131,7 @@ impl Service {
         services()
             .rooms
             .event_handler
-            .handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map)
+            .handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map)
             .await?;
 
         let value = self.get_pdu_json(&event_id)?.expect("We just created it");
@@ -1159,11 +1148,11 @@ impl Service {
                 .globals
                 .roomid_mutex_insert
                 .write()
-                .unwrap()
+                .await
                 .entry(room_id.clone())
                 .or_default(),
         );
-        let insert_lock = mutex_insert.lock().unwrap();
+        let insert_lock = mutex_insert.lock().await;
 
         let count = services().globals.next_count()?;
         let mut pdu_id = shortroomid.to_be_bytes().to_vec();
@@ -1175,24 +1164,21 @@ impl Service {
 
         drop(insert_lock);
 
-        match pdu.kind {
-            TimelineEventType::RoomMessage => {
-                #[derive(Deserialize)]
-                struct ExtractBody {
-                    body: Option<String>,
-                }
-
-                let content = serde_json::from_str::<ExtractBody>(pdu.content.get())
-                    .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
-
-                if let Some(body) = content.body {
-                    services()
-                        .rooms
-                        .search
-                        .index_pdu(shortroomid, &pdu_id, &body)?;
-                }
-            }
-            _ => {}
+        if pdu.kind == TimelineEventType::RoomMessage {
+            #[derive(Deserialize)]
+            struct ExtractBody {
+                body: Option<String>,
+            }
+
+            let content = serde_json::from_str::<ExtractBody>(pdu.content.get())
+                .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
+
+            if let Some(body) = content.body {
+                services()
+                    .rooms
+                    .search
+                    .index_pdu(shortroomid, &pdu_id, &body)?;
+            }
         }
         drop(mutex_lock);
 
@@ -1200,3 +1186,16 @@ impl Service {
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn comparisons() {
+        assert!(PduCount::Normal(1) < PduCount::Normal(2));
+        assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
+        assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
+        assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
+    }
+}
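Reviewer note: both `append_pdu` and `build_and_append_pdu` stop resolving `#admins:server_name` inline and instead ask a shared `services().admin.get_admin_room()?` helper, so a missing admin room is a single `None` rather than an alias lookup duplicated across call sites. A sketch of the shape of that refactor; the `AdminService` type and error handling below are illustrative, not Conduit's actual definitions:

```rust
#[derive(Debug, Clone, PartialEq)]
struct RoomId(String);

struct AdminService {
    admin_room: Option<RoomId>, // resolved once, e.g. from the #admins alias
}

impl AdminService {
    // Central helper: every caller shares one notion of "the admin room".
    fn get_admin_room(&self) -> Result<Option<RoomId>, String> {
        Ok(self.admin_room.clone())
    }
}

fn guard_admin_event(admin: &AdminService, room_id: &RoomId) -> Result<(), String> {
    // Mirrors the new call sites: admin-room rules only apply when the
    // admin room exists and the event targets it.
    if let Some(admin_room) = admin.get_admin_room()? {
        if &admin_room == room_id {
            return Err("Encryption is not allowed in the admins room.".into());
        }
    }
    Ok(())
}

fn main() {
    let admin = AdminService {
        admin_room: Some(RoomId("!admins:example.org".into())),
    };
    assert!(guard_admin_event(&admin, &RoomId("!admins:example.org".into())).is_err());
    assert!(guard_admin_event(&admin, &RoomId("!other:example.org".into())).is_ok());
}
```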
@@ -5,6 +5,7 @@ use crate::Result;
 use super::{OutgoingKind, SendingEventType};
 
 pub trait Data: Send + Sync {
+    #[allow(clippy::type_complexity)]
     fn active_requests<'a>(
         &'a self,
     ) -> Box<dyn Iterator<Item = Result<(Vec<u8>, OutgoingKind, SendingEventType)>> + 'a>;
@@ -131,7 +131,7 @@ impl Service {
         for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) {
             let entry = initial_transactions
                 .entry(outgoing_kind.clone())
-                .or_insert_with(Vec::new);
+                .or_default();
 
             if entry.len() > 30 {
                 warn!(
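Reviewer note: `entry(..).or_insert_with(Vec::new)` becomes `entry(..).or_default()`: for any value type implementing `Default` (as `Vec` does) the two are equivalent, and clippy prefers the shorter form. For example:

```rust
use std::collections::HashMap;

fn main() {
    let mut initial_transactions: HashMap<String, Vec<u32>> = HashMap::new();

    // `or_default()` inserts `Vec::default()` (an empty Vec) on first access,
    // exactly what `or_insert_with(Vec::new)` did.
    let entry = initial_transactions
        .entry("server:example.org".to_owned())
        .or_default();
    entry.push(1);

    assert_eq!(initial_transactions["server:example.org"], vec![1]);
}
```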
@@ -1,6 +1,6 @@
 mod data;
 use std::{
-    collections::BTreeMap,
+    collections::{BTreeMap, BTreeSet},
     mem,
     sync::{Arc, Mutex},
 };
@@ -28,12 +28,13 @@ use crate::{services, Error, Result};
 pub struct SlidingSyncCache {
     lists: BTreeMap<String, SyncRequestList>,
     subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
-    known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, bool>>,
+    known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>, // For every room, the roomsince number
     extensions: ExtensionsConfig,
 }
 
 pub struct Service {
     pub db: &'static dyn Data,
+    #[allow(clippy::type_complexity)]
     pub connections:
         Mutex<BTreeMap<(OwnedUserId, OwnedDeviceId, String), Arc<Mutex<SlidingSyncCache>>>>,
 }
@@ -61,7 +62,7 @@ impl Service {
         user_id: OwnedUserId,
         device_id: OwnedDeviceId,
         request: &mut sync_events::v4::Request,
-    ) -> BTreeMap<String, BTreeMap<OwnedRoomId, bool>> {
+    ) -> BTreeMap<String, BTreeMap<OwnedRoomId, u64>> {
         let Some(conn_id) = request.conn_id.clone() else {
             return BTreeMap::new();
         };
@@ -127,6 +128,7 @@ impl Service {
                     }
                 }
                 (_, Some(cached_filters)) => list.filters = Some(cached_filters),
+                (Some(list_filters), _) => list.filters = Some(list_filters.clone()),
                 (_, _) => {}
             }
             if list.bump_event_types.is_empty() {
@@ -136,12 +138,18 @@ impl Service {
             cached.lists.insert(list_id.clone(), list.clone());
         }
 
-        cached
-            .subscriptions
-            .extend(request.room_subscriptions.clone().into_iter());
-        request
-            .room_subscriptions
-            .extend(cached.subscriptions.clone().into_iter());
+        cached.subscriptions.extend(
+            request
+                .room_subscriptions
+                .iter()
+                .map(|(k, v)| (k.clone(), v.clone())),
+        );
+        request.room_subscriptions.extend(
+            cached
+                .subscriptions
+                .iter()
+                .map(|(k, v)| (k.clone(), v.clone())),
+        );
 
         request.extensions.e2ee.enabled = request
             .extensions
@@ -210,7 +218,8 @@ impl Service {
         device_id: OwnedDeviceId,
         conn_id: String,
         list_id: String,
-        new_cached_rooms: BTreeMap<OwnedRoomId, bool>,
+        new_cached_rooms: BTreeSet<OwnedRoomId>,
+        globalsince: u64,
     ) {
         let mut cache = self.connections.lock().unwrap();
         let cached = Arc::clone(
@@ -228,7 +237,20 @@ impl Service {
         let cached = &mut cached.lock().unwrap();
         drop(cache);
 
-        cached.known_rooms.insert(list_id, new_cached_rooms);
+        for (roomid, lastsince) in cached
+            .known_rooms
+            .entry(list_id.clone())
+            .or_default()
+            .iter_mut()
+        {
+            if !new_cached_rooms.contains(roomid) {
+                *lastsince = 0;
+            }
+        }
+        let list = cached.known_rooms.entry(list_id).or_default();
+        for roomid in new_cached_rooms {
+            list.insert(roomid, globalsince);
+        }
     }
 
     /// Check if account is deactivated
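Reviewer note: the sliding-sync cache changes `known_rooms` from `BTreeMap<OwnedRoomId, bool>` to `BTreeMap<OwnedRoomId, u64>`: instead of a seen/unseen flag, each room now records the global `since` count it was last sent at, and rooms that drop out of a list are reset to 0 so they are re-sent in full if they reappear. A simplified model of that bookkeeping, with plain `String` keys standing in for the real ID types:

```rust
use std::collections::{BTreeMap, BTreeSet};

fn update_known_rooms(
    known_rooms: &mut BTreeMap<String, BTreeMap<String, u64>>,
    list_id: String,
    new_cached_rooms: BTreeSet<String>,
    globalsince: u64,
) {
    // Rooms no longer in the response forget their position: a 0 roomsince
    // forces a full resync the next time they show up again.
    for (roomid, lastsince) in known_rooms.entry(list_id.clone()).or_default().iter_mut() {
        if !new_cached_rooms.contains(roomid) {
            *lastsince = 0;
        }
    }
    // Rooms in this response are stamped with the current global since count.
    let list = known_rooms.entry(list_id).or_default();
    for roomid in new_cached_rooms {
        list.insert(roomid, globalsince);
    }
}

fn main() {
    let mut known = BTreeMap::new();
    update_known_rooms(&mut known, "all".into(), BTreeSet::from(["!a:x".to_owned()]), 5);
    update_known_rooms(&mut known, "all".into(), BTreeSet::from(["!b:x".to_owned()]), 9);
    assert_eq!(known["all"]["!a:x"], 0); // dropped out of the list, reset
    assert_eq!(known["all"]["!b:x"], 9);
}
```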
@@ -116,9 +116,11 @@ impl Error {
             Self::BadRequest(kind, _) => (
                 kind.clone(),
                 match kind {
-                    Forbidden | GuestAccessForbidden | ThreepidAuthFailed | ThreepidDenied => {
-                        StatusCode::FORBIDDEN
-                    }
+                    WrongRoomKeysVersion { .. }
+                    | Forbidden
+                    | GuestAccessForbidden
+                    | ThreepidAuthFailed
+                    | ThreepidDenied => StatusCode::FORBIDDEN,
                     Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED,
                     NotFound | Unrecognized => StatusCode::NOT_FOUND,
                     LimitExceeded { .. } => StatusCode::TOO_MANY_REQUESTS,