diff --git a/.cargo/audit.toml b/.cargo/audit.toml deleted file mode 100644 index 37148cfb..00000000 --- a/.cargo/audit.toml +++ /dev/null @@ -1,27 +0,0 @@ -[advisories] -ignore = ["RUSTSEC-2024-0436", "RUSTSEC-2025-0014"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] -informational_warnings = [] # warn for categories of informational advisories -severity_threshold = "none" # CVSS severity ("none", "low", "medium", "high", "critical") - -# Advisory Database Configuration -[database] -path = "~/.cargo/advisory-db" # Path where advisory git repo will be cloned -url = "https://github.com/RustSec/advisory-db.git" # URL to git repo -fetch = true # Perform a `git fetch` before auditing (default: true) -stale = false # Allow stale advisory DB (i.e. no commits for 90 days, default: false) - -# Output Configuration -[output] -deny = ["warnings", "unmaintained", "unsound", "yanked"] # exit on error if unmaintained dependencies are found -format = "terminal" # "terminal" (human readable report) or "json" -quiet = false # Only print information on error -show_tree = true # Show inverse dependency trees along with advisories (default: true) - -# Target Configuration -[target] -arch = ["x86_64", "aarch64"] # Ignore advisories for CPU architectures other than these -os = ["linux", "windows", "macos"] # Ignore advisories for operating systems other than these - -[yanked] -enabled = true # Warn for yanked crates in Cargo.lock (default: true) -update_index = true # Auto-update the crates.io index (default: true) diff --git a/.dockerignore b/.dockerignore index 453634df..c78ddbac 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,9 +1,10 @@ # Local build and dev artifacts -target/ +target +tests # Docker files Dockerfile* -docker/ +docker-compose* # IDE files .vscode diff --git a/.editorconfig b/.editorconfig index 2d7438a4..dba82b95 100644 --- a/.editorconfig +++ b/.editorconfig @@ -11,14 +11,5 @@ indent_style = space insert_final_newline = true max_line_length = 120 
-[*.{md,txt}] -indent_style = space -indent_size = 4 -max_line_length = 80 - [*.nix] -indent_size = 2 - -[*.rs] -indent_style = tab -max_line_length = 98 +indent_size = 2 \ No newline at end of file diff --git a/.envrc b/.envrc index 952ec2f8..403a9bdf 100644 --- a/.envrc +++ b/.envrc @@ -1,7 +1,5 @@ #!/usr/bin/env bash -dotenv_if_exists - -use flake ".#${DIRENV_DEVSHELL:-default}" +use flake PATH_add bin diff --git a/.forgejo/workflows/build-alpine.yml b/.forgejo/workflows/build-alpine.yml deleted file mode 100644 index b1757a60..00000000 --- a/.forgejo/workflows/build-alpine.yml +++ /dev/null @@ -1,49 +0,0 @@ -on: - - workflow-dispatch - - push - -jobs: - build: - runs-on: ubuntu-latest - container: - image: alpine:edge - - steps: - - name: set up dependencies - run: | - apk update - apk upgrade - apk add nodejs git alpine-sdk - - uses: actions/checkout@v4 - name: checkout the alpine dir - with: - sparse-checkout: "alpine/" - - # - uses: actions/checkout@v4 - # name: checkout the rest in the alpine dir - # with: - # path: 'alpine/continuwuity' - - name: set up user - run: adduser -DG abuild ci - - - name: set up keys - run: | - pwd - mkdir ~/.abuild - echo "${{ secrets.abuild_privkey }}" > ~/.abuild/ci@continuwuity.rsa - echo "${{ secrets.abuild_pubkey }}" > ~/.abuild/ci@continuwuity.rsa.pub - echo $HOME - echo 'PACKAGER_PRIVKEY="/root/.abuild/ci@continuwuity.rsa"' > ~/.abuild/abuild.conf - ls ~/.abuild - - - name: go go gadget abuild - run: | - cd alpine - # modify the APKBUILD to use the current branch instead of the release - # note that it seems to require the repo to be public (as you'll get - # a 404 even if the token is provided) - export ARCHIVE_URL="${{ github.server_url }}/${{ github.repository }}/archive/${{ github.ref_name }}.tar.gz" - echo $ARCHIVE_URL - sed -i '/^source=/c\source="'"$ARCHIVE_URL" APKBUILD - abuild -F checksum - abuild -Fr diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml deleted file mode 
100644 index 7d95a317..00000000 --- a/.forgejo/workflows/documentation.yml +++ /dev/null @@ -1,73 +0,0 @@ -name: Documentation - -on: - pull_request: - push: - branches: - - main - tags: - - "v*" - workflow_dispatch: - -concurrency: - group: "pages-${{ github.ref }}" - cancel-in-progress: true - -jobs: - docs: - name: Build and Deploy Documentation - runs-on: ubuntu-latest - - steps: - - name: Sync repository - uses: https://github.com/actions/checkout@v4 - with: - persist-credentials: false - fetch-depth: 0 - - - name: Setup mdBook - uses: https://github.com/peaceiris/actions-mdbook@v2 - with: - mdbook-version: "latest" - - - name: Build mdbook - run: mdbook build - - - name: Prepare static files for deployment - run: | - mkdir -p ./public/.well-known/matrix - mkdir -p ./public/.well-known/continuwuity - mkdir -p ./public/schema - # Copy the Matrix .well-known files - cp ./docs/static/server ./public/.well-known/matrix/server - cp ./docs/static/client ./public/.well-known/matrix/client - cp ./docs/static/client ./public/.well-known/matrix/support - cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements - cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json - # Copy the custom headers file - cp ./docs/static/_headers ./public/_headers - echo "Copied .well-known files and _headers to ./public" - - - name: Setup Node.js - uses: https://github.com/actions/setup-node@v4 - with: - node-version: 20 - - - name: Install dependencies - run: npm install --save-dev wrangler@latest - - - name: Deploy to Cloudflare Pages (Production) - if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' - uses: https://github.com/cloudflare/wrangler-action@v3 - with: - accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} - apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./public --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}" - - - name: Deploy to 
Cloudflare Pages (Preview) - if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' - uses: https://github.com/cloudflare/wrangler-action@v3 - with: - accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} - apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./public --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}" diff --git a/.forgejo/workflows/element.yml b/.forgejo/workflows/element.yml deleted file mode 100644 index db771197..00000000 --- a/.forgejo/workflows/element.yml +++ /dev/null @@ -1,127 +0,0 @@ -name: Deploy Element Web - -on: - schedule: - - cron: "0 0 * * *" - workflow_dispatch: - -concurrency: - group: "element-${{ github.ref }}" - cancel-in-progress: true - -jobs: - build-and-deploy: - name: Build and Deploy Element Web - runs-on: ubuntu-latest - - steps: - - name: Setup Node.js - uses: https://code.forgejo.org/actions/setup-node@v4 - with: - node-version: "20" - - - name: Clone, setup, and build Element Web - run: | - echo "Cloning Element Web..." - git clone https://github.com/maunium/element-web - cd element-web - git checkout develop - git pull - - echo "Cloning matrix-js-sdk..." - git clone https://github.com/matrix-org/matrix-js-sdk.git - - echo "Installing Yarn..." - npm install -g yarn - - echo "Installing dependencies..." - yarn install - - echo "Preparing build environment..." - mkdir -p .home - - echo "Cleaning up specific node_modules paths..." - rm -rf node_modules/@types/eslint-scope/ matrix-*-sdk/node_modules/@types/eslint-scope || echo "Cleanup paths not found, continuing." - - echo "Getting matrix-js-sdk commit hash..." - cd matrix-js-sdk - jsver=$(git rev-parse HEAD) - jsver=${jsver:0:12} - cd .. - echo "matrix-js-sdk version hash: $jsver" - - echo "Getting element-web commit hash..." 
- ver=$(git rev-parse HEAD) - ver=${ver:0:12} - echo "element-web version hash: $ver" - - chmod +x ./build-sh - - export VERSION="$ver-js-$jsver" - echo "Building Element Web version: $VERSION" - ./build-sh - - echo "Checking for build output..." - ls -la webapp/ - - - name: Create config.json - run: | - cat < ./element-web/webapp/config.json - { - "default_server_name": "continuwuity.org", - "default_server_config": { - "m.homeserver": { - "base_url": "https://matrix.continuwuity.org" - } - }, - "default_country_code": "GB", - "default_theme": "dark", - "mobile_guide_toast": false, - "show_labs_settings": true, - "room_directory": [ - "continuwuity.org", - "matrixrooms.info" - ], - "settings_defaults": { - "UIFeature.urlPreviews": true, - "UIFeature.feedback": false, - "UIFeature.voip": false, - "UIFeature.shareQrCode": false, - "UIFeature.shareSocial": false, - "UIFeature.locationSharing": false, - "enableSyntaxHighlightLanguageDetection": true - }, - "features": { - "feature_pinning": true, - "feature_custom_themes": true - } - } - EOF - echo "Created ./element-web/webapp/config.json" - cat ./element-web/webapp/config.json - - - name: Upload Artifact - uses: https://code.forgejo.org/actions/upload-artifact@v3 - with: - name: element-web - path: ./element-web/webapp/ - retention-days: 14 - - - name: Install Wrangler - run: npm install --save-dev wrangler@latest - - - name: Deploy to Cloudflare Pages (Production) - if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' - uses: https://github.com/cloudflare/wrangler-action@v3 - with: - accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} - apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element" - - - name: Deploy to Cloudflare Pages (Preview) - if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' - uses: 
https://github.com/cloudflare/wrangler-action@v3 - with: - accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} - apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element" diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml deleted file mode 100644 index 141bfef9..00000000 --- a/.forgejo/workflows/release-image.yml +++ /dev/null @@ -1,235 +0,0 @@ -name: Release Docker Image -concurrency: - group: "release-image-${{ github.ref }}" - -on: - pull_request: - push: - paths-ignore: - - "*.md" - - "**/*.md" - - ".gitlab-ci.yml" - - ".gitignore" - - "renovate.json" - - "debian/**" - - "docker/**" - - "docs/**" - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -env: - BUILTIN_REGISTRY: forgejo.ellis.link - BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}" - -jobs: - define-variables: - runs-on: ubuntu-latest - - outputs: - images: ${{ steps.var.outputs.images }} - images_list: ${{ steps.var.outputs.images_list }} - build_matrix: ${{ steps.var.outputs.build_matrix }} - - steps: - - name: Setting variables - uses: https://github.com/actions/github-script@v7 - id: var - with: - script: | - const githubRepo = '${{ github.repository }}'.toLowerCase() - const repoId = githubRepo.split('/')[1] - - core.setOutput('github_repository', githubRepo) - const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo - let images = [] - if (process.env.BUILTIN_REGISTRY_ENABLED === "true") { - images.push(builtinImage) - } - core.setOutput('images', images.join("\n")) - core.setOutput('images_list', images.join(",")) - const platforms = ['linux/amd64', 'linux/arm64'] - 
core.setOutput('build_matrix', JSON.stringify({ - platform: platforms, - include: platforms.map(platform => { return { - platform, - slug: platform.replace('/', '-') - }}) - })) - - build-image: - runs-on: dind - container: ghcr.io/catthehacker/ubuntu:act-latest - needs: define-variables - permissions: - contents: read - packages: write - attestations: write - id-token: write - strategy: - matrix: - { - "include": - [ - { "platform": "linux/amd64", "slug": "linux-amd64" }, - { "platform": "linux/arm64", "slug": "linux-arm64" }, - ], - "platform": ["linux/amd64", "linux/arm64"], - } - steps: - - name: Echo strategy - run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' - - name: Echo matrix - run: echo '${{ toJSON(matrix) }}' - - name: Checkout repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - run: | - if ! command -v rustup &> /dev/null ; then - curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y - echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH - fi - - uses: https://github.com/cargo-bins/cargo-binstall@main - - run: cargo binstall timelord-cli@3.0.1 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. - - name: Login to builtin registry - uses: docker/login-action@v3 - with: - registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} - password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} - - # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. 
The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. - - name: Extract metadata (labels, annotations) for Docker - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{needs.define-variables.outputs.images}} - # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 - env: - DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index - - # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages. - # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository. - # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step. - # It will not push images generated from a pull request - - name: Get short git commit SHA - id: sha - run: | - calculatedSha=$(git rev-parse --short ${{ github.sha }}) - echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV - - name: Get Git commit timestamps - run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - - name: Set up timelord - uses: actions/cache/restore@v3 - with: - path: /timelord/ - key: timelord-v0 # Cache is already split per runner - - name: Run timelord to set timestamps - run: timelord sync --source-dir . --cache-dir /timelord/ - - name: Save timelord - uses: actions/cache/save@v3 - with: - path: /timelord/ - key: timelord-v0 - - name: Build and push Docker image by digest - id: build - uses: docker/build-push-action@v6 - with: - context: . 
- file: "docker/Dockerfile" - build-args: | - CONDUWUIT_VERSION_EXTRA=${{ env.COMMIT_SHORT_SHA }} - platforms: ${{ matrix.platform }} - labels: ${{ steps.meta.outputs.labels }} - annotations: ${{ steps.meta.outputs.annotations }} - cache-from: type=gha - cache-to: type=gha,mode=max - sbom: true - outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true - env: - SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }} - - # For publishing multi-platform manifests - - name: Export digest - run: | - mkdir -p /tmp/digests - digest="${{ steps.build.outputs.digest }}" - touch "/tmp/digests/${digest#sha256:}" - - - name: Upload digest - uses: forgejo/upload-artifact@v4 - with: - name: digests-${{ matrix.slug }} - path: /tmp/digests/* - if-no-files-found: error - retention-days: 1 - - merge: - runs-on: dind - container: ghcr.io/catthehacker/ubuntu:act-latest - needs: [define-variables, build-image] - steps: - - name: Download digests - uses: forgejo/download-artifact@v4 - with: - path: /tmp/digests - pattern: digests-* - merge-multiple: true - # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. 
- - name: Login to builtin registry - uses: docker/login-action@v3 - with: - registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} - password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Extract metadata (tags) for Docker - id: meta - uses: docker/metadata-action@v5 - with: - tags: | - type=semver,pattern=v{{version}} - type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }} - type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} - type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) 1= github.ref && 'branch-' || '' }} - type=ref,event=pr - type=sha,format=long - images: ${{needs.define-variables.outputs.images}} - # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 - env: - DOCKER_METADATA_ANNOTATIONS_LEVELS: index - - - name: Create manifest list and push - working-directory: /tmp/digests - env: - IMAGES: ${{needs.define-variables.outputs.images}} - shell: bash - run: | - IFS=$'\n' - IMAGES_LIST=($IMAGES) - ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS) - TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS) - for REPO in "${IMAGES_LIST[@]}"; do - docker buildx imagetools create \ - $(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \ - $(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \ - $(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done) - done - - - name: Inspect image - env: - IMAGES: ${{needs.define-variables.outputs.images}} - shell: bash - run: | - IMAGES_LIST=($IMAGES) - for REPO in "${IMAGES_LIST[@]}"; do - docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }} - done diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index 
3dfaca65..00000000 --- a/.gitattributes +++ /dev/null @@ -1,87 +0,0 @@ -# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Rust.gitattributes -# Auto detect text files and perform normalization -* text=auto - -*.rs text diff=rust -*.toml text diff=toml -Cargo.lock text - -# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Common.gitattributes -# Documents -*.bibtex text diff=bibtex -*.doc diff=astextplain -*.DOC diff=astextplain -*.docx diff=astextplain -*.DOCX diff=astextplain -*.dot diff=astextplain -*.DOT diff=astextplain -*.pdf diff=astextplain -*.PDF diff=astextplain -*.rtf diff=astextplain -*.RTF diff=astextplain -*.md text diff=markdown -*.mdx text diff=markdown -*.tex text diff=tex -*.adoc text -*.textile text -*.mustache text -*.csv text eol=crlf -*.tab text -*.tsv text -*.txt text -*.sql text -*.epub diff=astextplain - -# Graphics -*.png binary -*.jpg binary -*.jpeg binary -*.gif binary -*.tif binary -*.tiff binary -*.ico binary -# SVG treated as text by default. 
-*.svg text -*.eps binary - -# Scripts -*.bash text eol=lf -*.fish text eol=lf -*.ksh text eol=lf -*.sh text eol=lf -*.zsh text eol=lf -# These are explicitly windows files and should use crlf -*.bat text eol=crlf -*.cmd text eol=crlf -*.ps1 text eol=crlf - -# Serialisation -*.json text -*.toml text -*.xml text -*.yaml text -*.yml text - -# Archives -*.7z binary -*.bz binary -*.bz2 binary -*.bzip2 binary -*.gz binary -*.lz binary -*.lzma binary -*.rar binary -*.tar binary -*.taz binary -*.tbz binary -*.tbz2 binary -*.tgz binary -*.tlz binary -*.txz binary -*.xz binary -*.Z binary -*.zip binary -*.zst binary - -# Text files where line endings should be preserved -*.patch -text \ No newline at end of file diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..4210554b --- /dev/null +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,8 @@ + + + +----------------------------------------------------------------------------- + +- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test` +- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..8b0d4e32 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,266 @@ +name: CI and Artifacts + +on: + pull_request: + push: + branches: + - main + - dev + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +env: + # Required to make some things output color + TERM: ansi + # Publishing to my nix binary cache + ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} + # Just in case incremental is still being set to true, speeds up CI + CARGO_INCREMENTAL: 0 + # Custom nix binary cache if fork is being used + ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} + ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} + +permissions: + packages: write + contents: read + +jobs: + setup: + name: CI Setup + runs-on: ubuntu-latest + 
steps: + - name: Sync repository + uses: actions/checkout@v4 + + - name: Install Nix (with flakes and nix-command enabled) + uses: cachix/install-nix-action@v26 + with: + nix_path: nixpkgs=channel:nixos-unstable + + # Add `nix-community`, Crane, upstream Conduit, and conduwuit binary caches + extra_nix_config: | + experimental-features = nix-command flakes + extra-substituters = https://nix-community.cachix.org + extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= + extra-substituters = https://crane.cachix.org + extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= + extra-substituters = https://nix.computer.surgery/conduit + extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo= + extra-substituters = https://attic.kennel.juneis.dog/conduit + extra-trusted-public-keys = conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg= + extra-substituters = https://attic.kennel.juneis.dog/conduwuit + extra-trusted-public-keys = conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw= + + - name: Add alternative Nix binary caches if specified + if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }} + run: | + echo "extra-substituters = ${{ env.ATTIC_ENDPOINT }}" >> /etc/nix/nix.conf + echo "extra-trusted-public-keys = ${{ env.ATTIC_PUBLIC_KEY }}" >> /etc/nix/nix.conf + + - name: Pop/push Magic Nix Cache + uses: DeterminateSystems/magic-nix-cache-action@main + + - name: Configure `nix-direnv` + run: | + echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc" + + - name: Install `direnv` and `nix-direnv` + run: nix-env -f "" -iA direnv -iA nix-direnv + + - name: Pop/push downloaded crate cache + uses: actions/cache@v4 + with: + key: downloaded-crates + path: ~/.cargo + + - name: Pop/push compiled crate cache + uses: actions/cache@v4 + with: + key: compiled-crates-${{runner.os}} + path: target + + # Do this to shorten the logs for the 
real CI step + - name: Populate `/nix/store` + run: nix develop --command true + + - name: Allow direnv + run: direnv allow + + - name: Cache x86_64 inputs for devShell + run: | + ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation + + + build-and-test: + name: CI and Artifacts + needs: setup + runs-on: ubuntu-latest + strategy: + matrix: + target: [ + "static-x86_64-unknown-linux-musl", + "static-x86_64-unknown-linux-musl-jemalloc", + "static-x86_64-unknown-linux-musl-hmalloc", + "static-aarch64-unknown-linux-musl", + "static-aarch64-unknown-linux-musl-jemalloc", + "static-aarch64-unknown-linux-musl-hmalloc", + ] + oci-target: [ + "x86_64-unknown-linux-gnu", + "x86_64-unknown-linux-musl", + "x86_64-unknown-linux-musl-jemalloc", + "x86_64-unknown-linux-musl-hmalloc", + "aarch64-unknown-linux-musl", + "aarch64-unknown-linux-musl-jemalloc", + "aarch64-unknown-linux-musl-hmalloc", + ] + + steps: + - name: Perform continuous integration + run: direnv exec . engage + + + - name: Build static artifacts + run: | + ./bin/nix-build-and-cache .#${{ matrix.target }} + mkdir -p target/release + cp -v -f result/bin/conduit target/release + direnv exec . 
cargo deb --no-build --output target/debian/${{ matrix.target }}.deb + + - name: Upload static artifacts + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.target }} + path: result/bin/conduit + if-no-files-found: error + + - name: Upload static deb artifacts + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.target }}.deb + path: target/debian/${{ matrix.target }}.deb + if-no-files-found: error + + + - name: Build OCI images + run: | + ./bin/nix-build-and-cache .#oci-image-${{ matrix.oci-target }} + cp -v -f result oci-image-${{ matrix.oci-target }}.tar.gz + + - name: Upload OCI image artifacts + uses: actions/upload-artifact@v4 + with: + name: oci-image-${{ matrix.oci-target }} + path: oci-image-${{ matrix.oci-target }}.tar.gz + if-no-files-found: error + # don't compress again + compression-level: 0 + + + + + publish: + needs: build-and-test + runs-on: ubuntu-latest + steps: + - name: Extract metadata for Dockerhub + env: + REGISTRY: registry.hub.docker.com + IMAGE_NAME: ${{ github.repository }} + id: meta-dockerhub + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Extract metadata for GitHub Container Registry + env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + id: meta-ghcr + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + + - name: Login to Dockerhub + env: + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + DOCKER_USERNAME: ${{ vars.DOCKER_USERNAME }} + if: ${{ (github.event_name != 'pull_request') && (env.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} + uses: docker/login-action@v3 + with: + # username is not really a secret + username: ${{ vars.DOCKER_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 + env: + REGISTRY: ghcr.io + with: + registry: ${{ env.REGISTRY }} + username: ${{ 
github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + + - name: Publish to Dockerhub + env: + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + DOCKER_USERNAME: ${{ vars.DOCKER_USERNAME }} + IMAGE_NAME: docker.io/${{ github.repository }} + IMAGE_SUFFIX_AMD64: amd64 + IMAGE_SUFFIX_ARM64V8: arm64v8 + if: ${{ (github.event_name != 'pull_request') && (env.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} + run: | + docker load -i oci-image-amd64.tar.gz + IMAGE_ID_AMD64=$(docker images -q conduit:main) + docker load -i oci-image-arm64v8.tar.gz + IMAGE_ID_ARM64V8=$(docker images -q conduit:main) + + # Tag and push the architecture specific images + docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 + docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + docker push $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 + docker push $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + # Tag the multi-arch image + docker manifest create $IMAGE_NAME:$GITHUB_SHA --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + docker manifest push $IMAGE_NAME:$GITHUB_SHA + # Tag and push the git ref + docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME + # Tag "main" as latest (stable branch) + if [[ "$GITHUB_REF_NAME" = "main" ]]; then + docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + docker manifest push $IMAGE_NAME:latest + fi + + - name: Publish to GitHub Container Registry + if: github.event_name != 'pull_request' + env: + IMAGE_NAME: ghcr.io/${{ github.repository }} + IMAGE_SUFFIX_AMD64: amd64 + IMAGE_SUFFIX_ARM64V8: arm64v8 + run: | + docker load -i oci-image-amd64.tar.gz + IMAGE_ID_AMD64=$(docker images -q conduit:main) 
+ docker load -i oci-image-arm64v8.tar.gz + IMAGE_ID_ARM64V8=$(docker images -q conduit:main) + + # Tag and push the architecture specific images + docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 + docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + docker push $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 + docker push $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + # Tag the multi-arch image + docker manifest create $IMAGE_NAME:$GITHUB_SHA --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + docker manifest push $IMAGE_NAME:$GITHUB_SHA + # Tag and push the git ref + docker manifest create $IMAGE_NAME:$GITHUB_REF_NAME --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + docker manifest push $IMAGE_NAME:$GITHUB_REF_NAME + # Tag "main" as latest (stable branch) + if [[ "$GITHUB_REF_NAME" = "main" ]]; then + docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$GITHUB_SHA-$IMAGE_SUFFIX_ARM64V8 + docker manifest push $IMAGE_NAME:latest + fi diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml new file mode 100644 index 00000000..cdeeef42 --- /dev/null +++ b/.github/workflows/documentation.yml @@ -0,0 +1,117 @@ +name: Documentation and GitHub Pages + +on: + pull_request: + push: + branches: + - main + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +env: + # Required to make some things output color + TERM: ansi + # Publishing to my nix binary cache + ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} + # Custom nix binary cache if fork is being used + ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} + ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 
+# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + docs: + name: Documentation and GitHub Pages + + runs-on: ubuntu-latest + + permissions: + pages: write + id-token: write + + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + + steps: + - name: Sync repository + uses: actions/checkout@v4 + + - name: Setup GitHub Pages + if: github.event_name != 'pull_request' + uses: actions/configure-pages@v5 + + - name: Install Nix (with flakes and nix-command enabled) + uses: cachix/install-nix-action@v26 + with: + nix_path: nixpkgs=channel:nixos-unstable + + # Add `nix-community`, Crane, upstream Conduit, and conduwuit binary caches + extra_nix_config: | + experimental-features = nix-command flakes + extra-substituters = https://nix-community.cachix.org + extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= + extra-substituters = https://crane.cachix.org + extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= + extra-substituters = https://nix.computer.surgery/conduit + extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo= + extra-substituters = https://attic.kennel.juneis.dog/conduit + extra-trusted-public-keys = conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg= + extra-substituters = https://attic.kennel.juneis.dog/conduwuit + extra-trusted-public-keys = conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw= + + - name: Add alternative Nix binary caches if specified + if: ${{ (env.ATTIC_ENDPOINT != '') && (env.ATTIC_PUBLIC_KEY != '') }} + run: | + echo "extra-substituters = ${{ env.ATTIC_ENDPOINT }}" >> /etc/nix/nix.conf + echo "extra-trusted-public-keys = ${{ env.ATTIC_PUBLIC_KEY }}" >> /etc/nix/nix.conf + + - name: Pop/push Magic Nix Cache + uses: DeterminateSystems/magic-nix-cache-action@main + + - name: 
Configure `nix-direnv` + run: | + echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc" + + - name: Install `direnv` and `nix-direnv` + run: nix-env -f "" -iA direnv -iA nix-direnv + + # Do this to shorten the logs for the real CI step + - name: Populate `/nix/store` + run: nix develop --command true + + - name: Allow direnv + run: direnv allow + + - name: Cache x86_64 inputs for devShell + run: | + ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation + + - name: Build documentation (book) + run: | + ./bin/nix-build-and-cache .#book + cp -r --dereference result public + - name: Upload generated documentation (book) as normal artifact + uses: actions/upload-artifact@v4 + with: + name: public + path: public + if-no-files-found: error + # don't compress again + compression-level: 0 + + - name: Upload generated documentation (book) as GitHub Pages artifact + if: github.event_name != 'pull_request' + uses: actions/upload-pages-artifact@v3 + with: + path: public + + - name: Deploy to GitHub Pages + if: github.event_name != 'pull_request' + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml new file mode 100644 index 00000000..5b4b7248 --- /dev/null +++ b/.github/workflows/trivy.yml @@ -0,0 +1,40 @@ +name: Trivy code and vulnerability scanning + +on: + pull_request: + push: + branches: + - main + schedule: + - cron: '00 12 * * *' + +permissions: + contents: read + +jobs: + trivy-scan: + name: Trivy Scan + runs-on: ubuntu-latest + permissions: + contents: read + security-events: write + actions: read + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Trivy code and vulnerability scanner on repo + uses: aquasecurity/trivy-action@0.19.0 + with: + scan-type: repo + format: sarif + output: trivy-results.sarif + severity: CRITICAL,HIGH,MEDIUM,LOW + + - name: Run Trivy code and vulnerability scanner on filesystem + uses: 
aquasecurity/trivy-action@0.19.0 + with: + scan-type: fs + format: sarif + output: trivy-results.sarif + severity: CRITICAL,HIGH,MEDIUM,LOW \ No newline at end of file diff --git a/.gitignore b/.gitignore index b5fea66b..4332a81a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,3 @@ -# Local environment overrides -/.env - # CMake cmake-build-*/ @@ -30,7 +27,7 @@ modules.xml .nfs* # Rust -/target +/target/ ### vscode ### .vscode/* @@ -84,14 +81,5 @@ public/ # macOS .DS_Store -# VS Code -.vscode/ - # Zed .zed/ - -# idk where you're coming from, but i'm tired of you -rustc-ice-* - -# complement test logs are huge -tests/test_results/complement/test_logs.jsonl diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 00000000..4e4a9490 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,183 @@ +stages: + - ci + - artifacts + - publish + +variables: + # Makes some things print in color + TERM: ansi + +# Avoid duplicate pipelines +# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines +workflow: + rules: + - if: $CI_PIPELINE_SOURCE == "merge_request_event" + - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS + when: never + - if: $CI + +before_script: + # Enable nix-command and flakes + - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi + + # Add conduwuit binary cache + - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduwuit" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw=" >> /etc/nix/nix.conf; fi + + - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduit" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg=" >> /etc/nix/nix.conf; fi + + 
# Add upstream Conduit binary cache + - if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi + + # Add alternate binary cache + - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi + + # Add crane binary cache + - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi + + # Add nix-community binary cache + - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi + + # Install direnv and nix-direnv + - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi + + # Allow .envrc + - if command -v nix > /dev/null; then direnv allow; fi + + # Set CARGO_HOME to a cacheable path + - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" + +ci: + stage: ci + image: nixos/nix:2.21.2 + script: + # Cache the inputs required for the devShell + - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation + + - direnv exec . 
engage + cache: + key: nix + paths: + - target + - .gitlab-ci.d + rules: + # CI on upstream runners (only available for maintainers) + - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true" + # Manual CI on unprotected branches that are not MRs + - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false" + when: manual + # Manual CI on forks + - if: $IS_UPSTREAM_CI != "true" + when: manual + - if: $CI + interruptible: true + +artifacts: + stage: artifacts + image: nixos/nix:2.21.2 + script: + - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl + - cp result/bin/conduit x86_64-unknown-linux-musl + + - mkdir -p target/release + - cp result/bin/conduit target/release + - direnv exec . cargo deb --no-build + - mv target/debian/*.deb x86_64-unknown-linux-musl.deb + + # Since the OCI image package is based on the binary package, this has the + # fun side effect of uploading the normal binary too. Conduit users who are + # deploying with Nix can leverage this fact by adding our binary cache to + # their systems. + # + # Note that although we have an `oci-image-x86_64-unknown-linux-musl` + # output, we don't build it because it would be largely redundant to this + # one since it's all containerized anyway. 
+ - ./bin/nix-build-and-cache .#oci-image + - cp result oci-image-amd64.tar.gz + + - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl + - cp result/bin/conduit aarch64-unknown-linux-musl + + - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl + - cp result oci-image-arm64v8.tar.gz + + - ./bin/nix-build-and-cache .#book + # We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746 + - cp -r --dereference result public + artifacts: + paths: + - x86_64-unknown-linux-musl + - aarch64-unknown-linux-musl + - x86_64-unknown-linux-musl.deb + - oci-image-amd64.tar.gz + - oci-image-arm64v8.tar.gz + - public + rules: + # CI required for all MRs + - if: $CI_PIPELINE_SOURCE == "merge_request_event" + # Optional CI on forks + - if: $IS_UPSTREAM_CI != "true" + when: manual + allow_failure: true + - if: $CI + interruptible: true + +.push-oci-image: + stage: publish + image: docker:26.0.1 + services: + - docker:26.0.1-dind + variables: + IMAGE_SUFFIX_AMD64: amd64 + IMAGE_SUFFIX_ARM64V8: arm64v8 + script: + - docker load -i oci-image-amd64.tar.gz + - IMAGE_ID_AMD64=$(docker images -q conduit:main) + - docker load -i oci-image-arm64v8.tar.gz + - IMAGE_ID_ARM64V8=$(docker images -q conduit:main) + # Tag and push the architecture specific images + - docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 + - docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 + - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + # Tag the multi-arch image + - docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + - docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA + # Tag and push the git ref + - docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend 
$IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + - docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME + # Tag git tags as 'latest' + - | + if [[ -n "$CI_COMMIT_TAG" ]]; then + docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 + docker manifest push $IMAGE_NAME:latest + fi + dependencies: + - artifacts + only: + - main + - tags + +oci-image:push-gitlab: + extends: .push-oci-image + variables: + IMAGE_NAME: $CI_REGISTRY_IMAGE/conduwuit + before_script: + - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY + +pages: + stage: publish + dependencies: + - artifacts + only: + - next + script: + - "true" + artifacts: + paths: + - public diff --git a/.gitlab/merge_request_templates/MR.md b/.gitlab/merge_request_templates/MR.md new file mode 100644 index 00000000..4210554b --- /dev/null +++ b/.gitlab/merge_request_templates/MR.md @@ -0,0 +1,8 @@ + + + +----------------------------------------------------------------------------- + +- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test` +- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license + diff --git a/.gitlab/route-map.yml b/.gitlab/route-map.yml new file mode 100644 index 00000000..cf31bd18 --- /dev/null +++ b/.gitlab/route-map.yml @@ -0,0 +1,3 @@ +# Docs: Map markdown to html files +- source: /docs/(.+)\.md/ + public: '\1.html' diff --git a/.mailmap b/.mailmap deleted file mode 100644 index fa267e13..00000000 --- a/.mailmap +++ /dev/null @@ -1,15 +0,0 @@ -AlexPewMaster <68469103+AlexPewMaster@users.noreply.github.com> -Daniel Wiesenberg -Devin Ragotzy -Devin Ragotzy -Jonas Platte -Jonas Zohren -Jonathan de Jong -June Clementine Strawberry -June Clementine Strawberry -June Clementine Strawberry -Olivia Lee -Rudi Floren -Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> -Timo 
Kösters -x4u <14617923-x4u@users.noreply.gitlab.com> diff --git a/.markdownlintignore b/.markdownlintignore deleted file mode 120000 index 3e4e48b0..00000000 --- a/.markdownlintignore +++ /dev/null @@ -1 +0,0 @@ -.gitignore \ No newline at end of file diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 00000000..24f8af80 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,11 @@ +{ + "recommendations": [ + "rust-lang.rust-analyzer", + "editorconfig.editorconfig", + "ms-azuretools.vscode-docker", + "eamodio.gitlens", + "serayuzgur.crates", + "vadimcn.vscode-lldb", + "timonwong.shellcheck" + ] +} diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..fec4ee08 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,35 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug conduit", + "sourceLanguages": ["rust"], + "cargo": { + "args": [ + "build", + "--bin=conduit", + "--package=conduit" + ], + "filter": { + "name": "conduit", + "kind": "bin" + } + }, + "args": [], + "env": { + "RUST_BACKTRACE": "1", + "CONDUIT_DATABASE_PATH": "/tmp/awawawa", + "CONDUIT_ADDRESS": "0.0.0.0", + "CONDUIT_PORT": "55551", + "CONDUIT_SERVER_NAME": "your.server.name", + "CONDUIT_LOG": "debug" + }, + "cwd": "${workspaceFolder}" + } + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index a4fad964..00000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "cSpell.words": [ - "Forgejo", - "appservice", - "appservices", - "conduwuit", - "continuwuity", - "homeserver", - "homeservers" - ] -} diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 476e68fb..a8682537 100644 --- 
a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,3 +1,4 @@ + # Contributor Covenant Code of Conduct ## Our Pledge @@ -59,7 +60,8 @@ representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or email at , and respectively. +reported to the community leaders responsible for enforcement over email at +strawberry@puppygock.gay or over Matrix at @strawberry:puppygock.gay. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the @@ -129,3 +131,4 @@ For answers to common questions about this code of conduct, see the FAQ at [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index ecff7173..00000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,144 +0,0 @@ -# Contributing guide - -This page is for about contributing to conduwuit. The -[development](./development.md) page may be of interest for you as well. - -If you would like to work on an [issue][issues] that is not assigned, preferably -ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix], -and comment on it. - -### Linting and Formatting - -It is mandatory all your changes satisfy the lints (clippy, rustc, rustdoc, etc) -and your code is formatted via the **nightly** `cargo fmt`. A lot of the -`rustfmt.toml` features depend on nightly toolchain. It would be ideal if they -weren't nightly-exclusive features, but they currently still are. CI's rustfmt -uses nightly. - -If you need to allow a lint, please make sure it's either obvious as to why -(e.g. 
clippy saying redundant clone but it's actually required) or it has a -comment saying why. Do not write inefficient code for the sake of satisfying -lints. If a lint is wrong and provides a more inefficient solution or -suggestion, allow the lint and mention that in a comment. - -### Running CI tests locally - -continuwuity's CI for tests, linting, formatting, audit, etc use -[`engage`][engage]. engage can be installed from nixpkgs or `cargo install -engage`. continuwuity's Nix flake devshell has the nixpkgs engage with `direnv`. -Use `engage --help` for more usage details. - -To test, format, lint, etc that CI would do, install engage, allow the `.envrc` -file using `direnv allow`, and run `engage`. - -All of the tasks are defined at the [engage.toml][engage.toml] file. You can -view all of them neatly by running `engage list` - -If you would like to run only a specific engage task group, use `just`: - -- `engage just ` -- Example: `engage just lints` - -If you would like to run a specific engage task in a specific group, use `just - [TASK]`: `engage just lints cargo-fmt` - -The following binaries are used in [`engage.toml`][engage.toml]: - -- [`engage`][engage] -- `nix` -- [`direnv`][direnv] -- `rustc` -- `cargo` -- `cargo-fmt` -- `rustdoc` -- `cargo-clippy` -- [`cargo-audit`][cargo-audit] -- [`cargo-deb`][cargo-deb] -- [`lychee`][lychee] -- [`markdownlint-cli`][markdownlint-cli] -- `dpkg` - -### Matrix tests - -CI runs [Complement][complement], but currently does not fail if results from -the checked-in results differ with the new results. If your changes are done to -fix Matrix tests, note that in your pull request. If more Complement tests start -failing from your changes, please review the logs (they are uploaded as -artifacts) and determine if they're intended or not. - -If you'd like to run Complement locally using Nix, see the -[testing](development/testing.md) page. - -[Sytest][sytest] support will come soon. 
- -### Writing documentation - -conduwuit's website uses [`mdbook`][mdbook] and deployed via CI using GitHub -Pages in the [`documentation.yml`][documentation.yml] workflow file with Nix's -mdbook in the devshell. All documentation is in the `docs/` directory at the top -level. The compiled mdbook website is also uploaded as an artifact. - -To build the documentation using Nix, run: `bin/nix-build-and-cache just .#book` - -The output of the mdbook generation is in `result/`. mdbooks can be opened in -your browser from the individual HTML files without any web server needed. - -### Inclusivity and Diversity - -All **MUST** code and write with inclusivity and diversity in mind. See the -[following page by Google on writing inclusive code and -documentation](https://developers.google.com/style/inclusive-documentation). - -This **EXPLICITLY** forbids usage of terms like "blacklist"/"whitelist" and -"master"/"slave", [forbids gender-specific words and -phrases](https://developers.google.com/style/pronouns#gender-neutral-pronouns), -forbids ableist language like "sanity-check", "cripple", or "insane", and -forbids culture-specific language (e.g. US-only holidays or cultures). - -No exceptions are allowed. Dependencies that may use these terms are allowed but -[do not replicate the name in your functions or -variables](https://developers.google.com/style/inclusive-documentation#write-around). - -In addition to language, write and code with the user experience in mind. This -is software that intends to be used by everyone, so make it easy and comfortable -for everyone to use. 🏳️‍⚧️ - -### Variable, comment, function, etc standards - -Rust's default style and standards with regards to [function names, variable -names, comments](https://rust-lang.github.io/api-guidelines/naming.html), etc -applies here. - -### Creating pull requests - -Please try to keep contributions to the Forgejo Instance. 
While the mirrors of continuwuity -allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely -manner. Additionally, please mark WIP or unfinished or incomplete PRs as drafts. -This prevents us from having to ping once in a while to double check the status -of it, especially when the CI completed successfully and everything so it -*looks* done. - - -Direct all PRs/MRs to the `main` branch. - -By sending a pull request or patch, you are agreeing that your changes are -allowed to be licenced under the Apache-2.0 licence and all of your conduct is -in line with the Contributor's Covenant, and continuwuity's Code of Conduct. - -Contribution by users who violate either of these code of conducts will not have -their contributions accepted. This includes users who have been banned from -continuwuityMatrix rooms for Code of Conduct violations. - -[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues -[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org -[complement]: https://github.com/matrix-org/complement/ -[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml -[engage]: https://charles.page.computer.surgery/engage/ -[sytest]: https://github.com/matrix-org/sytest/ -[cargo-deb]: https://github.com/kornelski/cargo-deb -[lychee]: https://github.com/lycheeverse/lychee -[markdownlint-cli]: https://github.com/igorshubovych/markdownlint-cli -[cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit -[direnv]: https://direnv.net/ -[mdbook]: https://rust-lang.github.io/mdBook/ -[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml diff --git a/Cargo.lock b/Cargo.lock index 2d8a2d0f..bdf9d374 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,21 +1,33 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 4 +version = 3 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] [[package]] -name = "adler2" -version = "2.0.0" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] [[package]] name = "aho-corasick" @@ -26,12 +38,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "aligned-vec" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aa90d7ce82d4be67b64039a3d588d38dbcc6736577de4a847025ce5b0c468d1" - [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -47,23 +53,17 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "allocator-api2" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" + [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" - -[[package]] -name = "anyhow" -version = "1.0.97" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" - -[[package]] -name = "arbitrary" -version = "1.4.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "arc-swap" @@ -71,17 +71,6 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" -[[package]] -name = "arg_enum_proc_macro" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "argon2" version = "0.5.3" @@ -94,20 +83,11 @@ dependencies = [ "password-hash", ] -[[package]] -name = "arrayvec" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" -dependencies = [ - "serde", -] - [[package]] name = "as_variant" -version = "1.3.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dbc3a507a82b17ba0d98f6ce8fd6954ea0c8152e98009d36a40d8dcc8ce078a" +checksum = "f38fa22307249f86fb7fad906fcae77f2564caeb56d7209103c551cd1cf4798f" [[package]] name = "assign" @@ -115,22 +95,11 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" -[[package]] -name = "async-channel" -version = "2.3.1" -source = "git+https://forgejo.ellis.link/continuwuation/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280" -dependencies = [ - "concurrent-queue", - "event-listener-strategy", - "futures-core", - "pin-project-lite", -] - [[package]] name = "async-compression" -version = "0.4.22" +version = "0.4.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" +checksum = "07dbbf24db18d609b1462965249abdf49129ccad073ec257da372adc83259c60" dependencies = [ "brotli", "flate2", @@ -144,46 +113,24 @@ dependencies = [ [[package]] name = "async-recursion" -version = "1.1.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +checksum = "30c5ef0ede93efbf733c1a727f3b6b5a1060bbedd5600183e66f6e4be4af0ec5" dependencies = [ "proc-macro2", "quote", - "syn", -] - -[[package]] -name = "async-stream" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "syn 2.0.58", ] [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.58", ] [[package]] @@ -195,79 +142,27 @@ dependencies = [ "bytemuck", ] -[[package]] -name = "atomic-waker" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" - [[package]] name = "autocfg" -version = "1.4.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" - -[[package]] -name = "av1-grain" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6678909d8c5d46a42abcf571271e15fdbc0a225e3646cf23762cd415046c78bf" -dependencies = [ - "anyhow", - "arrayvec", - "log", - "nom", - "num-rational", - "v_frame", -] - -[[package]] -name = "avif-serialize" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98922d6a4cfbcb08820c69d8eeccc05bb1f29bfa06b4f5b1dbfe9a868bd7608e" -dependencies = [ - "arrayvec", -] - -[[package]] -name = "aws-lc-rs" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b756939cb2f8dc900aa6dcd505e6e2428e9cae7ff7b028c49e3946efa70878" -dependencies = [ - "aws-lc-sys", - "zeroize", -] - -[[package]] -name = "aws-lc-sys" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f7720b74ed28ca77f90769a71fd8c637a0137f6fae4ae947e1050229cff57f" -dependencies = [ - "bindgen 0.69.5", - "cc", - "cmake", - "dunce", - "fs_extra", -] +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "axum" -version = "0.7.9" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", + "bitflags 1.3.2", "bytes", "futures-util", - "http", - "http-body", - "http-body-util", - "hyper", - "hyper-util", + "headers", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.28", "itoa", "matchit", "memchr", @@ -280,121 +175,78 @@ dependencies = [ "serde_path_to_error", "serde_urlencoded", "sync_wrapper", - "tokio", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", - "tracing", -] - -[[package]] -name = "axum-client-ip" -version 
= "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eefda7e2b27e1bda4d6fa8a06b50803b8793769045918bc37ad062d48a6efac" -dependencies = [ - "axum", - "forwarded-header-value", - "serde", ] [[package]] name = "axum-core" -version = "0.4.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", - "http-body-util", + "http 0.2.12", + "http-body 0.4.6", "mime", - "pin-project-lite", "rustversion", - "sync_wrapper", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "axum-extra" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c794b30c904f0a1c2fb7740f7df7f7972dfaa14ef6f57cb6178dc63e5dca2f04" -dependencies = [ - "axum", - "axum-core", - "bytes", - "futures-util", - "headers", - "http", - "http-body", - "http-body-util", - "mime", - "pin-project-lite", - "serde", - "tower 0.5.2", "tower-layer", "tower-service", ] [[package]] name = "axum-server" -version = "0.7.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "495c05f60d6df0093e8fb6e74aa5846a0ad06abaf96d76166283720bf740f8ab" +checksum = "447f28c85900215cc1bea282f32d4a2f22d55c5a300afdfbc661c8d6a632e063" dependencies = [ "arc-swap", "bytes", - "fs-err", - "http", - "http-body", - "hyper", - "hyper-util", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.28", "pin-project-lite", - "rustls", - "rustls-pemfile", - "rustls-pki-types", + "rustls 0.21.10", + "rustls-pemfile 1.0.4", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tower-service", ] [[package]] name = "axum-server-dual-protocol" -version = "0.7.0" +version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2164551db024e87f20316d164eab9f5ad342d8188b08051ceb15ca92a60ea7b7" +checksum = "3d1a8f5076b5dbfeb706bcce30fe73caf20971e6e5ca80b83a7f1d990e73e185" dependencies = [ "axum-server", "bytes", - "http", - "http-body-util", + "http 0.2.12", + "hyper 0.14.28", "pin-project", - "rustls", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", "tokio-util", "tower-layer", - "tower-service", ] [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", + "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", ] [[package]] @@ -405,63 +257,36 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" [[package]] name = "base64ct" -version = "1.7.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bindgen" -version = "0.69.5" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.5.0", "cexpr", "clang-sys", "itertools 0.12.1", "lazy_static", "lazycell", - "log", - 
"prettyplease", "proc-macro2", "quote", "regex", - "rustc-hash 1.1.0", + "rustc-hash", "shlex", - "syn", - "which", + "syn 2.0.58", ] -[[package]] -name = "bindgen" -version = "0.71.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" -dependencies = [ - "bitflags 2.9.0", - "cexpr", - "clang-sys", - "itertools 0.13.0", - "proc-macro2", - "quote", - "regex", - "rustc-hash 2.1.1", - "shlex", - "syn", -] - -[[package]] -name = "bit_field" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" - [[package]] name = "bitflags" version = "1.3.2" @@ -470,15 +295,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" - -[[package]] -name = "bitstream-io" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6099cdc01846bc367c4e7dd630dc5966dccf36b652fae7a74e17b640411a91b2" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "blake2" @@ -498,20 +317,11 @@ dependencies = [ "generic-array", ] -[[package]] -name = "blurhash" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79769241dcd44edf79a732545e8b5cec84c247ac060f5252cd51885d093a8fc" -dependencies = [ - "image", -] - [[package]] name = "brotli" -version = "7.0.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" +checksum = "125740193d7fee5cc63ab9e16c2fdc4e07c74ba755cc53b327d6ea029e9fc569" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", 
@@ -520,31 +330,25 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" +checksum = "65622a320492e09b5e0ac436b14c54ff68199bac392d0e89a6832c4518eea525" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", ] -[[package]] -name = "built" -version = "0.7.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b" - [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.22.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" +checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" [[package]] name = "byteorder" @@ -552,53 +356,31 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" -[[package]] -name = "byteorder-lite" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" - [[package]] name = "bytes" -version = "1.10.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" - -[[package]] -name = "bytesize" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a3c8f83209414aacf0eeae3cf730b18d6981697fba62f200fcfb92b9f082acba" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "bzip2-sys" -version = "0.1.13+1.0.8" +version = "0.1.11+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" dependencies = [ "cc", + "libc", "pkg-config", ] -[[package]] -name = "cargo_toml" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbd1fe9db3ebf71b89060adaf7b0504c2d6a425cf061313099547e382c2e472" -dependencies = [ - "serde", - "toml", -] - [[package]] name = "cc" -version = "1.2.17" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" +checksum = "2678b2e3449475e95b0aa6f9b506a28e61b3dc8996592b983695e8ebb58a8b41" dependencies = [ "jobserver", "libc", - "shlex", ] [[package]] @@ -610,16 +392,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-expr" -version = "0.15.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" -dependencies = [ - "smallvec", - "target-lexicon", -] - [[package]] name = "cfg-if" version = "1.0.0" @@ -628,33 +400,24 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" -version = "0.2.1" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" - -[[package]] -name = "checked_ops" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b491d76efc1d99d74de3c8529bee64c62312c275c7eb124f9185291de45801d5" -dependencies = [ - "num-traits", -] 
+checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "num-traits", ] [[package]] name = "clang-sys" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -663,9 +426,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.35" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -673,9 +436,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.35" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstyle", "clap_lex", @@ -683,30 +446,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.32" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn", + "syn 2.0.58", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" - -[[package]] -name = "cmake" -version = "0.1.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" -dependencies = [ - "cc", -] +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "color_quant" @@ -715,323 +469,91 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] -name = "concurrent-queue" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "conduwuit" -version = "0.5.0-rc.5" -dependencies = [ - "clap", - "conduwuit_admin", - "conduwuit_api", - "conduwuit_core", - "conduwuit_database", - "conduwuit_router", - "conduwuit_service", - "console-subscriber", - "const-str", - "hardened_malloc-rs", - "log", - "opentelemetry", - "opentelemetry-jaeger", - "opentelemetry_sdk", - "sentry", - "sentry-tower", - "sentry-tracing", - "tokio", - "tokio-metrics", - "tracing", - "tracing-flame", - "tracing-opentelemetry", - "tracing-subscriber", -] - -[[package]] -name = "conduwuit_admin" -version = "0.5.0-rc.5" -dependencies = [ - "clap", - "conduwuit_api", - "conduwuit_core", - "conduwuit_database", - "conduwuit_macros", - "conduwuit_service", - "const-str", - "futures", - "log", - "ruma", - "serde_json", - "serde_yaml", - "tokio", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "conduwuit_api" -version = "0.5.0-rc.5" -dependencies = [ - "async-trait", - "axum", - "axum-client-ip", - "axum-extra", - "base64 0.22.1", - "bytes", - "conduwuit_core", - "conduwuit_service", - "const-str", - "futures", - "hmac", - 
"http", - "http-body-util", - "hyper", - "ipaddress", - "itertools 0.14.0", - "log", - "rand 0.8.5", - "reqwest", - "ruma", - "serde", - "serde_html_form", - "serde_json", - "sha1", - "tokio", - "tracing", -] - -[[package]] -name = "conduwuit_core" -version = "0.5.0-rc.5" +name = "conduit" +version = "0.7.0+conduwuit-0.2.0" dependencies = [ "argon2", - "arrayvec", + "async-trait", "axum", - "axum-extra", + "axum-server", + "axum-server-dual-protocol", + "base64 0.22.0", "bytes", - "bytesize", - "cargo_toml", - "checked_ops", "chrono", "clap", - "conduwuit_macros", - "const-str", - "core_affinity", - "ctor", "cyborgtime", "either", "figment", - "futures", + "futures-util", "hardened_malloc-rs", - "http", - "http-body-util", + "hickory-resolver", + "hmac", + "http 0.2.12", + "hyper 0.14.28", + "hyperlocal", + "image", "ipaddress", - "itertools 0.14.0", - "libc", - "libloading", + "itertools 0.12.1", + "jsonwebtoken", "log", - "maplit", + "loole", + "lru-cache", "nix", - "num-traits", - "rand 0.8.5", + "num_cpus", + "opentelemetry", + "opentelemetry-jaeger", + "opentelemetry_sdk", + "parking_lot", + "rand", "regex", - "reqwest", + "reqwest 0.11.27", "ring", "ruma", - "sanitize-filename", - "serde", - "serde_json", - "serde_regex", - "serde_yaml", - "smallstr", - "smallvec", - "thiserror 2.0.12", - "tikv-jemalloc-ctl", - "tikv-jemalloc-sys", - "tikv-jemallocator", - "tokio", - "tokio-metrics", - "toml", - "tracing", - "tracing-core", - "tracing-subscriber", - "url", -] - -[[package]] -name = "conduwuit_database" -version = "0.5.0-rc.5" -dependencies = [ - "async-channel", - "conduwuit_core", - "const-str", - "futures", - "log", - "minicbor", - "minicbor-serde", + "rusqlite", "rust-rocksdb", - "serde", - "serde_json", - "tokio", - "tracing", -] - -[[package]] -name = "conduwuit_macros" -version = "0.5.0-rc.5" -dependencies = [ - "itertools 0.14.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "conduwuit_router" -version = "0.5.0-rc.5" -dependencies = [ 
- "axum", - "axum-client-ip", - "axum-server", - "axum-server-dual-protocol", - "bytes", - "conduwuit_admin", - "conduwuit_api", - "conduwuit_core", - "conduwuit_service", - "const-str", - "futures", - "http", - "http-body-util", - "hyper", - "hyper-util", - "log", - "ruma", - "rustls", "sd-notify", "sentry", "sentry-tower", "sentry-tracing", + "serde", + "serde_html_form", "serde_json", + "serde_regex", + "serde_yaml", + "sha-1", + "sha2", + "thiserror", + "thread_local", + "tikv-jemalloc-ctl", + "tikv-jemallocator", "tokio", - "tower 0.5.2", + "tower", "tower-http", "tracing", -] - -[[package]] -name = "conduwuit_service" -version = "0.5.0-rc.5" -dependencies = [ - "async-trait", - "base64 0.22.1", - "blurhash", - "bytes", - "conduwuit_core", - "conduwuit_database", - "const-str", - "either", - "futures", - "hickory-resolver 0.25.1", - "http", - "image", - "ipaddress", - "itertools 0.14.0", - "log", - "loole", - "lru-cache", - "rand 0.8.5", - "regex", - "reqwest", - "ruma", - "rustyline-async", - "serde", - "serde_json", - "serde_yaml", - "sha2", - "termimad", - "tokio", - "tracing", + "tracing-flame", + "tracing-opentelemetry", + "tracing-subscriber", "url", "webpage", ] -[[package]] -name = "console-api" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857" -dependencies = [ - "futures-core", - "prost", - "prost-types", - "tonic", - "tracing-core", -] - -[[package]] -name = "console-subscriber" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01" -dependencies = [ - "console-api", - "crossbeam-channel", - "crossbeam-utils", - "futures-task", - "hdrhistogram", - "humantime", - "hyper-util", - "prost", - "prost-types", - "serde", - "serde_json", - "thread_local", - "tokio", - "tokio-stream", - "tonic", - "tracing", - "tracing-core", - 
"tracing-subscriber", -] - [[package]] name = "const-oid" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" -[[package]] -name = "const-str" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e991226a70654b49d34de5ed064885f0bef0348a8e70018b8ff1ac80aa984a2" - [[package]] name = "const_panic" -version = "0.2.12" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2459fc9262a1aa204eb4b5764ad4f189caec88aea9634389c0a25f8be7f6265e" - -[[package]] -name = "coolor" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "691defa50318376447a73ced869862baecfab35f6aabaa91a4cd726b315bfe1a" -dependencies = [ - "crossterm", -] +checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -1039,157 +561,42 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.7" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "core_affinity" -version = "0.8.1" -source = "git+https://forgejo.ellis.link/continuwuation/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" -dependencies = [ - "libc", - "num_cpus", - "winapi", -] +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.17" +version = "0.2.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] -[[package]] -name = "critical-section" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" - -[[package]] -name = "crokey" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ff945e42bb93d29b10ba509970066a269903a932f0ea07d99d8621f97e90d7" -dependencies = [ - "crokey-proc_macros", - "crossterm", - "once_cell", - "serde", - "strict", -] - -[[package]] -name = "crokey-proc_macros" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "665f2180fd82d0ba2bf3deb45fafabb18f23451024ff71ee47f6bfdfb4bbe09e" -dependencies = [ - "crossterm", - "proc-macro2", - "quote", - "strict", - "syn", -] - -[[package]] -name = "crossbeam" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" -dependencies = [ - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-epoch", - "crossbeam-queue", - "crossbeam-utils", -] - [[package]] name = "crossbeam-channel" -version = "0.5.15" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-deque" -version = 
"0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.21" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "crossterm" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" -dependencies = [ - "bitflags 2.9.0", - "crossterm_winapi", - "futures-core", - "mio", - "parking_lot", - "rustix", - "signal-hook", - "signal-hook-mio", - "winapi", -] - -[[package]] -name = "crossterm_winapi" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" -dependencies = [ - "winapi", -] - -[[package]] -name = "crunchy" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crypto-common" @@ -1201,27 +608,18 @@ dependencies = [ "typenum", ] -[[package]] -name 
= "ctor" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" -dependencies = [ - "quote", - "syn", -] - [[package]] name = "curve25519-dalek" -version = "4.1.3" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest", "fiat-crypto", + "platforms", "rustc_version", "subtle", "zeroize", @@ -1235,7 +633,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.58", ] [[package]] @@ -1246,9 +644,9 @@ checksum = "817fa642fb0ee7fe42e95783e00e0969927b96091bdd4b9b1af082acd943913b" [[package]] name = "data-encoding" -version = "2.8.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "date_header" @@ -1278,9 +676,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.0" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -1296,23 +694,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "displaydoc" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "dunce" -version = "1.0.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" - [[package]] name = "ed25519" version = "2.2.3" @@ -1331,7 +712,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core 0.6.4", + "rand_core", "serde", "sha2", "subtle", @@ -1340,96 +721,72 @@ dependencies = [ [[package]] name = "either" -version = "1.15.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" dependencies = [ "serde", ] [[package]] -name = "enum-as-inner" -version = "0.6.1" +name = "encoding_rs" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ - "heck", + "cfg-if", +] + +[[package]] +name = "enum-as-inner" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +dependencies = [ + "heck 0.4.1", "proc-macro2", "quote", - "syn", + "syn 2.0.58", ] [[package]] name = "equivalent" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] -name = "errno" -version = "0.3.10" +name = "fallible-iterator" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" -dependencies = [ - "libc", - "windows-sys 0.59.0", -] +checksum 
= "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] -name = "event-listener" -version = "5.3.1" -source = "git+https://forgejo.ellis.link/continuwuation/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.4" +name = "fallible-streaming-iterator" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" -dependencies = [ - "event-listener", - "pin-project-lite", -] - -[[package]] -name = "exr" -version = "1.73.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83197f59927b46c04a183a619b7c29df34e63e63c7869320862268c0ef687e0" -dependencies = [ - "bit_field", - "half", - "lebe", - "miniz_oxide", - "rayon-core", - "smallvec", - "zune-inflate", -] +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fdeflate" -version = "0.3.7" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6853b52649d4ac5c0bd02320cddc5ba956bdb407c4b75a2c6b75bf51500f8c" +checksum = "4f9bfee30e4dedf0ab8b422f03af778d9612b63f502710fc500a334ebe2de645" dependencies = [ "simd-adler32", ] [[package]] name = "fiat-crypto" -version = "0.2.9" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" [[package]] name = "figment" -version = "0.10.19" +version = "0.10.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" +checksum = 
"752eb150770d6f51eb24d60e3ff84a2c24ccc5e5b3b0f550917ce5ec77c13fe4" dependencies = [ "atomic", "pear", @@ -1453,9 +810,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", @@ -1476,32 +833,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "forwarded-header-value" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" -dependencies = [ - "nonempty", - "thiserror 1.0.69", -] - -[[package]] -name = "fs-err" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" -dependencies = [ - "autocfg", - "tokio", -] - -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - [[package]] name = "futf" version = "0.1.5" @@ -1512,25 +843,11 @@ dependencies = [ "new_debug_unreachable", ] -[[package]] -name = "futures" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ 
"futures-core", "futures-sink", @@ -1538,15 +855,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1555,40 +872,39 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.58", ] [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = 
"38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ - "futures-channel", "futures-core", "futures-io", "futures-macro", @@ -1600,19 +916,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generator" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" -dependencies = [ - "cfg-if", - "libc", - "log", - "rustversion", - "windows 0.58.0", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1625,28 +928,14 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", -] - -[[package]] -name = "getrandom" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasi", "wasm-bindgen", ] @@ -1662,29 +951,29 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.1" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "glob" -version = "0.3.2" +version = "0.3.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.4.8" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ - "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http", - "indexmap 2.8.0", + "futures-util", + "http 0.2.12", + "indexmap", "slab", "tokio", "tokio-util", @@ -1692,56 +981,40 @@ dependencies = [ ] [[package]] -name = "half" -version = "2.5.0" +name = "hardened_malloc-rs" +version = "0.1.1+12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" +checksum = "40505dfce340c2deec648b3b413c1f484b5b21f2aedb69e3ae68272a5f35d430" + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "cfg-if", - "crunchy", + "ahash", + "allocator-api2", ] [[package]] -name = "hardened_malloc-rs" -version = "0.1.2+12" +name = "hashlink" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "647deb1583b14d160f85f3ff626f20b6edd366e3852c9843b06077388f794cb6" - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hashbrown" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" - -[[package]] -name = 
"hdrhistogram" -version = "7.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" +checksum = "692eaaf7f7607518dd3cef090f1474b61edc5301d8012f09579920df68b725ee" dependencies = [ - "base64 0.21.7", - "byteorder", - "flate2", - "nom", - "num-traits", + "hashbrown", ] [[package]] name = "headers" -version = "0.4.0" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ "base64 0.21.7", "bytes", "headers-core", - "http", + "http 0.2.12", "httpdate", "mime", "sha1", @@ -1749,13 +1022,19 @@ dependencies = [ [[package]] name = "headers-core" -version = "0.3.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http", + "http 0.2.12", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "heck" version = "0.5.0" @@ -1776,38 +1055,12 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92652067c9ce6f66ce53cc38d1169daa36e6e7eb7dd3b63b5103bd9d97117248" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna", - "ipnet", - "once_cell", - "rand 0.8.5", - "thiserror 1.0.69", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "hickory-proto" -version 
= "0.25.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d844af74f7b799e41c78221be863bade11c430d46042c3b49ca8ae0c6d27287" +version = "0.24.0" +source = "git+https://github.com/hickory-dns/hickory-dns?rev=94ac564c3f677e038f7255ddb762e9301d0f2c5d#94ac564c3f677e038f7255ddb762e9301d0f2c5d" dependencies = [ "async-recursion", "async-trait", "cfg-if", - "critical-section", "data-encoding", "enum-as-inner", "futures-channel", @@ -1816,10 +1069,8 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.0", - "ring", - "serde", - "thiserror 2.0.12", + "rand", + "thiserror", "tinyvec", "tokio", "tracing", @@ -1828,43 +1079,20 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" +version = "0.24.0" +source = "git+https://github.com/hickory-dns/hickory-dns?rev=94ac564c3f677e038f7255ddb762e9301d0f2c5d#94ac564c3f677e038f7255ddb762e9301d0f2c5d" dependencies = [ "cfg-if", "futures-util", - "hickory-proto 0.24.4", + "hickory-proto", "ipconfig", "lru-cache", "once_cell", "parking_lot", - "rand 0.8.5", + "rand", "resolv-conf", "smallvec", - "thiserror 1.0.69", - "tokio", - "tracing", -] - -[[package]] -name = "hickory-resolver" -version = "0.25.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a128410b38d6f931fcc6ca5c107a3b02cabd6c05967841269a4ad65d23c44331" -dependencies = [ - "cfg-if", - "futures-util", - "hickory-proto 0.25.1", - "ipconfig", - "moka", - "once_cell", - "parking_lot", - "rand 0.9.0", - "resolv-conf", - "serde", - "smallvec", - "thiserror 2.0.12", + "thiserror", "tokio", "tracing", ] @@ -1879,12 +1107,14 @@ dependencies = [ ] [[package]] -name = "home" -version = "0.5.11" +name = "hostname" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ - "windows-sys 0.59.0", + "libc", + "match_cfg", + "winapi", ] [[package]] @@ -1895,28 +1125,28 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ "cfg-if", "libc", - "windows 0.52.0", + "windows", ] [[package]] name = "html5ever" -version = "0.27.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c13771afe0e6e846f1e67d038d4cb29998a6779f93c809212e4e9c32efd244d4" +checksum = "bea68cab48b8459f17cf1c944c67ddc572d272d9f2b274140f223ecb1da4a3b7" dependencies = [ "log", "mac", "markup5ever", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "http" -version = "1.3.1" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1924,42 +1154,61 @@ dependencies = [ ] [[package]] -name = "http-auth" -version = "0.1.10" +name = "http" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "150fa4a9462ef926824cf4519c84ed652ca8f4fbae34cb8af045b5cbcaf98822" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ - "memchr", + "bytes", + "fnv", + "itoa", ] [[package]] name = "http-body" -version = "1.0.1" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", -] - -[[package]] -name = "http-body-util" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" -dependencies = [ - "bytes", - "futures-core", - "http", - "http-body", + "http 0.2.12", "pin-project-lite", ] [[package]] -name = "httparse" -version = "1.10.1" +name = "http-body" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +dependencies = [ + "bytes", + "futures-core", + "http 1.1.0", + "http-body 1.0.0", + "pin-project-lite", +] + +[[package]] +name = "http-range-header" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -1968,25 +1217,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] -name = "humantime" -version = "2.2.0" +name = "hyper" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http 0.2.12", + "http-body 0.4.6", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + 
"tower-service", + "tracing", + "want", +] [[package]] name = "hyper" -version = "1.6.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2", - "http", - "http-body", + "http 1.1.0", + "http-body 1.0.0", "httparse", - "httpdate", "itoa", "pin-project-lite", "smallvec", @@ -1996,251 +1261,111 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", - "hyper-util", - "rustls", - "rustls-native-certs", - "rustls-pki-types", + "http 0.2.12", + "hyper 0.14.28", + "rustls 0.21.10", "tokio", - "tokio-rustls", - "tower-service", - "webpki-roots", + "tokio-rustls 0.24.1", ] [[package]] -name = "hyper-timeout" -version = "0.5.2" +name = "hyper-rustls" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ - "hyper", + "futures-util", + "http 1.1.0", + "hyper 1.3.1", "hyper-util", - "pin-project-lite", + "rustls 0.22.3", + "rustls-pki-types", "tokio", + "tokio-rustls 0.25.0", "tower-service", ] [[package]] name = "hyper-util" -version = "0.1.11" -source = "git+https://forgejo.ellis.link/continuwuation/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", "futures-channel", "futures-util", - "http", - "http-body", - "hyper", - "libc", + "http 1.1.0", + "http-body 1.0.0", + "hyper 1.3.1", "pin-project-lite", "socket2", "tokio", + "tower", "tower-service", "tracing", ] [[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +name = "hyperlocal" +version = "0.8.0" +source = "git+https://github.com/softprops/hyperlocal?rev=2ee4d149644600d326559af0d2b235c945b05c04#2ee4d149644600d326559af0d2b235c945b05c04" dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", 
-] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" - -[[package]] -name = "icu_properties" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "hex", + "hyper 0.14.28", + "pin-project-lite", + "tokio", ] [[package]] name = "idna" -version = "1.0.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", -] - -[[package]] -name = "idna_adapter" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" -dependencies = [ - "icu_normalizer", - "icu_properties", + "unicode-bidi", + "unicode-normalization", ] [[package]] name = "image" -version = "0.25.6" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db35664ce6b9810857a38a906215e75a9c879f0696556a39f59c62829710251a" +checksum = "fd54d660e773627692c524beaad361aca785a4f9f5730ce91f42aabe5bce3d11" dependencies = [ "bytemuck", - "byteorder-lite", + "byteorder", "color_quant", - "exr", "gif", "image-webp", "num-traits", "png", - "qoi", - "ravif", - "rayon", - "rgb", - "tiff", "zune-core", "zune-jpeg", ] [[package]] name = "image-webp" -version = "0.2.1" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f" +checksum = "7a84a25dcae3ac487bc24ef280f9e20c79c9b1a3e5e32cbed3041d1c514aa87c" dependencies = [ - "byteorder-lite", - "quick-error", -] - -[[package]] -name = "imgref" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0263a3d970d5c054ed9312c0057b4f3bde9c0b33836d3637361d4a9e6e7a408" - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", + "byteorder", + "thiserror", ] [[package]] name = "indexmap" -version = "2.8.0" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown", "serde", ] @@ -2256,17 +1381,6 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" -[[package]] -name = "interpolate_name" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ipaddress" version = "0.1.3" @@ -2290,14 +1404,23 @@ dependencies = [ "socket2", "widestring", "windows-sys 0.48.0", - "winreg", + "winreg 0.50.0", ] [[package]] name = "ipnet" -version = "2.11.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" + +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] [[package]] name = "itertools" @@ -2308,53 +1431,27 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" -dependencies = [ - "either", -] - [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" dependencies = [ - "getrandom 0.3.2", "libc", ] -[[package]] -name = "jpeg-decoder" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" - [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ - "once_cell", "wasm-bindgen", ] @@ -2377,10 +1474,25 @@ dependencies = [ ] [[package]] -name = "konst" -version = "0.3.16" +name = "jsonwebtoken" +version = "9.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4381b9b00c55f251f2ebe9473aef7c117e96828def1a7cb3bd3f0f903c6894e9" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" +dependencies = [ + "base64 0.21.7", + "js-sys", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "konst" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d712a8c49d4274f8d8a5cf61368cb5f3c143d149882b1a2918129e53395fdb0" dependencies = [ "const_panic", "konst_kernel", @@ -2389,41 +1501,18 @@ dependencies = [ [[package]] name = "konst_kernel" -version = "0.3.15" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4b1eb7788f3824c629b1116a7a9060d6e898c358ebff59070093d51103dcc3c" +checksum = "dac6ea8c376b6e208a81cf39b8e82bebf49652454d98a4829e907dac16ef1790" dependencies = [ "typewit", ] -[[package]] -name = "lazy-regex" -version = "3.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"60c7310b93682b36b98fa7ea4de998d3463ccbebd94d935d6b48ba5b6ffa7126" -dependencies = [ - "lazy-regex-proc_macros", - "once_cell", - "regex", -] - -[[package]] -name = "lazy-regex-proc_macros" -version = "3.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ba01db5ef81e17eb10a5e0f2109d1b3a3e29bac3070fdbd7d156bf7dbd206a1" -dependencies = [ - "proc-macro2", - "quote", - "regex", - "syn", -] - [[package]] name = "lazy_static" -version = "1.5.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "lazycell" @@ -2431,43 +1520,37 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" -[[package]] -name = "lebe" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" - [[package]] name = "libc" -version = "0.2.171" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" - -[[package]] -name = "libfuzzer-sys" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf78f52d400cf2d84a3a973a78a592b4adc535739e0a5597a0da6f0c357adc75" -dependencies = [ - "arbitrary", - "cc", -] +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - 
"windows-targets 0.52.6", + "windows-targets 0.52.5", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.28.0" +source = "git+https://github.com/rusqlite/rusqlite?rev=e00b626e2b1c67347d789fb7f600281705c89381#e00b626e2b1c67347d789fb7f600281705c89381" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", ] [[package]] name = "libz-sys" -version = "1.1.22" +version = "1.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" dependencies = [ "cc", "pkg-config", @@ -2480,23 +1563,11 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - -[[package]] -name = "litemap" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" - [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ "autocfg", "scopeguard", @@ -2504,41 +1575,15 @@ dependencies = [ [[package]] name = "log" -version = "0.4.27" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "loole" -version = "0.4.0" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2998397c725c822c6b2ba605fd9eb4c6a7a0810f1629ba3cc232ef4f0308d96" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "loom" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" -dependencies = [ - "cfg-if", - "generator", - "scoped-tls", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "loop9" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fae87c125b03c1d2c0150c90365d7d6bcc53fb73a9acaef207d2d065860f062" -dependencies = [ - "imgref", -] +checksum = "c6725f0feab07fcf90f6de5417c06d7fef976fa6e5912fa9e21cb5e4dc6ae5da" [[package]] name = "lru-cache" @@ -2551,9 +1596,9 @@ dependencies = [ [[package]] name = "lz4-sys" -version = "1.11.1+lz4-1.10.0" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" dependencies = [ "cc", "libc", @@ -2573,9 +1618,9 @@ checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] name = "markup5ever" -version = "0.12.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ce3abbeba692c8b8441d036ef91aea6df8da2c6b6e21c7e14d3c18e526be45" +checksum = "7a2629bb1404f3d34c2e921f21fd34ba00b206124c81f65c50b43b6aaefeb016" dependencies = [ "log", "phf", @@ -2587,9 +1632,9 @@ dependencies = [ [[package]] name = "markup5ever_rcdom" -version = "0.3.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edaa21ab3701bfee5099ade5f7e1f84553fd19228cf332f13cd6e964bf59be18" +checksum = "b9521dd6750f8e80ee6c53d65e2e4656d7de37064f3a7a5d2d11d05df93839c2" dependencies = [ "html5ever", 
"markup5ever", @@ -2597,6 +1642,12 @@ dependencies = [ "xml5ever", ] +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.1.0" @@ -2612,21 +1663,11 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" -[[package]] -name = "maybe-rayon" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" -dependencies = [ - "cfg-if", - "rayon", -] - [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "mime" @@ -2634,45 +1675,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "minicbor" -version = "0.26.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1936e27fffe7d8557c060eb82cb71668608cd1a5fb56b63e66d22ae8d7564321" -dependencies = [ - "minicbor-derive", -] - -[[package]] -name = "minicbor-derive" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9882ef5c56df184b8ffc107fc6c61e33ee3a654b021961d790a78571bb9d67a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "minicbor-serde" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e45e8beeefea1b8b6f52fa188a5b6ea3746c2885606af8d4d8bf31cee633fb" -dependencies = [ - "minicbor", - "serde", -] 
- -[[package]] -name = "minimad" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c5d708226d186590a7b6d4a9780e2bdda5f689e0d58cd17012a298efd745d2" -dependencies = [ - "once_cell", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -2681,43 +1683,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ - "adler2", + "adler", "simd-adler32", ] [[package]] name = "mio" -version = "1.0.3" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", - "log", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", -] - -[[package]] -name = "moka" -version = "0.12.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" -dependencies = [ - "crossbeam-channel", - "crossbeam-epoch", - "crossbeam-utils", - "loom", - "parking_lot", - "portable-atomic", - "rustc_version", - "smallvec", - "tagptr", - "thiserror 1.0.69", - "uuid", + "wasi", + "windows-sys 0.48.0", ] [[package]] @@ -2728,11 +1710,11 @@ checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" [[package]] name = "nix" -version = "0.29.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - 
"bitflags 2.9.0", + "bitflags 2.5.0", "cfg-if", "cfg_aliases", "libc", @@ -2748,18 +1730,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "nonempty" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" - -[[package]] -name = "noop_proc_macro" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8" - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2772,9 +1742,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41" dependencies = [ "num-bigint", "num-complex", @@ -2786,19 +1756,20 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.6" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ + "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-complex" -version = "0.4.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" dependencies = [ "num-traits", ] @@ -2809,17 +1780,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" -[[package]] -name = "num-derive" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "num-integer" version = "0.1.46" @@ -2831,9 +1791,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.45" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" dependencies = [ "autocfg", "num-integer", @@ -2842,10 +1802,11 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ + "autocfg", "num-bigint", "num-integer", "num-traits", @@ -2853,9 +1814,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.19" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", ] @@ -2872,28 +1833,24 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.21.3" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" -dependencies = [ - "critical-section", - "portable-atomic", -] +checksum = 
"3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "openssl-probe" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" @@ -2903,11 +1860,11 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.8.0", + "indexmap", "js-sys", "once_cell", "pin-project-lite", - "thiserror 1.0.69", + "thiserror", "urlencoding", ] @@ -2950,10 +1907,10 @@ dependencies = [ "glob", "once_cell", "opentelemetry", - "ordered-float 4.6.0", + "ordered-float 4.2.0", "percent-encoding", - "rand 0.8.5", - "thiserror 1.0.69", + "rand", + "thiserror", "tokio", "tokio-stream", ] @@ -2969,18 +1926,18 @@ dependencies = [ [[package]] name = "ordered-float" -version = "4.6.0" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" +checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" dependencies = [ "num-traits", ] [[package]] name = "os_info" -version = "3.10.0" +version = "3.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a604e53c24761286860eba4e2c8b23a0161526476b1de520139d69cdb85a6b5" +checksum = "ae99c7fa6dd38c7cafe1ec085e804f8f555a2f8659b0dbe03f1f9963a9b51092" dependencies = [ "log", "serde", @@ -2993,17 +1950,11 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "parking" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" - 
[[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", @@ -3011,15 +1962,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -3029,15 +1980,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] [[package]] name = "paste" -version = "1.0.15" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pear" @@ -3059,7 +2010,17 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn", + "syn 2.0.58", +] + +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.0", + "serde", ] [[package]] @@ -3070,18 +2031,18 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "phf" -version = "0.11.3" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259" dependencies = [ "phf_shared", ] [[package]] name = "phf_codegen" -version = "0.11.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" +checksum = "4fb1c3a8bc4dd4e5cfce29b44ffc14bedd2ee294559a294e2a4d4c9e9a6a13cd" dependencies = [ "phf_generator", "phf_shared", @@ -3089,48 +2050,48 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" dependencies = [ "phf_shared", - "rand 0.8.5", + "rand", ] [[package]] name = "phf_shared" -version = "0.11.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.10" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.10" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.58", ] [[package]] name = "pin-project-lite" 
-version = "0.2.16" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -3150,15 +2111,21 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.32" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" + +[[package]] +name = "platforms" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "png" -version = "0.17.16" +version = "0.17.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82151a2fc869e011c153adc57cf2789ccb8d9906ce52c0b39a6b5697749d7526" +checksum = "06e4b0d3d1312775e782c86c91a111aa1f910cbb65e1337f9975b5f9a554b5e1" dependencies = [ "bitflags 1.3.2", "crc32fast", @@ -3167,12 +2134,6 @@ dependencies = [ "miniz_oxide", ] -[[package]] -name = "portable-atomic" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" - [[package]] name = "powerfmt" version = "0.2.0" @@ -3181,12 +2142,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.21" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy", -] +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "precomputed-hash" @@ -3194,30 +2152,21 @@ version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" -[[package]] -name = "prettyplease" -version = "0.2.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5316f57387668042f561aae71480de936257848f9c43ce528e311d89a07cadeb" -dependencies = [ - "proc-macro2", - "syn", -] - [[package]] name = "proc-macro-crate" -version = "3.3.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" dependencies = [ + "toml_datetime", "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] @@ -3230,164 +2179,26 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.58", "version_check", "yansi", ] -[[package]] -name = "profiling" -version = "1.0.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afbdc74edc00b6f6a218ca6a5364d6226a259d4b8ea1af4a0ea063f27e179f4d" -dependencies = [ - "profiling-procmacros", -] - -[[package]] -name = "profiling-procmacros" -version = "1.0.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "prost" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" -dependencies = [ - "bytes", - 
"prost-derive", -] - -[[package]] -name = "prost-derive" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" -dependencies = [ - "anyhow", - "itertools 0.14.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "prost-types" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" -dependencies = [ - "prost", -] - -[[package]] -name = "pulldown-cmark" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" -dependencies = [ - "bitflags 2.9.0", - "memchr", - "pulldown-cmark-escape", - "unicase", -] - -[[package]] -name = "pulldown-cmark-escape" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "007d8adb5ddab6f8e3f491ac63566a7d5002cc7ed73901f72057943fa71ae1ae" - -[[package]] -name = "qoi" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001" -dependencies = [ - "bytemuck", -] - [[package]] name = "quick-error" -version = "2.0.1" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - -[[package]] -name = "quinn" -version = "0.11.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" -dependencies = [ - "bytes", - "cfg_aliases", - "pin-project-lite", - "quinn-proto", - "quinn-udp", - "rustc-hash 2.1.1", - "rustls", - "socket2", - "thiserror 2.0.12", - "tokio", - "tracing", - "web-time 1.1.0", -] - -[[package]] -name = "quinn-proto" -version = "0.11.10" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" -dependencies = [ - "bytes", - "getrandom 0.3.2", - "rand 0.9.0", - "ring", - "rustc-hash 2.1.1", - "rustls", - "rustls-pki-types", - "slab", - "thiserror 2.0.12", - "tinyvec", - "tracing", - "web-time 1.1.0", -] - -[[package]] -name = "quinn-udp" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" -dependencies = [ - "cfg_aliases", - "libc", - "once_cell", - "socket2", - "tracing", - "windows-sys 0.59.0", -] +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.40" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] -[[package]] -name = "r-efi" -version = "5.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" - [[package]] name = "rand" version = "0.8.5" @@ -3395,19 +2206,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" -dependencies = [ - "rand_chacha 0.9.0", - "rand_core 0.9.3", - "zerocopy", + "rand_chacha", + "rand_core", ] [[package]] @@ -3417,17 +2217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -dependencies = [ - "ppv-lite86", - "rand_core 0.9.3", + "rand_core", ] [[package]] @@ -3436,107 +2226,28 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", -] - -[[package]] -name = "rand_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" -dependencies = [ - "getrandom 0.3.2", -] - -[[package]] -name = "rav1e" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd87ce80a7665b1cce111f8a16c1f3929f6547ce91ade6addf4ec86a8dda5ce9" -dependencies = [ - "arbitrary", - "arg_enum_proc_macro", - "arrayvec", - "av1-grain", - "bitstream-io", - "built", - "cfg-if", - "interpolate_name", - "itertools 0.12.1", - "libc", - "libfuzzer-sys", - "log", - "maybe-rayon", - "new_debug_unreachable", - "noop_proc_macro", - "num-derive", - "num-traits", - "once_cell", - "paste", - "profiling", - "rand 0.8.5", - "rand_chacha 0.3.1", - "simd_helpers", - "system-deps", - "thiserror 1.0.69", - "v_frame", - "wasm-bindgen", -] - -[[package]] -name = "ravif" -version = "0.11.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2413fd96bd0ea5cdeeb37eaf446a22e6ed7b981d792828721e74ded1980a45c6" -dependencies = [ - "avif-serialize", - "imgref", - "loop9", - "quick-error", - "rav1e", - "rayon", - "rgb", -] - -[[package]] -name = "rayon" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", + "getrandom", ] [[package]] name = "redox_syscall" -version = "0.5.10" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ - "bitflags 2.9.0", + "bitflags 1.3.2", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -3550,13 +2261,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.5", + "regex-syntax 0.8.3", ] [[package]] @@ -3567,29 +2278,70 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = 
"adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "reqwest" -version = "0.12.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +version = "0.11.27" +source = "git+https://github.com/girlbossceo/reqwest?rev=319335e000fdea2e3d01f44245c8a21864d0c1c3#319335e000fdea2e3d01f44245c8a21864d0c1c3" dependencies = [ "async-compression", - "base64 0.22.1", + "base64 0.21.7", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "hickory-resolver", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.28", + "hyper-rustls 0.24.2", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls 0.21.10", + "rustls-native-certs", + "rustls-pemfile 1.0.4", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "system-configuration", + "tokio", + "tokio-rustls 0.24.1", + "tokio-socks", + "tokio-util", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg 0.50.0", +] + +[[package]] +name = "reqwest" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e6cc1e89e689536eb5aeede61520e874df5a4707df811cd5da4aa5fbb2aae19" +dependencies = [ + "base64 0.22.0", "bytes", "futures-channel", "futures-core", "futures-util", - "h2", - "hickory-resolver 0.24.4", - "http", - "http-body", + "http 1.1.0", + "http-body 1.0.0", "http-body-util", - "hyper", - "hyper-rustls", + "hyper 1.3.1", + "hyper-rustls 0.26.0", "hyper-util", "ipnet", "js-sys", @@ -3598,61 +2350,53 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "quinn", - "rustls", - "rustls-native-certs", - "rustls-pemfile", + "rustls 0.22.3", + "rustls-pemfile 2.1.2", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-rustls", - "tokio-socks", - "tokio-util", - "tower 0.5.2", + 
"tokio-rustls 0.25.0", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", - "windows-registry", + "webpki-roots 0.26.1", + "winreg 0.52.0", ] [[package]] name = "resolv-conf" -version = "0.7.1" -source = "git+https://forgejo.ellis.link/continuwuation/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ - "hostname", + "hostname 0.3.1", + "quick-error", ] -[[package]] -name = "rgb" -version = "0.8.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" - [[package]] name = "ring" -version = "0.17.14" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom", "libc", + "spin", "untrusted", "windows-sys 0.52.0", ] [[package]] name = "ruma" -version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.9.4" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ "assign", "js_int", @@ -3662,17 +2406,17 @@ dependencies = [ "ruma-common", "ruma-events", "ruma-federation-api", - "ruma-identifiers-validation", "ruma-identity-service-api", "ruma-push-gateway-api", "ruma-signatures", + "ruma-state-res", "web-time 1.1.0", ] [[package]] name = "ruma-appservice-api" -version = "0.10.0" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.9.0" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ "js_int", "ruma-common", @@ -3683,14 +2427,14 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.17.4" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ "as_variant", "assign", "bytes", "date_header", - "http", + "http 0.2.12", "js_int", "js_option", "maplit", @@ -3699,35 +2443,32 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.12", - "url", + "thiserror", "web-time 1.1.0", ] [[package]] name = "ruma-common" -version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.12.1" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ "as_variant", - "base64 0.22.1", + "base64 0.21.7", "bytes", "form_urlencoded", - "getrandom 0.2.15", - "http", - "indexmap 2.8.0", + "http 0.2.12", + "indexmap", "js_int", "konst", "percent-encoding", - "rand 0.8.5", + "rand", "regex", "ruma-identifiers-validation", "ruma-macros", "serde", "serde_html_form", "serde_json", - "smallvec", - "thiserror 2.0.12", + "thiserror", "time", "tracing", "url", @@ -3738,64 +2479,51 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = 
"0.27.11" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ "as_variant", - "indexmap 2.8.0", + "indexmap", "js_int", "js_option", "percent-encoding", - "pulldown-cmark", "regex", "ruma-common", "ruma-identifiers-validation", "ruma-macros", "serde", "serde_json", - "smallvec", - "thiserror 2.0.12", + "thiserror", "tracing", "url", - "web-time 1.1.0", "wildmatch", ] [[package]] name = "ruma-federation-api" -version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.8.0" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ - "bytes", - "headers", - "http", - "http-auth", - "httparse", "js_int", - "memchr", - "mime", - "rand 0.8.5", "ruma-common", "ruma-events", "serde", "serde_json", - "thiserror 2.0.12", - "tracing", ] [[package]] name = "ruma-identifiers-validation" -version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.9.3" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ "js_int", - "thiserror 2.0.12", + "thiserror", ] [[package]] name = "ruma-identity-service-api" -version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.8.0" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ "js_int", "ruma-common", @@ -3804,23 +2532,23 @@ dependencies = [ [[package]] name = "ruma-macros" -version = "0.13.0" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.12.0" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ - "cfg-if", + "once_cell", "proc-macro-crate", "proc-macro2", "quote", "ruma-identifiers-validation", "serde", - "syn", + "syn 2.0.58", "toml", ] [[package]] name = "ruma-push-gateway-api" -version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.8.0" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ "js_int", "ruma-common", @@ -3831,26 +2559,54 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +version = "0.14.0" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "ed25519-dalek", "pkcs8", - "rand 0.8.5", + "rand", "ruma-common", "serde_json", "sha2", "subslice", - "thiserror 2.0.12", + "thiserror", +] + +[[package]] +name = "ruma-state-res" +version = "0.10.0" +source = "git+https://github.com/girlbossceo/ruma?branch=conduwuit-changes#62fcc7361bb339f003b8c3db2e6a83837df4ddaa" +dependencies = [ + "itertools 0.11.0", + "js_int", + "ruma-common", + "ruma-events", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "rusqlite" +version = "0.31.0" +source = "git+https://github.com/rusqlite/rusqlite?rev=e00b626e2b1c67347d789fb7f600281705c89381#e00b626e2b1c67347d789fb7f600281705c89381" +dependencies = [ + "bitflags 2.5.0", + "fallible-iterator", + 
"fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", ] [[package]] name = "rust-librocksdb-sys" -version = "0.33.0+9.11.1" -source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" +version = "0.19.2+9.0.0" +source = "git+https://github.com/zaidoon1/rust-rocksdb?branch=master#75b0264ef111014d3a3870607e56fdf527e5ae3b" dependencies = [ - "bindgen 0.71.1", + "bindgen", "bzip2-sys", "cc", "glob", @@ -3864,8 +2620,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.37.0" -source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" +version = "0.23.2" +source = "git+https://github.com/zaidoon1/rust-rocksdb?branch=master#75b0264ef111014d3a3870607e56fdf527e5ae3b" dependencies = [ "libc", "rust-librocksdb-sys", @@ -3873,9 +2629,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -3883,87 +2639,94 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" -[[package]] -name = "rustc-hash" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" - [[package]] name = "rustc_version" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +checksum = 
"bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] [[package]] -name = "rustix" -version = "0.38.44" +name = "rustls" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ - "bitflags 2.9.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.59.0", + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", ] [[package]] name = "rustls" -version = "0.23.25" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" +checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" dependencies = [ - "aws-lc-rs", "log", - "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.102.2", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", - "rustls-pki-types", + "rustls-pemfile 1.0.4", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" -version = "2.2.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pemfile" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +dependencies = [ + "base64 
0.22.0", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.11.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "web-time 1.1.0", + "ring", + "untrusted", ] [[package]] name = "rustls-webpki" -version = "0.103.1" +version = "0.102.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" dependencies = [ - "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -3971,55 +2734,25 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" - -[[package]] -name = "rustyline-async" -version = "0.4.3" -source = "git+https://forgejo.ellis.link/continuwuation/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" -dependencies = [ - "crossterm", - "futures-channel", - "futures-util", - "pin-project", - "thingbuf", - "thiserror 2.0.12", - "unicode-segmentation", - "unicode-width 0.2.0", -] +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "sanitize-filename" -version = "0.6.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc984f4f9ceb736a7bb755c3e3bd17dc56370af2600c9780dcc48c66453da34d" -dependencies = [ - "regex", -] +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] -[[package]] -name = "scoped-tls" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" - [[package]] name = "scopeguard" version = "1.2.0" @@ -4027,21 +2760,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] -name = "sd-notify" -version = "0.4.5" +name = "sct" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b943eadf71d8b69e661330cb0e2656e31040acf21ee7708e2c238a0ec6af2bf4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "libc", + "ring", + "untrusted", ] [[package]] -name = "security-framework" -version = "3.2.0" +name = "sd-notify" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32" + +[[package]] +name = "security-framework" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ - "bitflags 2.9.0", + "bitflags 1.3.2", "core-foundation", 
"core-foundation-sys", "libc", @@ -4050,9 +2790,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -4060,19 +2800,19 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "sentry" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "255914a8e53822abd946e2ce8baa41d4cded6b8e938913b7f7b9da5b7ab44335" +checksum = "00421ed8fa0c995f07cde48ba6c89e80f2b312f74ff637326f392fbfd23abe02" dependencies = [ "httpdate", - "reqwest", - "rustls", + "reqwest 0.12.3", + "rustls 0.21.10", "sentry-backtrace", "sentry-contexts", "sentry-core", @@ -4083,14 +2823,14 @@ dependencies = [ "sentry-tracing", "tokio", "ureq", - "webpki-roots", + "webpki-roots 0.25.4", ] [[package]] name = "sentry-backtrace" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00293cd332a859961f24fd69258f7e92af736feaeb91020cff84dac4188a4302" +checksum = "a79194074f34b0cbe5dd33896e5928bbc6ab63a889bd9df2264af5acb186921e" dependencies = [ "backtrace", "once_cell", @@ -4100,11 +2840,11 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961990f9caa76476c481de130ada05614cd7f5aa70fb57c2142f0e09ad3fb2aa" +checksum = 
"eba8870c5dba2bfd9db25c75574a11429f6b95957b0a78ac02e2970dd7a5249a" dependencies = [ - "hostname", + "hostname 0.4.0", "libc", "os_info", "rustc_version", @@ -4114,12 +2854,12 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a6409d845707d82415c800290a5d63be5e3df3c2e417b0997c60531dfbd35ef" +checksum = "46a75011ea1c0d5c46e9e57df03ce81f5c7f0a9e199086334a1f9c0a541e0826" dependencies = [ "once_cell", - "rand 0.8.5", + "rand", "sentry-types", "serde", "serde_json", @@ -4127,9 +2867,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71ab5df4f3b64760508edfe0ba4290feab5acbbda7566a79d72673065888e5cc" +checksum = "7ec2a486336559414ab66548da610da5e9626863c3c4ffca07d88f7dc71c8de8" dependencies = [ "findshlibs", "once_cell", @@ -4138,9 +2878,9 @@ dependencies = [ [[package]] name = "sentry-log" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693841da8dfb693af29105edfbea1d91348a13d23dd0a5d03761eedb9e450c46" +checksum = "e74b7261245ff17a8c48e8f3e1e96fb6b84146870121af880d53aef6a5b4f784" dependencies = [ "log", "sentry-core", @@ -4148,9 +2888,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "609b1a12340495ce17baeec9e08ff8ed423c337c1a84dffae36a178c783623f3" +checksum = "2eaa3ecfa3c8750c78dcfd4637cfa2598b95b52897ed184b4dc77fcf7d95060d" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4158,11 +2898,11 @@ dependencies = [ [[package]] name = "sentry-tower" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b98005537e38ee3bc10e7d36e7febe9b8e573d03f2ddd85fcdf05d21f9abd6d" 
+checksum = "df141464944fdf8e2a6f2184eb1d973a20456466f788346b6e3a51791cdaa370" dependencies = [ - "http", + "http 1.1.0", "pin-project", "sentry-core", "tower-layer", @@ -4172,9 +2912,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f4e86402d5c50239dc7d8fd3f6d5e048221d5fcb4e026d8d50ab57fe4644cb" +checksum = "f715932bf369a61b7256687c6f0554141b7ce097287e30e3f7ed6e9de82498fe" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4184,16 +2924,16 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.37.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d3f117b8755dbede8260952de2aeb029e20f432e72634e8969af34324591631" +checksum = "4519c900ce734f7a0eb7aba0869dfb225a7af8820634a7dd51449e3b093cfb7c" dependencies = [ "debugid", "hex", - "rand 0.8.5", + "rand", "serde", "serde_json", - "thiserror 1.0.69", + "thiserror", "time", "url", "uuid", @@ -4201,32 +2941,32 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.58", ] [[package]] name = "serde_html_form" -version = "0.2.7" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" 
+checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", - "indexmap 2.8.0", + "indexmap", "itoa", "ryu", "serde", @@ -4234,21 +2974,20 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", - "memchr", "ryu", "serde", ] [[package]] name = "serde_path_to_error" -version = "0.1.17" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", @@ -4266,9 +3005,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -4291,13 +3030,24 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.8.0", + "indexmap", "itoa", "ryu", "serde", "unsafe-libyaml", ] +[[package]] +name = "sha-1" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sha1" version = "0.10.6" @@ -4335,32 +3085,11 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" -[[package]] -name = "signal-hook" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" -dependencies = [ - "libc", - "signal-hook-registry", -] - -[[package]] -name = "signal-hook-mio" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" -dependencies = [ - "libc", - "mio", - "signal-hook", -] - [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -4371,7 +3100,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -4381,19 +3110,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] -name = "simd_helpers" -version = "0.1.0" +name = "simple_asn1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95890f873bec569a0362c235787f3aca6e1e887302ba4840839bcc6459c42da6" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "quote", + "num-bigint", + "num-traits", + "thiserror", + "time", ] [[package]] name = "siphasher" -version = "1.0.1" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = 
"38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "slab" @@ -4404,35 +3136,28 @@ dependencies = [ "autocfg", ] -[[package]] -name = "smallstr" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63b1aefdf380735ff8ded0b15f31aab05daf1f70216c01c02a12926badd1df9d" -dependencies = [ - "serde", - "smallvec", -] - [[package]] name = "smallvec" -version = "1.14.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" -dependencies = [ - "serde", -] +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", "windows-sys 0.52.0", ] +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + [[package]] name = "spki" version = "0.7.3" @@ -4443,25 +3168,14 @@ dependencies = [ "der", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - -[[package]] -name = "strict" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f42444fea5b87a39db4218d9422087e66a85d0e7a0963a439b07bcdf91804006" - [[package]] name = "string_cache" -version = "0.8.9" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" +checksum = 
"f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" dependencies = [ "new_debug_unreachable", + "once_cell", "parking_lot", "phf_shared", "precomputed-hash", @@ -4470,9 +3184,9 @@ dependencies = [ [[package]] name = "string_cache_codegen" -version = "0.5.4" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c711928715f1fe0fe509c53b43e993a9a557babc2d0a3567d0a3006f1ac931a0" +checksum = "6bb30289b722be4ff74a408c3cc27edeaad656e06cb1fe8fa9231fa59c728988" dependencies = [ "phf_generator", "phf_shared", @@ -4491,15 +3205,26 @@ dependencies = [ [[package]] name = "subtle" -version = "2.6.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" -version = "2.0.100" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -4508,49 +3233,31 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "1.0.2" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "futures-core", + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", ] [[package]] -name = "synstructure" -version = "0.13.1" +name = "system-configuration-sys" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" dependencies = [ - "proc-macro2", - "quote", - "syn", + "core-foundation-sys", + "libc", ] -[[package]] -name = "system-deps" -version = "6.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e535eb8dded36d55ec13eddacd30dec501792ff23a0b1682c38601b8cf2349" -dependencies = [ - "cfg-expr", - "heck", - "pkg-config", - "toml", - "version-compare", -] - -[[package]] -name = "tagptr" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" - -[[package]] -name = "target-lexicon" -version = "0.12.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" - [[package]] name = "tendril" version = "0.4.3" @@ -4562,70 +3269,24 @@ dependencies = [ "utf-8", ] -[[package]] -name = "termimad" -version = "0.31.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8e19c6dbf107bec01d0e216bb8219485795b7d75328e4fa5ef2756c1be4f8dc" -dependencies = [ - "coolor", - "crokey", - "crossbeam", - "lazy-regex", - "minimad", - "serde", - "thiserror 1.0.69", - "unicode-width 0.1.14", -] - -[[package]] -name = "thingbuf" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "662b54ef6f7b4e71f683dadc787bbb2d8e8ef2f91b682ebed3164a5a7abca905" -dependencies = [ - "parking_lot", - "pin-project", -] - 
[[package]] name = "thiserror" -version = "1.0.69" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ - "thiserror-impl 1.0.69", -] - -[[package]] -name = "thiserror" -version = "2.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" -dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.69" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn", -] - -[[package]] -name = "thiserror-impl" -version = "2.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "syn 2.0.58", ] [[package]] @@ -4660,21 +3321,11 @@ dependencies = [ "threadpool", ] -[[package]] -name = "tiff" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba1310fcea54c6a9a4fd1aad794ecc02c31682f6bfbecdf460bf19533eed1e3e" -dependencies = [ - "flate2", - "jpeg-decoder", - "weezl", -] - [[package]] name = "tikv-jemalloc-ctl" -version = "0.6.0" -source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "619bfed27d807b54f7f776b9430d4f8060e66ee138a28632ca898584d462c31c" dependencies = [ "libc", "paste", @@ -4683,8 +3334,9 @@ 
dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" -source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" dependencies = [ "cc", "libc", @@ -4692,8 +3344,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.6.0" -source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -4701,9 +3354,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -4716,35 +3369,25 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", ] 
-[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec", -] - [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -4757,72 +3400,71 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.2" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", "libc", "mio", + "num_cpus", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "tracing", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn", -] - -[[package]] -name = "tokio-metrics" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb2bb07a8451c4c6fa8b3497ad198510d8b8dffa5df5cfb97a64102a58b113c8" -dependencies = [ - "futures-util", - "pin-project-lite", - "tokio", - "tokio-stream", + "syn 2.0.58", ] [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls", + "rustls 0.21.10", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.3", + "rustls-pki-types", "tokio", ] [[package]] name = "tokio-socks" -version = "0.5.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d4770b8024672c1101b3f6733eab95b18007dbe0847a8afe341fcf79e06043f" +checksum = "51165dfa029d2a65969413a6cc96f354b86b464498702f174a4efa13608fd8c0" dependencies = [ "either", "futures-util", - "thiserror 1.0.69", + "thiserror", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -4831,22 +3473,23 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.14" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", + "tracing", ] [[package]] name = "toml" -version = "0.8.20" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" +checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" dependencies = [ "serde", "serde_spanned", @@ -4856,56 
+3499,26 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.24" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.8.0", + "indexmap", "serde", "serde_spanned", "toml_datetime", "winnow", ] -[[package]] -name = "tonic" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" -dependencies = [ - "async-stream", - "async-trait", - "axum", - "base64 0.22.1", - "bytes", - "h2", - "http", - "http-body", - "http-body-util", - "hyper", - "hyper-timeout", - "hyper-util", - "percent-encoding", - "pin-project", - "prost", - "socket2", - "tokio", - "tokio-stream", - "tower 0.4.13", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "tower" version = "0.4.13" @@ -4914,51 +3527,32 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand 0.8.5", - "slab", "tokio", - "tokio-util", "tower-layer", "tower-service", "tracing", ] -[[package]] -name = "tower" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" -dependencies = [ - "futures-core", - "futures-util", - "pin-project-lite", - "sync_wrapper", - "tokio", - "tower-layer", - "tower-service", -] - 
[[package]] name = "tower-http" -version = "0.6.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", - "bitflags 2.9.0", + "bitflags 2.5.0", "bytes", "futures-core", "futures-util", - "http", - "http-body", - "http-body-util", + "http 0.2.12", + "http-body 0.4.6", + "http-range-header", "pin-project-lite", "tokio", "tokio-util", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", "tracing", @@ -4966,21 +3560,23 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" -version = "0.3.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.41" -source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -4988,18 +3584,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" -source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +version = "0.1.27" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.58", ] [[package]] name = "tracing-core" -version = "0.1.33" -source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -5019,7 +3617,8 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ "log", "once_cell", @@ -5046,8 +3645,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" -source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", @@ -5069,15 +3669,15 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.18.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "typewit" -version = "1.11.0" +version = "1.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb77c29baba9e4d3a6182d51fa75e3215c7fd1dab8f4ea9d107c716878e55fc0" +checksum = "c6fb9ae6a3cafaf0a5d14c2302ca525f9ae8e07a0f0e6949de88d882c37a6e24" dependencies = [ "typewit_proc_macros", ] @@ -5107,34 +3707,25 @@ dependencies = [ ] [[package]] -name = "unicase" -version = "2.8.1" +name = "unicode-bidi" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] -name = "unicode-segmentation" -version = "1.12.0" +name = "unicode-normalization" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - -[[package]] -name = "unicode-width" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" - -[[package]] -name = "unicode-width" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +dependencies = [ + "tinyvec", +] [[package]] name = "unsafe-libyaml" @@ -5150,24 +3741,25 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.12.1" +version = "2.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" +checksum = "11f214ce18d8b2cbe84ed3aa6486ed3f5b285cf8d8fbdbce9f3f767a724adc35" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "log", "once_cell", - "rustls", + "rustls 0.22.3", "rustls-pki-types", + "rustls-webpki 0.102.2", "url", - "webpki-roots", + "webpki-roots 0.26.1", ] [[package]] name = "url" -version = "2.5.4" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", "idna", @@ -5187,44 +3779,21 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - [[package]] name = "uuid" -version = "1.16.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.3.2", + "getrandom", "serde", ] -[[package]] -name = "v_frame" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6f32aaa24bacd11e488aa9ba66369c7cd514885742c9fe08cfe85884db3e92b" -dependencies = [ - "aligned-vec", - "num-traits", - "wasm-bindgen", -] - [[package]] name = "valuable" -version = "0.1.1" +version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vcpkg" @@ -5232,17 +3801,11 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "version-compare" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" - [[package]] name = "version_check" -version = "0.9.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" @@ -5259,59 +3822,48 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" -[[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" -dependencies = [ - "wit-bindgen-rt", -] - [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", - "once_cell", - "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", + "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.58", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", - "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5319,31 +3871,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" -dependencies = [ - "unicode-ident", -] +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -5371,9 +3920,9 @@ dependencies = [ [[package]] name = "webpage" -version = "2.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70862efc041d46e6bbaa82bb9c34ae0596d090e86cbd14bd9e93b36ee6802eac" +checksum = "3fb86b12e58d490a99867f561ce8466ffa7b73e24d015a8e7f5bc111d4424ba2" dependencies = [ "html5ever", "markup5ever_rcdom", @@ -5383,9 +3932,15 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.8" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" + +[[package]] +name = "webpki-roots" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" dependencies = [ "rustls-pki-types", ] @@ -5396,29 +3951,17 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082" -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix", -] - [[package]] name = "widestring" -version = "1.2.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "wildmatch" -version = "2.4.0" +version = "2.3.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "68ce1ab1f8c62655ebe1350f589c61e505cf94d385bc6a12899442d9081e71fd" +checksum = "939e59c1bc731542357fdaad98b209ef78c8743d652bb61439d16b16a79eb025" [[package]] name = "winapi" @@ -5448,18 +3991,8 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core 0.52.0", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" -dependencies = [ - "windows-core 0.58.0", - "windows-targets 0.52.6", + "windows-core", + "windows-targets 0.52.5", ] [[package]] @@ -5468,96 +4001,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-core" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" -dependencies = [ - "windows-implement", - "windows-interface", - "windows-result 0.2.0", - "windows-strings 0.1.0", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-implement" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "windows-interface" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "windows-link" -version = 
"0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" - -[[package]] -name = "windows-registry" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" -dependencies = [ - "windows-result 0.3.2", - "windows-strings 0.3.1", - "windows-targets 0.53.0", -] - -[[package]] -name = "windows-result" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-result" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-strings" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" -dependencies = [ - "windows-result 0.2.0", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-strings" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" -dependencies = [ - "windows-link", + "windows-targets 0.52.5", ] [[package]] @@ -5575,16 +4019,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 
0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -5604,34 +4039,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" -dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -5642,15 +4061,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = 
"7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -5660,15 +4073,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -5678,27 +4085,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" [[package]] name = "windows_i686_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -5708,15 +4103,9 @@ checksum = 
"8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -5726,15 +4115,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -5744,15 +4127,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -5762,21 +4139,15 @@ checksum = 
"ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" -version = "0.7.4" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] @@ -5792,31 +4163,20 @@ dependencies = [ ] [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "winreg" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ - "bitflags 2.9.0", + "cfg-if", + "windows-sys 0.48.0", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - [[package]] name = "xml5ever" -version = "0.18.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bbb26405d8e919bc1547a5aa9abc95cbfa438f04844f5fdd9dc7596b748bf69" +checksum = 
"4034e1d05af98b51ad7214527730626f019682d797ba38b51689212118d8e650" dependencies = [ "log", "mac", @@ -5829,124 +4189,56 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" -[[package]] -name = "yoke" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - [[package]] name = "zerocopy" -version = "0.8.24" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.24" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn", -] - -[[package]] -name = "zerofrom" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" -dependencies = [ - 
"proc-macro2", - "quote", - "syn", - "synstructure", + "syn 2.0.58", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" - -[[package]] -name = "zerovec" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" [[package]] name = "zstd" -version = "0.13.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.2.4" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.15+zstd.1.5.7" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ - "bindgen 0.71.1", "cc", "pkg-config", ] @@ -5957,20 +4249,11 @@ version = "0.4.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" -[[package]] -name = "zune-inflate" -version = "0.2.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02" -dependencies = [ - "simd-adler32", -] - [[package]] name = "zune-jpeg" -version = "0.4.14" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99a5bab8d7dedf81405c4bb1f2b83ea057643d9cb28778cea9eecddeedd2e028" +checksum = "ec866b44a2a1fd6133d363f073ca1b179f438f99e7e5bfb1e33f7181facfe448" dependencies = [ "zune-core", ] diff --git a/Cargo.toml b/Cargo.toml index 1ce5c1db..e9d6b11b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,433 +1,182 @@ -#cargo-features = ["profile-rustflags"] - -[workspace] -resolver = "2" -members = ["src/*"] -default-members = ["src/*"] - -[workspace.package] -authors = [ - "June Clementine Strawberry ", - "strawberry ", # woof - "Jason Volk ", -] -categories = ["network-programming"] -description = "a very cool Matrix chat homeserver written in Rust" -edition = "2024" -homepage = "https://continuwuity.org/" -keywords = ["chat", "matrix", "networking", "server", "uwu"] +[package] +name = "conduit" +description = "a cool fork of Conduit, a Matrix homeserver written in Rust" license = "Apache-2.0" -# See also `rust-toolchain.toml` +authors = [ + "strawberry ", + "timokoesters ", +] +homepage = "https://puppygock.gay/conduwuit" +repository = "https://github.com/girlbossceo/conduwuit" readme = "README.md" -repository = "https://forgejo.ellis.link/continuwuation/continuwuity" -rust-version = "1.86.0" -version = "0.5.0-rc.5" +version = "0.7.0+conduwuit-0.2.0" +edition = "2021" -[workspace.metadata.crane] -name = "conduwuit" +# See also `rust-toolchain.toml` +rust-version = "1.75.0" -[workspace.dependencies.arrayvec] -version = "0.7.6" -features = ["serde"] 
-[workspace.dependencies.smallvec] -version = "1.14.0" -features = [ - "const_generics", - "const_new", - "serde", - "union", - "write", -] +[dependencies] +# Used for secure identifiers +rand = "0.8.5" -[workspace.dependencies.smallstr] -version = "0.3" -features = ["ffi", "std", "union"] +# Used for conduit::Error type +thiserror = "1.0.58" -[workspace.dependencies.const-str] -version = "0.6.2" +# Used to encode server public key +base64 = "0.22.0" -[workspace.dependencies.ctor] -version = "0.2.9" +# Used when hashing the state +ring = "0.17.8" -[workspace.dependencies.cargo_toml] -version = "0.21" -default-features = false -features = ["features"] - -[workspace.dependencies.toml] -version = "0.8.14" -default-features = false -features = ["parse"] - -[workspace.dependencies.sanitize-filename] -version = "0.6.0" - -[workspace.dependencies.base64] -version = "0.22.1" -default-features = false - -# used for TURN server authentication -[workspace.dependencies.hmac] -version = "0.12.1" -default-features = false - -# used for checking if an IP is in specific subnets / CIDR ranges easier -[workspace.dependencies.ipaddress] -version = "0.1.3" - -[workspace.dependencies.rand] -version = "0.8.5" - -# Used for the http request / response body type for Ruma endpoints used with reqwest -[workspace.dependencies.bytes] -version = "1.10.1" - -[workspace.dependencies.http-body-util] -version = "0.1.3" - -[workspace.dependencies.http] -version = "1.3.1" - -[workspace.dependencies.regex] -version = "1.11.1" - -[workspace.dependencies.axum] -version = "0.7.9" -default-features = false -features = [ - "form", - "http1", - "http2", - "json", - "matched-path", - "tokio", - "tracing", -] - -[workspace.dependencies.axum-extra] -version = "0.9.6" -default-features = false -features = ["typed-header", "tracing"] - -[workspace.dependencies.axum-server] -version = "0.7.2" -default-features = false - -# to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for 
complement or sytest -[workspace.dependencies.axum-server-dual-protocol] -version = "0.7" - -[workspace.dependencies.axum-client-ip] -version = "0.6.1" - -[workspace.dependencies.tower] -version = "0.5.2" -default-features = false -features = ["util"] - -[workspace.dependencies.tower-http] -version = "0.6.2" -default-features = false -features = [ - "add-extension", - "catch-panic", - "cors", - "sensitive-headers", - "set-header", - "timeout", - "trace", - "util", -] - -[workspace.dependencies.rustls] -version = "0.23.25" -default-features = false -features = ["aws_lc_rs"] - -[workspace.dependencies.reqwest] -version = "0.12.15" -default-features = false -features = [ - "rustls-tls-native-roots", - "socks", - "hickory-dns", - "http2", -] - -[workspace.dependencies.serde] -version = "1.0.219" -default-features = false -features = ["rc"] - -[workspace.dependencies.serde_json] -version = "1.0.140" -default-features = false -features = ["raw_value"] - -# Used for appservice registration files -[workspace.dependencies.serde_yaml] -version = "0.9.34" +# Used to find matching events for appservices +regex = "1.10.4" # Used to load forbidden room/user regex from config -[workspace.dependencies.serde_regex] -version = "1.1.0" +serde_regex = "1.1.0" + +# Used to make working with iterators easier, was already a transitive depdendency +itertools = "0.12.1" + +# jwt jsonwebtokens +jsonwebtoken = "9.3.0" + +lru-cache = "0.1.2" # Used for ruma wrapper -[workspace.dependencies.serde_html_form] -version = "0.2.6" +serde_html_form = "0.2.6" + +# used for TURN server authentication +hmac = "0.12.1" +sha-1 = "0.10.1" + +async-trait = "0.1.80" + +# used for checking if an IP is in specific subnets / CIDR ranges easier +ipaddress = "0.1.3" + +# to encode/decode percent URIs when conduwuit is running without a reverse proxy +#urlencoding = "2.1.3" + +# to get the client IP address of requests +#axum-client-ip = "0.4.2" + +# to parse user-friendly time durations in admin commands 
+cyborgtime = "2.1.1" + +# all the web/HTTP dependencies +# Used for the http request / response body type for Ruma endpoints used with reqwest +bytes = "1.6.0" +http = "0.2.12" + +# used to replace the channels of the tokio runtime +loole = "0.3.0" + +# Validating urls in config, was already a transitive dependency +url = { version = "2", features = ["serde"] } + +# standard date and time tools +[dependencies.chrono] +version = "0.4.38" +features = ["alloc"] +default-features = false + +# Web framework +[dependencies.axum] +version = "0.6.20" +default-features = false +features = ["form", "headers", "http1", "http2", "json", "matched-path"] + +[dependencies.axum-server] +version = "0.5.1" +features = ["tls-rustls"] + +[dependencies.tower] +version = "0.4.13" +features = ["util"] + +[dependencies.tower-http] +version = "0.4.4" +features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] + +[dependencies.hyper] +version = "0.14" +features = ["server", "http1", "http2"] + +[dependencies.reqwest] +#version = "0.11.27" +git = "https://github.com/girlbossceo/reqwest" +rev = "319335e000fdea2e3d01f44245c8a21864d0c1c3" +default-features = false +features = ["rustls-tls-native-roots", "socks", "hickory-dns"] + +# all the serde stuff +# Used for pdu definition +[dependencies.serde] +version = "1.0.197" +features = ["rc"] +# Used for appservice registration files +[dependencies.serde_yaml] +version = "0.9.34" +# Used for ruma wrapper +[dependencies.serde_json] +version = "1.0.116" +features = ["raw_value"] + # Used for password hashing -[workspace.dependencies.argon2] +[dependencies.argon2] version = "0.5.3" features = ["alloc", "rand"] default-features = false -# Used to generate thumbnails for images & blurhashes -[workspace.dependencies.image] -version = "0.25.5" -default-features = false -features = [ - "jpeg", - "png", - "gif", - "webp", -] - -[workspace.dependencies.blurhash] -version = "0.2.3" -default-features = false -features = [ - 
"fast-linear-to-srgb", - "image", -] - -# logging -[workspace.dependencies.log] -version = "0.4.27" -default-features = false -[workspace.dependencies.tracing] -version = "0.1.41" -default-features = false -[workspace.dependencies.tracing-subscriber] -version = "0.3.19" -default-features = false -features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"] -[workspace.dependencies.tracing-core] -version = "0.1.33" -default-features = false - -# for URL previews -[workspace.dependencies.webpage] -version = "2.0.1" -default-features = false - -# used for conduwuit's CLI and admin room command parsing -[workspace.dependencies.clap] -version = "4.5.35" -default-features = false -features = [ - "derive", - "env", - "error-context", - "help", - "std", - "string", - "usage", -] - -[workspace.dependencies.futures] -version = "0.3.31" -default-features = false -features = ["std", "async-await"] - -[workspace.dependencies.tokio] -version = "1.44.2" -default-features = false -features = [ - "fs", - "net", - "macros", - "sync", - "signal", - "time", - "rt-multi-thread", - "io-util", - "tracing", -] - -[workspace.dependencies.tokio-metrics] -version = "0.4.0" - -[workspace.dependencies.libloading] -version = "0.8.6" - -# Validating urls in config, was already a transitive dependency -[workspace.dependencies.url] -version = "2.5.4" -default-features = false -features = ["serde"] - -# standard date and time tools -[workspace.dependencies.chrono] -version = "0.4.38" -features = ["alloc", "std"] -default-features = false - -[workspace.dependencies.hyper] -version = "1.6.0" -default-features = false -features = [ - "server", - "http1", - "http2", -] - -[workspace.dependencies.hyper-util] -version = "0.1.11" -default-features = false -features = [ - "server-auto", - "server-graceful", - "tokio", -] - -# to support multiple variations of setting a config option -[workspace.dependencies.either] -version = "1.15.0" -default-features = false -features = ["serde"] - -# Used 
for reading the configuration from conduwuit.toml & environment variables -[workspace.dependencies.figment] -version = "0.10.19" -default-features = false -features = ["env", "toml"] - -[workspace.dependencies.hickory-resolver] +# Used to generate thumbnails for images +[dependencies.image] version = "0.25.1" default-features = false -features = [ - "serde", - "system-config", - "tokio", -] +features = ["jpeg", "png", "gif", "webp"] -# Used for conduwuit::Error type -[workspace.dependencies.thiserror] -version = "2.0.12" +# logging +[dependencies.log] +version = "0.4.21" default-features = false - -# Used when hashing the state -[workspace.dependencies.ring] -version = "0.17.14" +features = ["max_level_trace", "release_max_level_info"] +[dependencies.tracing] +version = "0.1.40" default-features = false +features = ["max_level_trace", "release_max_level_info"] +[dependencies.tracing-subscriber] +version = "0.3.18" +features = ["env-filter"] -# Used to make working with iterators easier, was already a transitive depdendency -[workspace.dependencies.itertools] -version = "0.14.0" - -# to parse user-friendly time durations in admin commands -#TODO: overlaps chrono? 
-[workspace.dependencies.cyborgtime] -version = "2.1.1" - -# used for MPSC channels -[workspace.dependencies.loole] -version = "0.4.0" - -# used for MPMC channels -[workspace.dependencies.async-channel] -version = "2.3.1" - -[workspace.dependencies.async-trait] -version = "0.1.88" - -[workspace.dependencies.lru-cache] -version = "0.1.2" - -# Used for matrix spec type definitions and helpers -[workspace.dependencies.ruma] -git = "https://forgejo.ellis.link/continuwuation/ruwuma" -#branch = "conduwuit-changes" -rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" -features = [ - "compat", - "rand", - "appservice-api-c", - "client-api", - "federation-api", - "markdown", - "push-gateway-api-c", - "unstable-exhaustive-types", - "ring-compat", - "compat-upload-signatures", - "identifiers-validation", - "unstable-unspecified", - "unstable-msc2448", - "unstable-msc2666", - "unstable-msc2867", - "unstable-msc2870", - "unstable-msc3026", - "unstable-msc3061", - "unstable-msc3245", - "unstable-msc3266", - "unstable-msc3381", # polls - "unstable-msc3489", # beacon / live location - "unstable-msc3575", - "unstable-msc3930", # polls push rules - "unstable-msc4075", - "unstable-msc4095", - "unstable-msc4121", - "unstable-msc4125", - "unstable-msc4186", - "unstable-msc4203", # sending to-device events to appservices - "unstable-msc4210", # remove legacy mentions - "unstable-extensible-events", - "unstable-pdu", -] - -[workspace.dependencies.rust-rocksdb] -git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1" -rev = "fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" -default-features = false -features = [ - "multi-threaded-cf", - "mt_static", - "lz4", - "zstd", - "bzip2", -] - -[workspace.dependencies.sha2] +# optional SHA256 media keys feature +[dependencies.sha2] version = "0.10.8" -default-features = false - -[workspace.dependencies.sha1] -version = "0.10.6" -default-features = false +optional = true # optional opentelemetry, performance measurements, flamegraphs, etc for 
performance measurements and monitoring -[workspace.dependencies.opentelemetry] +[dependencies.opentelemetry] version = "0.21.0" - -[workspace.dependencies.tracing-flame] +optional = true +[dependencies.tracing-flame] version = "0.2.0" - -[workspace.dependencies.tracing-opentelemetry] +optional = true +[dependencies.tracing-opentelemetry] version = "0.22.0" - -[workspace.dependencies.opentelemetry_sdk] +optional = true +[dependencies.opentelemetry_sdk] version = "0.21.2" +optional = true features = ["rt-tokio"] - -[workspace.dependencies.opentelemetry-jaeger] +[dependencies.opentelemetry-jaeger] version = "0.20.0" +optional = true features = ["rt-tokio"] # optional sentry metrics for crash/panic reporting -[workspace.dependencies.sentry] -version = "0.37.0" +[dependencies.sentry] +version = "0.32.3" +optional = true default-features = false features = [ "backtrace", @@ -441,543 +190,411 @@ features = [ "reqwest", "log", ] +[dependencies.sentry-tracing] +version = "0.32.3" +optional = true +[dependencies.sentry-tower] +version = "0.32.3" +optional = true -[workspace.dependencies.sentry-tracing] -version = "0.37.0" -[workspace.dependencies.sentry-tower] -version = "0.37.0" - -# jemalloc usage -[workspace.dependencies.tikv-jemalloc-sys] -git = "https://forgejo.ellis.link/continuwuation/jemallocator" -rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +# optional jemalloc usage +[dependencies.tikv-jemallocator] +version = "0.5.4" +optional = true default-features = false -features = [ - "background_threads_runtime_support", - "unprefixed_malloc_on_supported_platforms", -] -[workspace.dependencies.tikv-jemallocator] -git = "https://forgejo.ellis.link/continuwuation/jemallocator" -rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" -default-features = false -features = [ - "background_threads_runtime_support", - "unprefixed_malloc_on_supported_platforms", -] -[workspace.dependencies.tikv-jemalloc-ctl] -git = "https://forgejo.ellis.link/continuwuation/jemallocator" -rev = 
"82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +features = ["unprefixed_malloc_on_supported_platforms"] +[dependencies.tikv-jemalloc-ctl] +version = "0.5.4" +optional = true default-features = false features = ["use_std"] -[workspace.dependencies.console-subscriber] -version = "0.4" - -[workspace.dependencies.nix] -version = "0.29.0" -default-features = false -features = ["resource"] - -[workspace.dependencies.sd-notify] -version = "0.4.5" +# for URL previews +[dependencies.webpage] +version = "2.0" default-features = false -[workspace.dependencies.hardened_malloc-rs] -version = "0.1.2" +# to support multiple variations of setting a config option +[dependencies.either] +version = "1.11.0" +features = ["serde"] + +# to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for complement or sytest +[dependencies.axum-server-dual-protocol] +version = "0.5.2" +optional = true + +# used for conduit's CLI and admin room command parsing +[dependencies.clap] +version = "4.5.4" default-features = false +features = ["std", "derive", "help", "usage", "error-context", "string"] + +[dependencies.futures-util] +version = "0.3.30" +default-features = false + +# Used for reading the configuration from conduit.toml & environment variables +[dependencies.figment] +version = "0.10.17" +features = ["env", "toml"] + +# Used for matrix spec type definitions and helpers +#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +#ruma = { git = "https://github.com/ruma/ruma", rev = "4d9f754657a099df8e61533787b8eebd12946435", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified", "unstable-msc2870", "unstable-msc3061", "unstable-msc2867", "unstable-extensible-events"] 
} +#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +[dependencies.ruma] +git = "https://github.com/girlbossceo/ruma" +#rev = "c988b5ff158ede9c10aeffc76ad5e31604f19ddb" +branch = "conduwuit-changes" +#path = "../ruma/crates/ruma" features = [ - "static", - "gcc", - "light", + "compat", + "rand", + "appservice-api-c", + "client-api", + "federation-api", + "push-gateway-api-c", + "state-res", + "unstable-exhaustive-types", + "ring-compat", + "unstable-unspecified", + "unstable-msc2448", + "unstable-msc2666", + "unstable-msc2867", + "unstable-msc2870", + "unstable-msc3026", + "unstable-msc3061", + "unstable-msc3575", + "unstable-msc4121", + "unstable-msc4125", + "unstable-extensible-events", ] -[workspace.dependencies.rustyline-async] -version = "0.4.3" -default-features = false +[dependencies.hickory-resolver] +git = "https://github.com/hickory-dns/hickory-dns" +rev = "94ac564c3f677e038f7255ddb762e9301d0f2c5d" -[workspace.dependencies.termimad] -version = "0.31.2" -default-features = false +[dependencies.rust-rocksdb] +git = "https://github.com/zaidoon1/rust-rocksdb" +branch = "master" +#rev = "60f783b06b49d2f6fcf1d3dda66c7194e49095d4" +optional = true +default-features = true +features = ["multi-threaded-cf", "zstd"] -[workspace.dependencies.checked_ops] -version = "0.1" +[dependencies.rusqlite] +git = "https://github.com/rusqlite/rusqlite" +#branch = "master" +rev = "e00b626e2b1c67347d789fb7f600281705c89381" +optional = true +features = ["bundled"] -[workspace.dependencies.syn] -version = "2.0" -default-features = false -features = ["full", "extra-traits"] +# used only by rusqlite +[dependencies.parking_lot] +version = "0.12.1" +optional = true -[workspace.dependencies.quote] -version = "1.0" +# used only by rusqlite +[dependencies.thread_local] 
+version = "1.1.8" +optional = true -[workspace.dependencies.proc-macro2] -version = "1.0" +# used only by rusqlite and rust-rocksdb +[dependencies.num_cpus] +version = "1.16.0" +optional = true -[workspace.dependencies.bytesize] -version = "2.0" +[dependencies.tokio] +version = "1.37.0" +features = ["fs", "macros", "sync", "signal"] -[workspace.dependencies.core_affinity] -version = "0.8.1" +# *nix-specific dependencies +[target.'cfg(unix)'.dependencies] +nix = { version = "0.28.0", features = ["resource"] } +sd-notify = { version = "0.4.1", optional = true } # systemd is only available/relevant on *nix platforms +hyperlocal = { git = "https://github.com/softprops/hyperlocal", rev = "2ee4d149644600d326559af0d2b235c945b05c04", features = [ + "server", +] } # unix socket support -[workspace.dependencies.libc] -version = "0.2" +[target.'cfg(all(not(target_env = "msvc"), not(target_os = "macos"), target_os = "linux"))'.dependencies] +hardened_malloc-rs = { version = "0.1", optional = true, features = [ + "static", + "clang", + "light", +], default-features = false } +#hardened_malloc-rs = { optional = true, features = ["static","clang","light"], path = "../hardened_malloc-rs", default-features = false } -[workspace.dependencies.num-traits] -version = "0.2" -[workspace.dependencies.minicbor] -version = "0.26.3" -features = ["std"] +[features] +default = [ + "backend_rocksdb", + "systemd", + "element_hacks", + "sentry_telemetry", + "gzip_compression", + "brotli_compression", + "zstd_compression", +] +backend_sqlite = ["sqlite"] +backend_rocksdb = ["rocksdb"] +rocksdb = ["rust-rocksdb", "num_cpus"] +jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator", "rust-rocksdb/jemalloc"] +sqlite = ["rusqlite", "parking_lot", "thread_local", "num_cpus"] +systemd = ["sd-notify"] +sentry_telemetry = ["sentry", "sentry-tracing", "sentry-tower"] -[workspace.dependencies.minicbor-serde] -version = "0.4.1" -features = ["std"] +gzip_compression = ["tower-http/compression-gzip", 
"reqwest/gzip"] +zstd_compression = ["tower-http/compression-zstd"] +brotli_compression = ["tower-http/compression-br", "reqwest/brotli"] -[workspace.dependencies.maplit] -version = "1.0.2" +sha256_media = ["sha2"] +io_uring = ["rust-rocksdb/io-uring"] +axum_dual_protocol = ["axum-server-dual-protocol"] +perf_measurements = [ + "opentelemetry", + "tracing-flame", + "tracing-opentelemetry", + "opentelemetry_sdk", + "opentelemetry-jaeger", +] + +hardened_malloc = ["hardened_malloc-rs"] + +# client/server interopability hacks # -# Patches -# +## element has various non-spec compliant behaviour +element_hacks = [] -# backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing. -# we can switch back to upstream if #2956 is merged and backported in the upstream repo. -# https://forgejo.ellis.link/continuwuation/tracing/commit/b348dca742af641c47bc390261f60711c2af573c -[patch.crates-io.tracing-subscriber] -git = "https://forgejo.ellis.link/continuwuation/tracing" -rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" -[patch.crates-io.tracing] -git = "https://forgejo.ellis.link/continuwuation/tracing" -rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" -[patch.crates-io.tracing-core] -git = "https://forgejo.ellis.link/continuwuation/tracing" -rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" -[patch.crates-io.tracing-log] -git = "https://forgejo.ellis.link/continuwuation/tracing" -rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" -# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 -# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b -[patch.crates-io.rustyline-async] -git = "https://forgejo.ellis.link/continuwuation/rustyline-async" -rev = "deaeb0694e2083f53d363b648da06e10fc13900c" +[[bin]] +name = "conduit" +path = "src/main.rs" -# adds LIFO queue scheduling; this should be updated 
with PR progress. -[patch.crates-io.event-listener] -git = "https://forgejo.ellis.link/continuwuation/event-listener" -rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d" -[patch.crates-io.async-channel] -git = "https://forgejo.ellis.link/continuwuation/async-channel" -rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280" +[lib] +name = "conduit" +path = "src/lib.rs" -# adds affinity masks for selecting more than one core at a time -[patch.crates-io.core_affinity] -git = "https://forgejo.ellis.link/continuwuation/core_affinity_rs" -rev = "9c8e51510c35077df888ee72a36b4b05637147da" +[package.metadata.deb] +name = "matrix-conduit" +maintainer = "strawberry " +copyright = "2024, Timo Kösters " +license-file = ["LICENSE", "3"] +depends = "$auto, ca-certificates" +extended-description = """\ +a cool fork of Conduit, a Matrix homeserver written in Rust""" +section = "net" +priority = "optional" +assets = [ + [ + "debian/README.md", + "usr/share/doc/matrix-conduit/README.Debian", + "644", + ], + [ + "README.md", + "usr/share/doc/matrix-conduit/", + "644", + ], + [ + "target/release/conduit", + "usr/sbin/matrix-conduit", + "755", + ], + [ + "conduwuit-example.toml", + "etc/matrix-conduit/conduit.toml", + "640", + ], +] +conf-files = ["/etc/matrix-conduit/conduit.toml"] +maintainer-scripts = "debian/" +systemd-units = { unit-name = "matrix-conduit" } -# reverts hyperium#148 conflicting with our delicate federation resolver hooks -[patch.crates-io.hyper-util] -git = "https://forgejo.ellis.link/continuwuation/hyper-util" -rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" -# allows no-aaaa option in resolv.conf -# bumps rust edition and toolchain to 1.86.0 and 2024 -# use sat_add on line number errors -[patch.crates-io.resolv-conf] -git = "https://forgejo.ellis.link/continuwuation/resolv-conf" -rev = "200e958941d522a70c5877e3d846f55b5586c68d" - -# -# Our crates -# - -[workspace.dependencies.conduwuit-router] -package = "conduwuit_router" -path = "src/router" -default-features = false - 
-[workspace.dependencies.conduwuit-admin] -package = "conduwuit_admin" -path = "src/admin" -default-features = false - -[workspace.dependencies.conduwuit-api] -package = "conduwuit_api" -path = "src/api" -default-features = false - -[workspace.dependencies.conduwuit-service] -package = "conduwuit_service" -path = "src/service" -default-features = false - -[workspace.dependencies.conduwuit-database] -package = "conduwuit_database" -path = "src/database" -default-features = false - -[workspace.dependencies.conduwuit-core] -package = "conduwuit_core" -path = "src/core" -default-features = false - -[workspace.dependencies.conduwuit-macros] -package = "conduwuit_macros" -path = "src/macros" -default-features = false - -############################################################################### -# -# Release profiles -# +[profile.dev] +debug = 0 +lto = 'off' +codegen-units = 512 +incremental = true +# seems to speed up continuous debug compilations +[profile.dev.build-override] +opt-level = 3 +[profile.dev.package."*"] # external dependencies +opt-level = 1 +[profile.dev.package."tokio"] +opt-level = 3 +# default release profile [profile.release] +lto = 'thin' +incremental = false +opt-level = 3 +overflow-checks = true strip = "symbols" -lto = "thin" - -# release profile with debug symbols -[profile.release-debuginfo] -inherits = "release" -debug = "full" -strip = "none" +control-flow-guard = true # Windows only +debug = 0 +# high performance release profile which uses fat LTO across all crates, 1 codegen unit, max opt-level, and optimises across all crates [profile.release-high-perf] inherits = "release" -lto = "fat" +lto = 'fat' codegen-units = 1 panic = "abort" -# do not use without profile-rustflags enabled -[profile.release-max-perf] -inherits = "release" -strip = "symbols" -lto = "fat" -#rustflags = [ -# '-Ctarget-cpu=native', -# '-Ztune-cpu=native', -# '-Ctarget-feature=+crt-static', -# '-Crelocation-model=static', -# '-Ztls-model=local-exec', -# 
'-Zinline-in-all-cgus=true', -# '-Zinline-mir=true', -# '-Zmir-opt-level=3', -# '-Clink-arg=-fuse-ld=gold', -# '-Clink-arg=-Wl,--threads', -# '-Clink-arg=-Wl,--gc-sections', -# '-Clink-arg=-luring', -# '-Clink-arg=-lstdc++', -# '-Clink-arg=-lc', -# '-Ztime-passes', -# '-Ztime-llvm-passes', -#] - -[profile.release-max-perf.build-override] -inherits = "release-max-perf" -opt-level = 0 -codegen-units = 32 -#rustflags = [ -# '-Crelocation-model=pic', -# '-Ctarget-feature=-crt-static', -# '-Clink-arg=-Wl,--no-gc-sections', -#] - -[profile.release-max-perf.package.conduwuit_macros] -inherits = "release-max-perf.build-override" -#rustflags = [ -# '-Crelocation-model=pic', -# '-Ctarget-feature=-crt-static', -#] - -[profile.bench] -inherits = "release" -#rustflags = [ -# "-Cremark=all", -# '-Ztime-passes', -# '-Ztime-llvm-passes', -#] - -############################################################################### -# -# Developer profile -# - -# To enable hot-reloading: -# 1. Uncomment all of the rustflags here. -# 2. Uncomment crate-type=dylib in src/*/Cargo.toml -# -# opt-level, mir-opt-level, validate-mir are not known to interfere with reloading -# and can be raised if build times are tolerable. 
- -[profile.dev] -debug = "full" -opt-level = 0 -panic = "unwind" -debug-assertions = true -incremental = true -#rustflags = [ -# '--cfg', 'conduwuit_mods', -# '-Ztime-passes', -# '-Zmir-opt-level=0', -# '-Zvalidate-mir=false', -# '-Ztls-model=global-dynamic', -# '-Cprefer-dynamic=true', -# '-Zstaticlib-prefer-dynamic=true', -# '-Zstaticlib-allow-rdylib-deps=true', -# '-Zpacked-bundled-libs=false', -# '-Zplt=true', -# '-Crpath=true', -# '-Clink-arg=-Wl,--as-needed', -# '-Clink-arg=-Wl,--allow-shlib-undefined', -# '-Clink-arg=-Wl,-z,keep-text-section-prefix', -# '-Clink-arg=-Wl,-z,lazy', -#] - -[profile.dev.package.conduwuit_core] -inherits = "dev" -incremental = false -#rustflags = [ -# '--cfg', 'conduwuit_mods', -# '-Ztime-passes', -# '-Zmir-opt-level=0', -# '-Ztls-model=initial-exec', -# '-Cprefer-dynamic=true', -# '-Zstaticlib-prefer-dynamic=true', -# '-Zstaticlib-allow-rdylib-deps=true', -# '-Zpacked-bundled-libs=false', -# '-Zplt=true', -# '-Clink-arg=-Wl,--as-needed', -# '-Clink-arg=-Wl,--allow-shlib-undefined', -# '-Clink-arg=-Wl,-z,lazy', -# '-Clink-arg=-Wl,-z,unique', -# '-Clink-arg=-Wl,-z,nodlopen', -# '-Clink-arg=-Wl,-z,nodelete', -#] - -[profile.dev.package.conduwuit] -inherits = "dev" -#rustflags = [ -# '--cfg', 'conduwuit_mods', -# '-Ztime-passes', -# '-Zmir-opt-level=0', -# '-Zvalidate-mir=false', -# '-Ztls-model=global-dynamic', -# '-Cprefer-dynamic=true', -# '-Zexport-executable-symbols=true', -# '-Zplt=true', -# '-Crpath=true', -# '-Clink-arg=-Wl,--as-needed', -# '-Clink-arg=-Wl,--allow-shlib-undefined', -# '-Clink-arg=-Wl,--export-dynamic', -# '-Clink-arg=-Wl,-z,lazy', -#] - -[profile.dev.package.'*'] -inherits = "dev" -debug = 'limited' -incremental = false -codegen-units = 1 -opt-level = 'z' -#rustflags = [ -# '--cfg', 'conduwuit_mods', -# '-Ztls-model=global-dynamic', -# '-Cprefer-dynamic=true', -# '-Zstaticlib-prefer-dynamic=true', -# '-Zstaticlib-allow-rdylib-deps=true', -# '-Zpacked-bundled-libs=true', -# '-Zplt=true', -# 
'-Clink-arg=-Wl,--as-needed', -# '-Clink-arg=-Wl,-z,lazy', -# '-Clink-arg=-Wl,-z,nodelete', -#] - -# primarily used for CI -[profile.test] -inherits = "dev" -strip = false -opt-level = 0 -codegen-units = 16 -incremental = false - -[profile.test.package.'*'] -inherits = "dev" +# For releases also try to max optimizations for dependencies: +[profile.release-high-perf.build-override] debug = 0 -strip = false -opt-level = 0 -codegen-units = 16 -incremental = false +opt-level = 3 +codegen-units = 1 -############################################################################### -# -# Linting -# +[profile.release-high-perf.package."*"] +debug = 0 +opt-level = 3 +codegen-units = 1 + + +[lints] +workspace = true [workspace.lints.rust] -absolute-paths-not-starting-with-crate = "warn" -#box-pointers = "warn" -deprecated-in-future = "warn" -elided-lifetimes-in-paths = "warn" -explicit-outlives-requirements = "warn" -ffi-unwind-calls = "warn" -keyword-idents = "warn" -macro-use-extern-crate = "warn" -meta-variable-misuse = "warn" -missing-abi = "warn" -#missing-copy-implementations = "warn" # TODO -#missing-debug-implementations = "warn" # TODO -non-ascii-idents = "warn" -rust-2021-incompatible-closure-captures = "warn" -rust-2021-incompatible-or-patterns = "warn" -rust-2021-prefixes-incompatible-syntax = "warn" -rust-2021-prelude-collisions = "warn" -single-use-lifetimes = "warn" -trivial-casts = "warn" -trivial-numeric-casts = "warn" -unit-bindings = "warn" -#unnameable-types = "warn" # TODO -unreachable-pub = "warn" -unsafe-op-in-unsafe-fn = "warn" -unstable-features = "warn" -unused-extern-crates = "warn" -unused-import-braces = "warn" -unused-lifetimes = "warn" -unused-macro-rules = "warn" -unused-qualifications = "warn" -#unused-results = "warn" # TODO +missing_abi = "warn" +noop_method_call = "warn" +pointer_structural_match = "warn" +explicit_outlives_requirements = "warn" +unused_extern_crates = "warn" +unused_import_braces = "warn" +unused_lifetimes = "warn" 
+unused_qualifications = "warn" +unused_macro_rules = "warn" +dead_code = "warn" +elided_lifetimes_in_paths = "warn" +macro_use_extern_crate = "warn" +single_use_lifetimes = "warn" +unsafe_op_in_unsafe_fn = "warn" + +# not in rust 1.75.0 (doesn't break CI but won't check for it) +unit_bindings = "warn" -## some sadness -elided_named_lifetimes = "allow" # TODO! -let_underscore_drop = "allow" -missing_docs = "allow" -# cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g. -# tokio_unstable will warn. -unexpected_cfgs = "allow" # this seems to suggest broken code and is not working correctly unused_braces = "allow" -# buggy, but worth checking on occasionally -unused_crate_dependencies = "allow" -unsafe_code = "allow" -variant_size_differences = "allow" -# we check nightly clippy lints -unknown_lints = "allow" +# some sadness +unreachable_pub = "allow" +missing_docs = "allow" -####################################### -# -# Clippy lints -# [workspace.lints.clippy] +# pedantic = "warn" -################### -cargo = { level = "warn", priority = -1 } +suspicious = "warn" # assume deny in practice +perf = "warn" # assume deny in practice -## some sadness -multiple_crate_versions = { level = "allow", priority = 1 } - -################### -complexity = { level = "warn", priority = -1 } - -################### -correctness = { level = "warn", priority = -1 } - -################### -nursery = { level = "warn", priority = -1 } - -## some sadness -missing_const_for_fn = { level = "allow", priority = 1 } # TODO -option_if_let_else = { level = "allow", priority = 1 } # TODO -redundant_pub_crate = { level = "allow", priority = 1 } # TODO -significant_drop_in_scrutinee = { level = "allow", priority = 1 } # TODO -significant_drop_tightening = { level = "allow", priority = 1 } # TODO - -################### -pedantic = { level = "warn", priority = -1 } - -## some sadness -too_long_first_doc_paragraph = { level = "allow", priority = 1 } 
-doc_markdown = { level = "allow", priority = 1 } -enum_glob_use = { level = "allow", priority = 1 } -if_not_else = { level = "allow", priority = 1 } -if_then_some_else_none = { level = "allow", priority = 1 } -inline_always = { level = "allow", priority = 1 } -match_bool = { level = "allow", priority = 1 } -missing_docs_in_private_items = { level = "allow", priority = 1 } -missing_errors_doc = { level = "allow", priority = 1 } -missing_panics_doc = { level = "allow", priority = 1 } -module_name_repetitions = { level = "allow", priority = 1 } -needless_continue = { level = "allow", priority = 1 } -no_effect_underscore_binding = { level = "allow", priority = 1 } -similar_names = { level = "allow", priority = 1 } -single_match_else = { level = "allow", priority = 1 } -struct_excessive_bools = { level = "allow", priority = 1 } -struct_field_names = { level = "allow", priority = 1 } -unnecessary_wraps = { level = "allow", priority = 1 } -unused_async = { level = "allow", priority = 1 } - -################### -perf = { level = "warn", priority = -1 } - -################### -#restriction = "warn" - -#allow_attributes = "warn" # UNSTABLE -arithmetic_side_effects = "warn" -as_conversions = "warn" -as_underscore = "warn" -assertions_on_result_states = "warn" +redundant_clone = "warn" +cloned_instead_of_copied = "warn" +expl_impl_clone_on_copy = "warn" +unnecessary_cast = "warn" +cast_lossless = "warn" +ptr_as_ptr = "warn" +mut_mut = "warn" +char_lit_as_u8 = "warn" dbg_macro = "warn" +empty_structs_with_brackets = "warn" +get_unwrap = "warn" +negative_feature_names = "warn" +pub_without_shorthand = "warn" +rc_buffer = "warn" +rc_mutex = "warn" +redundant_feature_names = "warn" +redundant_type_annotations = "warn" +rest_pat_in_fully_bound_structs = "warn" +str_to_string = "warn" +string_to_string = "warn" +tests_outside_test_module = "warn" +undocumented_unsafe_blocks = "warn" +unneeded_field_pattern = "warn" +unseparated_literal_suffix = "warn" +wildcard_dependencies = 
"warn" +or_fun_call = "warn" +unnecessary_lazy_evaluations = "warn" +assertions_on_result_states = "warn" default_union_representation = "warn" deref_by_slicing = "warn" empty_drop = "warn" -empty_structs_with_brackets = "warn" exit = "warn" filetype_is_file = "warn" float_cmp_const = "warn" -fn_to_numeric_cast_any = "warn" format_push_string = "warn" -get_unwrap = "warn" impl_trait_in_params = "warn" -let_underscore_untyped = "warn" +ref_to_mut = "warn" lossy_float_literal = "warn" mem_forget = "warn" missing_assert_message = "warn" mutex_atomic = "warn" -pub_without_shorthand = "warn" -rc_buffer = "warn" -rc_mutex = "warn" -redundant_type_annotations = "warn" -rest_pat_in_fully_bound_structs = "warn" semicolon_outside_block = "warn" -str_to_string = "warn" +fn_to_numeric_cast = "warn" +fn_to_numeric_cast_with_truncation = "warn" string_lit_chars_any = "warn" -string_slice = "warn" -string_to_string = "warn" suspicious_xor_used_as_pow = "warn" -tests_outside_test_module = "warn" try_err = "warn" -undocumented_unsafe_blocks = "warn" unnecessary_safety_comment = "warn" unnecessary_safety_doc = "warn" unnecessary_self_imports = "warn" -unneeded_field_pattern = "warn" -unseparated_literal_suffix = "warn" -#unwrap_used = "warn" # TODO verbose_file_reads = "warn" +cast_possible_wrap = "warn" +redundant_closure_for_method_calls = "warn" +large_futures = "warn" +semicolon_if_nothing_returned = "warn" +match_bool = "warn" +struct_excessive_bools = "warn" +must_use_candidate = "warn" +collapsible_else_if = "warn" +inconsistent_struct_constructor = "warn" +manual_string_new = "warn" +zero_sized_map_values = "warn" +unnecessary_box_returns = "warn" +map_unwrap_or = "warn" +implicit_clone = "warn" +match_wildcard_for_single_variants = "warn" +unnecessary_wraps = "warn" +match_same_arms = "warn" +ignored_unit_patterns = "warn" +redundant_else = "warn" +explicit_into_iter_loop = "warn" +used_underscore_binding = "warn" +needless_pass_by_value = "warn" +too_many_lines = "warn" 
+let_underscore_untyped = "warn" +single_match = "warn" +single_match_else = "warn" +explicit_deref_methods = "warn" +explicit_iter_loop = "warn" +manual_let_else = "warn" +trivially_copy_pass_by_ref = "warn" +wildcard_imports = "warn" +checked_conversions = "warn" -################### -style = { level = "warn", priority = -1 } - -## some sadness -# trivial assertions are quite alright -assertions_on_constants = { level = "allow", priority = 1 } -module_inception = { level = "allow", priority = 1 } -obfuscated_if_else = { level = "allow", priority = 1 } - -################### -suspicious = { level = "warn", priority = -1 } - -## some sadness -let_underscore_future = { level = "allow", priority = 1 } - -# rust doesnt understand conduwuit's custom log macros -literal_string_with_formatting_args = { level = "allow", priority = 1 } +# some sadness +missing_errors_doc = "allow" +missing_panics_doc = "allow" +module_name_repetitions = "allow" +if_not_else = "allow" +doc_markdown = "allow" +cast_possible_truncation = "allow" +cast_precision_loss = "allow" +cast_sign_loss = "allow" +same_name_method = "allow" +mod_module_files = "allow" +unwrap_used = "allow" +expect_used = "allow" +if_then_some_else_none = "allow" +let_underscore_must_use = "allow" +map_err_ignore = "allow" +missing_docs_in_private_items = "allow" +multiple_inherent_impl = "allow" +error_impl_error = "allow" +as_conversions = "allow" +string_add = "allow" +string_slice = "allow" +ref_patterns = "allow" diff --git a/README.md b/README.md index bf4f5613..669c1cf8 100644 --- a/README.md +++ b/README.md @@ -1,115 +1,87 @@ -# continuwuity +# conduwuit + +[![CI and Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) - -## A community-driven [Matrix](https://matrix.org/) homeserver in Rust - +### a well maintained fork of [Conduit](https://conduit.rs/) -[continuwuity] is a Matrix homeserver written 
in Rust. -It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver. +Visit the [Conduwuit documentation](https://conduwuit.puppyirl.gay/) for more information. +Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository. +#### What is Matrix? +[Matrix](https://matrix.org) is an open network for secure and decentralized +communication. Users from every Matrix homeserver can chat with users from all +other Matrix servers. You can even use bridges (also called Matrix Appservices) +to communicate with users outside of Matrix, like a community on Discord. -### Why does this exist? +#### What is the goal? -The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features. +An efficient Matrix homeserver that's easy to set up and just works. You can install +it on a mini-computer like the Raspberry Pi to host Matrix for your family, +friends or company. -We aim to provide a stable, well-maintained alternative for current Conduit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver. +#### Can I try it out? -### Who are we? +An official conduwuit server ran by me is available at transfem.dev ([element.transfem.dev](https://element.transfem.dev) / [cinny.transfem.dev](https://cinny.transfem.dev)) -We are a group of Matrix enthusiasts, developers and system administrators who have used conduwuit and believe in its potential. Our team includes both previous -contributors to the original project and new developers who want to help maintain and improve this important piece of Matrix infrastructure. +#### What is the current status? -We operate as an open community project, welcoming contributions from anyone interested in improving continuwuity. 
+conduwuit is a fork of Conduit which is in beta, meaning you can join and participate in most +Matrix rooms, but not all features are supported and you might run into bugs +from time to time. -### What is Matrix? +#### Why does this fork exist? Why don't you contribute back upstream? -[Matrix](https://matrix.org) is an open, federated, and extensible network for -decentralized communication. Users from any Matrix homeserver can chat with users from all -other homeservers over federation. Matrix is designed to be extensible and built on top of. -You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord. - -### What are the project's goals? - -Continuwuity aims to: - -- Maintain a stable, reliable Matrix homeserver implementation in Rust -- Improve compatibility and specification compliance with the Matrix protocol -- Fix bugs and performance issues from the original conduwuit -- Add missing features needed by homeserver administrators -- Provide comprehensive documentation and easy deployment options -- Create a sustainable development model for long-term maintenance -- Keep a lightweight, efficient codebase that can run on modest hardware - -### Can I try it out? - -Check out the [documentation](introduction) for installation instructions. - -There are currently no open registration Continuwuity instances available. - -### What are we working on? - -We're working our way through all of the issues in the [Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues). 
- -- [Replacing old conduwuit links with working continuwuity links](https://forgejo.ellis.link/continuwuation/continuwuity/issues/742) -- [Getting CI and docs deployment working on the new Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues/740) -- [Packaging & availability in more places](https://forgejo.ellis.link/continuwuation/continuwuity/issues/747) -- [Appservices bugs & features](https://forgejo.ellis.link/continuwuation/continuwuity/issues?q=&type=all&state=open&labels=178&milestone=0&assignee=0&poster=0) -- [Improving compatibility and spec compliance](https://forgejo.ellis.link/continuwuation/continuwuity/issues?labels=119) -- Automated testing -- [Admin API](https://forgejo.ellis.link/continuwuation/continuwuity/issues/748) -- [Policy-list controlled moderation](https://forgejo.ellis.link/continuwuation/continuwuity/issues/750) - -### Can I migrate my data from x? - -- Conduwuit: Yes -- Conduit: No, database is now incompatible -- Grapevine: No, database is now incompatible -- Dendrite: No -- Synapse: No - -We haven't written up a guide on migrating from incompatible homeservers yet. Reach out to us if you need to do this! +I now intend on contributing back as time and mental energy sees fit, but my fork still exists as a way to: +- avoid unnecessary Matrix and general developer politics +- avoid bikeshedding unnecessary or irrelevant things in upstream MRs +- Fast tracked bug fixes, performance improvements, security improvements, and new features +- Have early access to MRs that may not be suitable/acceptable for Conduit (e.g. too niche, too advanced for general users, only being blocked due to pending on contributor actions that we can fix ourselves downstream, pending Matrix spec stuff, etc) +- Support unspecced or WIP features +- Have official support for other OS's like Windows, macOS, and BSD. 
+- Have a **stable** testing ground for some MRs or new features and bug fixes +And various other reasons that may not be listed here. -## Contribution - -### Development flow - -- Features / changes must developed in a separate branch -- For each change, create a descriptive PR -- Your code will be reviewed by one or more of the continuwuity developers -- The branch will be deployed live on multiple tester's matrix servers to shake out bugs -- Once all testers and reviewers have agreed, the PR will be merged to the main branch -- The main branch will have nightly builds deployed to users on the cutting edge -- Every week or two, a new release is cut. - -The main branch is always green! - - -### Policy on pulling from other forks - -We welcome contributions from other forks of conduwuit, subject to our review process. -When incorporating code from other forks: - -- All external contributions must go through our standard PR process -- Code must meet our quality standards and pass tests -- Code changes will require testing on multiple test servers before merging -- Attribution will be given to original authors and forks -- We prioritize stability and compatibility when evaluating external contributions -- Features that align with our project goals will be given priority consideration - +#### How can I contribute? + +1. Look for an issue you would like to work on and make sure it's not assigned + to other users +2. Ask someone to assign the issue to you (comment on the issue or chat in + [#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay)) +3. Fork the repo and work on the issue. +4. Submit a PR (please keep contributions to the GitHub repo, main development is done here, not the GitLab repo which exists just as a mirror. If you are avoiding GitHub, feel free to join our Matrix chat to get your patch in.) 
#### Contact -Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [space](https://matrix.to/#/#space:continuwuity.org) to chat with us about the project! +If you run into any question, feel free to +- Ask us in `#conduwuit:puppygock.gay` on Matrix +- [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) +#### Donate + +- Liberapay: +- Ko-fi: +- GitHub Sponsors: + +#### Logo + +No official conduwuit logo exists. Repo and Matrix room picture is from bran (<3). Banner image is directly from [this cohost post](https://cohost.org/RatBaby/post/1028290-finally-a-flag-for). + +#### Is it conduwuit or Conduwuit? + +Both. + +#### Mirrors of conduwuit + +- GitHub: +- GitLab: +- git.gay: +- Codeberg: +- sourcehut: - - -[continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity - diff --git a/alpine/APKBUILD b/alpine/APKBUILD deleted file mode 100644 index 97f84f65..00000000 --- a/alpine/APKBUILD +++ /dev/null @@ -1,63 +0,0 @@ -# Contributor: magmaus3 -# Maintainer: magmaus3 -pkgname=continuwuity - -# abuild doesn't like the format of v0.5.0-rc.5, so i had to change it -# see https://wiki.alpinelinux.org/wiki/Package_policies -pkgver=0.5.0_rc5 -pkgrel=0 -pkgdesc="a continuwuation of a very cool, featureful fork of conduit" -url="https://continuwuity.org/" -arch="all" -license="Apache-2.0" -depends="liburing" - -# cargo version on alpine v3.21 is too old to use the 2024 edition -# i recommend either building everything on edge, or adding -# the edge repo as a tag -makedepends="cargo liburing-dev clang-dev linux-headers" -checkdepends="" -install="$pkgname.pre-install" -subpackages="$pkgname-openrc" -source="https://forgejo.ellis.link/continuwuation/continuwuity/archive/v0.5.0-rc.5.tar.gz -continuwuity.initd -continuwuity.confd -" -builddir="$srcdir/continuwuity" -options="net !check" - -prepare() { - default_prepare - cd $srcdir/continuwuity - - # add the default database path to the config (commented out) - cat 
conduwuit-example.toml \ - | sed '/#database_path/ s:$: "/var/lib/continuwuity":' \ - > "$srcdir"/continuwuity.toml - - cargo fetch --target="$CTARGET" --locked -} - -build() { - cargo build --frozen --release --all-features -} - -check() { - # TODO: make sure the tests work - #cargo test --frozen - return -} - -package() { - cd $srcdir - install -Dm755 continuwuity/target/release/conduwuit "$pkgdir"/usr/bin/continuwuity - install -Dm644 "$srcdir"/continuwuity.toml -t "$pkgdir"/etc/continuwuity - install -Dm755 "$srcdir"/continuwuity.initd "$pkgdir"/etc/init.d/continuwuity - install -Dm644 "$srcdir"/continuwuity.confd "$pkgdir"/etc/conf.d/continuwuity -} - -sha512sums=" -66f6da5e98b6f7bb8c1082500101d5c87b1b79955c139b44c6ef5123919fb05feb0dffc669a3af1bc8d571ddb9f3576660f08dc10a6b19eab6db9e391175436a v0.5.0-rc.5.tar.gz -0482674be24740496d70da256d4121c5a5e3b749f2445d2bbe0e8991f1449de052724f8427da21a6f55574bc53eac9ca1e47e5012b4c13049b2b39044734d80d continuwuity.initd -38e2576278b450d16ba804dd8f4a128f18cd793e6c3ce55aedee1e186905755b31ee23baaa6586b1ab0e25a1f29bf1ea86bfaae4185b0cb1a29203726a199426 continuwuity.confd -" diff --git a/alpine/README.md b/alpine/README.md deleted file mode 100644 index 5f26d772..00000000 --- a/alpine/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# building - -1. [set up your build - environment](https://wiki.alpinelinux.org/wiki/Include:Setup_your_system_and_account_for_building_packages) - -2. 
run `abuild` (or `abuild -K` if you want to keep the source directory to make - rebuilding faster) diff --git a/alpine/continuwuity.confd b/alpine/continuwuity.confd deleted file mode 100644 index 03d7b0a0..00000000 --- a/alpine/continuwuity.confd +++ /dev/null @@ -1,3 +0,0 @@ -supervisor=supervise-daemon -export CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml - diff --git a/alpine/continuwuity.initd b/alpine/continuwuity.initd deleted file mode 100644 index 1354f4bd..00000000 --- a/alpine/continuwuity.initd +++ /dev/null @@ -1,19 +0,0 @@ -#!/sbin/openrc-run - -command="/usr/bin/continuwuity" -command_user="continuwuity:continuwuity" -command_args="--config ${CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml}" -command_background=true -pidfile="/run/$RC_SVCNAME.pid" - -output_log="/var/log/continuwuity.log" -error_log="/var/log/continuwuity.log" - -depend() { - need net -} - -start_pre() { - checkpath -d -m 0755 -o "$command_user" /var/lib/continuwuity - checkpath -f -m 0644 -o "$command_user" "$output_log" -} diff --git a/alpine/continuwuity.pre-install b/alpine/continuwuity.pre-install deleted file mode 100644 index edac789f..00000000 --- a/alpine/continuwuity.pre-install +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -addgroup -S continuwuity 2>/dev/null -adduser -S -D -H -h /var/lib/continuwuity -s /sbin/nologin -G continuwuity -g continuwuity continuwuity 2>/dev/null -exit 0 diff --git a/arch/conduwuit.service b/arch/conduwuit.service deleted file mode 100644 index 4f45ddc0..00000000 --- a/arch/conduwuit.service +++ /dev/null @@ -1,77 +0,0 @@ -[Unit] -Description=conduwuit Matrix homeserver -Wants=network-online.target -After=network-online.target -Documentation=https://conduwuit.puppyirl.gay/ -RequiresMountsFor=/var/lib/private/conduwuit -Alias=matrix-conduwuit.service - -[Service] -DynamicUser=yes -Type=notify-reload -ReloadSignal=SIGUSR1 - -TTYPath=/dev/tty25 -DeviceAllow=char-tty -StandardInput=tty-force -StandardOutput=tty 
-StandardError=journal+console -TTYReset=yes -# uncomment to allow buffer to be cleared every restart -TTYVTDisallocate=no - -TTYColumns=120 -TTYRows=40 - -AmbientCapabilities= -CapabilityBoundingSet= - -DevicePolicy=closed -LockPersonality=yes -MemoryDenyWriteExecute=yes -NoNewPrivileges=yes -#ProcSubset=pid -ProtectClock=yes -ProtectControlGroups=yes -ProtectHome=yes -ProtectHostname=yes -ProtectKernelLogs=yes -ProtectKernelModules=yes -ProtectKernelTunables=yes -ProtectProc=invisible -ProtectSystem=strict -PrivateDevices=yes -PrivateMounts=yes -PrivateTmp=yes -PrivateUsers=yes -PrivateIPC=yes -RemoveIPC=yes -RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX -RestrictNamespaces=yes -RestrictRealtime=yes -RestrictSUIDSGID=yes -SystemCallArchitectures=native -SystemCallFilter=@system-service @resources -SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc -SystemCallErrorNumber=EPERM -StateDirectory=conduwuit - -RuntimeDirectory=conduwuit -RuntimeDirectoryMode=0750 - -Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" -BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit -BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit - -ExecStart=/usr/bin/conduwuit -Restart=on-failure -RestartSec=5 - -TimeoutStopSec=4m -TimeoutStartSec=4m - -StartLimitInterval=1m -StartLimitBurst=5 - -[Install] -WantedBy=multi-user.target diff --git a/bin/complement b/bin/complement index c437503e..a0991e95 100755 --- a/bin/complement +++ b/bin/complement @@ -3,93 +3,35 @@ set -euo pipefail # Path to Complement's source code -# -# The `COMPLEMENT_SRC` environment variable is set in the Nix dev shell, which -# points to a store path containing the Complement source code. It's likely you -# want to just pass that as the first argument to use it here. 
-COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}" +COMPLEMENT_SRC="$1" # A `.jsonl` file to write test logs to -LOG_FILE="${2:-complement_test_logs.jsonl}" +LOG_FILE="$2" # A `.jsonl` file to write test results to -RESULTS_FILE="${3:-complement_test_results.jsonl}" +RESULTS_FILE="$3" -COMPLEMENT_BASE_IMAGE="${COMPLEMENT_BASE_IMAGE:-complement-conduwuit:main}" +OCI_IMAGE="complement-conduit:dev" -# Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time -SKIPPED_COMPLEMENT_TESTS='TestPartialStateJoin.*|TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.*|TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias|TestUnbanViaInvite.*|TestRoomState/Parallel/GET_/publicRooms_lists.*"|TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other.*' - -# $COMPLEMENT_SRC needs to be a directory to Complement source code -if [ -f "$COMPLEMENT_SRC" ]; then - echo "\$COMPLEMENT_SRC must be a directory/path to Complement source code" - exit 1 -fi - -# quick test to make sure we can actually write to $LOG_FILE and $RESULTS_FILE -touch $LOG_FILE && rm -v $LOG_FILE -touch $RESULTS_FILE && rm -v $RESULTS_FILE - -toplevel="$(git rev-parse --show-toplevel)" - -pushd "$toplevel" > /dev/null - -if [ ! 
-f "complement_oci_image.tar.gz" ]; then - echo "building complement conduwuit image" - - # if using macOS, use linux-complement - #bin/nix-build-and-cache just .#linux-complement - bin/nix-build-and-cache just .#complement - #nix build -L .#complement - - echo "complement conduwuit image tar.gz built at \"result\"" - - echo "loading into docker" - docker load < result - popd > /dev/null -else - echo "skipping building a complement conduwuit image as complement_oci_image.tar.gz was already found, loading this" - - docker load < complement_oci_image.tar.gz - popd > /dev/null -fi - -echo "" -echo "running go test with:" -echo "\$COMPLEMENT_SRC: $COMPLEMENT_SRC" -echo "\$COMPLEMENT_BASE_IMAGE: $COMPLEMENT_BASE_IMAGE" -echo "\$RESULTS_FILE: $RESULTS_FILE" -echo "\$LOG_FILE: $LOG_FILE" -echo "" +env \ + -C "$(git rev-parse --show-toplevel)" \ + docker build \ + --tag "$OCI_IMAGE" \ + --file tests/complement/Dockerfile \ + . # It's okay (likely, even) that `go test` exits nonzero -# `COMPLEMENT_ENABLE_DIRTY_RUNS=1` reuses the same complement container for faster complement, at the possible expense of test environment pollution set +o pipefail env \ -C "$COMPLEMENT_SRC" \ - COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - go test -tags="conduwuit_blacklist" -skip="$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" + COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ + go test -vet=all -timeout 30m -json ./tests | tee "$LOG_FILE" set -o pipefail -# Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results -cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' +# Post-process the results into an easy-to-compare format +cat "$LOG_FILE" | jq -c ' select( (.Action == "pass" or .Action == "fail" or .Action == "skip") and .Test != null ) | {Action: .Action, Test: .Test} - ' > "$RESULTS_FILE" - -#if command -v gotestfmt &> /dev/null; then -# echo "using gotestfmt on $LOG_FILE" -# grep '{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" -#fi - -echo "" -echo "" -echo "complement logs saved at $LOG_FILE" -echo "complement results saved at $RESULTS_FILE" -#if command -v gotestfmt &> /dev/null; then -# echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" -#fi -echo "" -echo "" + ' | sort > "$RESULTS_FILE" diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache index ac64ff23..6dcc13b6 100755 --- a/bin/nix-build-and-cache +++ b/bin/nix-build-and-cache @@ -2,109 +2,40 @@ set -eo pipefail -toplevel="$(git rev-parse --show-toplevel)" +# The first argument must be the desired installable +INSTALLABLE="$1" -# Build just the single installable and forward any other arguments too -just() { - # uses nix-output-monitor (nom) if available - if command -v nom &> /dev/null; then - nom build "$@" - else - nix build -L "$@" - fi +# Build the installable and forward any other arguments too +nix build -L "$@" - if [ -z "$ATTIC_TOKEN" ]; then - echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" - return - fi - - # historical "conduit" store for compatibility purposes, same as conduwuit - nix run --inputs-from "$toplevel" attic -- \ +if [ ! 
-z "$ATTIC_TOKEN" ]; then + nix run --inputs-from . attic -- \ login \ conduit \ "${ATTIC_ENDPOINT:-https://attic.kennel.juneis.dog/conduit}" \ "$ATTIC_TOKEN" - # Find all output paths of the installables and their build dependencies - #readarray -t derivations < <(nix path-info --derivation "$@") - derivations=() - while IFS=$'\n' read derivation; do - derivations+=("$derivation") - done < <(nix path-info --derivation "$@") + # Push the target installable and its build dependencies + nix run --inputs-from . attic -- \ + push \ + conduit \ + "$(nix path-info "$INSTALLABLE" --derivation)" \ + "$(nix path-info "$INSTALLABLE")" - cache=() - for derivation in "${derivations[@]}"; do - cache+=( - "$(nix-store --query --requisites --include-outputs "$derivation")" - ) - done - withattic() { - nix shell --inputs-from "$toplevel" attic --command xargs attic push "$@" <<< "${cache[*]}" - } - # Upload them to Attic (conduit store) - # - # Use `xargs` and a here-string because something would probably explode if - # several thousand arguments got passed to a command at once. Hopefully no - # store paths include a newline in them. - ( - IFS=$'\n' - withattic conduit || withattic conduit || withattic conduit || true - ) - - # main "conduwuit" store - nix run --inputs-from "$toplevel" attic -- \ + # push to "conduwuit" too + nix run --inputs-from . attic -- \ login \ conduwuit \ "${ATTIC_ENDPOINT:-https://attic.kennel.juneis.dog/conduwuit}" \ "$ATTIC_TOKEN" - # Upload them to Attic (conduwuit store) and Cachix - # - # Use `xargs` and a here-string because something would probably explode if - # several thousand arguments got passed to a command at once. Hopefully no - # store paths include a newline in them. 
- ( - IFS=$'\n' - withattic conduwuit || withattic conduwuit || withattic conduwuit || true - - # push to cachix if available - if [ "$CACHIX_AUTH_TOKEN" ]; then - nix shell --inputs-from "$toplevel" cachix -c xargs \ - cachix push conduwuit <<< "${cache[*]}" - fi - ) -} - -# Build and cache things needed for CI -ci() { - cache=( - --inputs-from "$toplevel" - - # Keep sorted - #"$toplevel#devShells.x86_64-linux.default" - #"$toplevel#devShells.x86_64-linux.all-features" - attic#default - cachix#default - nixpkgs#direnv - nixpkgs#jq - nixpkgs#nix-direnv - ) - - just "${cache[@]}" -} - -# Build and cache *all* the package outputs from the flake.nix -packages() { - declare -a cache="($( - nix flake show --json 2> /dev/null | - nix run --inputs-from "$toplevel" nixpkgs#jq -- \ - -r \ - '.packages."x86_64-linux" | keys | map("'"$toplevel"'#" + .) | @sh' - ))" - - just "${cache[@]}" -} - - -eval "$@" + # Push the target installable and its build dependencies + nix run --inputs-from . attic -- \ + push \ + conduwuit \ + "$(nix path-info "$INSTALLABLE" --derivation)" \ + "$(nix path-info "$INSTALLABLE")" +else + echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" +fi diff --git a/book.toml b/book.toml index 46d3a7b0..2b5d0baa 100644 --- a/book.toml +++ b/book.toml @@ -1,24 +1,18 @@ [book] -title = "continuwuity" -description = "continuwuity is a community continuation of the conduwuit Matrix homeserver, written in Rust." 
+title = "conduwuit" +description = "conduwuit, which is a fork of Conduit, is a simple, fast and reliable chat server for the Matrix protocol" language = "en" -authors = ["The continuwuity Community"] -text-direction = "ltr" multilingual = false src = "docs" [build] build-dir = "public" create-missing = true -extra-watch-dirs = ["debian", "docs"] - -[rust] -edition = "2024" [output.html] -edit-url-template = "https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/{path}" -git-repository-url = "https://forgejo.ellis.link/continuwuation/continuwuity" -git-repository-icon = "fa-git-alt" +git-repository-url = "https://github.com/girlbossceo/conduwuit" +edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}" +git-repository-icon = "fa-github-square" [output.html.search] limit-results = 15 diff --git a/clippy.toml b/clippy.toml index 863759aa..afa92de4 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,20 +1 @@ -array-size-threshold = 4096 -cognitive-complexity-threshold = 94 # TODO reduce me ALARA -excessive-nesting-threshold = 11 # TODO reduce me to 4 or 5 -future-size-threshold = 7745 # TODO reduce me ALARA -stack-size-threshold = 196608 # TODO reduce me ALARA -too-many-lines-threshold = 780 # TODO reduce me to <= 100 -type-complexity-threshold = 250 # reduce me to ~200 -large-error-threshold = 256 # TODO reduce me ALARA - -disallowed-macros = [ - { path = "log::error", reason = "use conduwuit_core::error" }, - { path = "log::warn", reason = "use conduwuit_core::warn" }, - { path = "log::info", reason = "use conduwuit_core::info" }, - { path = "log::debug", reason = "use conduwuit_core::debug" }, - { path = "log::trace", reason = "use conduwuit_core::trace" }, -] - -disallowed-methods = [ - { path = "tokio::spawn", reason = "use and pass conduuwit_core::server::Server::runtime() to spawn from" }, -] +too-many-lines-threshold = 700 diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3d92ab15..65a012a9 100644 --- 
a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1,896 +1,455 @@ -### conduwuit Configuration -### -### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE -### OVERWRITTEN! -### -### You should rename this file before configuring your server. Changes to -### documentation and defaults can be contributed in source code at -### src/core/config/mod.rs. This file is generated when building. -### -### Any values pre-populated are the default values for said config option. -### -### At the minimum, you MUST edit all the config options to your environment -### that say "YOU NEED TO EDIT THIS". -### -### For more information, see: -### https://conduwuit.puppyirl.gay/configuration.html +# ============================================================================= +# This is the official example config for conduwuit. +# If you use it for your server, you will need to adjust it to your own needs. +# At the very least, change the server_name field! +# +# This documentation can also be found at https://conduwuit.puppyirl.gay/configuration.html +# ============================================================================= [global] -# The server_name is the pretty name of this server. It is used as a -# suffix for user and room IDs/aliases. -# -# See the docs for reverse proxying and delegation: -# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy -# -# Also see the `[global.well_known]` config section at the very bottom. -# -# Examples of delegation: -# - https://puppygock.gay/.well-known/matrix/server -# - https://puppygock.gay/.well-known/matrix/client -# -# YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE -# WIPE. -# -# example: "conduwuit.woof" -# -#server_name = +# The server_name is the pretty name of this server. It is used as a suffix for user +# and room ids. Examples: matrix.org, conduit.rs -# The default address (IPv4 or IPv6) conduwuit will listen on. 
-# -# If you are using Docker or a container NAT networking setup, this must -# be "0.0.0.0". -# -# To listen on multiple addresses, specify a vector e.g. ["127.0.0.1", -# "::1"] -# -#address = ["127.0.0.1", "::1"] +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). -# The port(s) conduwuit will listen on. +# If that's not possible for you, you can create /.well-known files to redirect +# requests (delegation). See +# https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient +# and +# https://spec.matrix.org/v1.9/server-server-api/#getwell-knownmatrixserver +# for more information + +# YOU NEED TO EDIT THIS +#server_name = "your.server.name" + +# Servers listed here will be used to gather public keys of other servers (notary trusted key servers). # -# For reverse proxying, see: -# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy +# The default behaviour for conduwuit is to attempt to query trusted key servers before querying the individual servers. +# This is done for performance reasons, but if you would like to query individual servers before the notary servers +# configured below, set to # -# If you are using Docker, don't change this, you'll need to map an -# external port to this. +# (Currently, conduwuit doesn't support batched key requests, so this list should only contain Synapse servers) +# Defaults to `matrix.org` +# trusted_servers = ["matrix.org"] + +# Sentry.io crash/panic reporting, performance monitoring/metrics, etc. 
+# Conduwuit's Sentry reporting endpoint is o4506996327251968.ingest.us.sentry.io # +# Defaults to false +#sentry = false + +# Report your Conduwuit server_name in Sentry.io crash reports and metrics +# +# Defaults to false +#sentry_send_server_name = false + +# Performance monitoring/tracing sample rate for Sentry.io +# +# Note that too high values may impact performance, and can be disabled by setting it to 0.0 +# +# Defaults to 0.15 +#sentry_traces_sample_rate = 0.15 + + +### Database configuration + +# This is the only directory where conduwuit will save its data, including media +database_path = "/var/lib/matrix-conduit/" + +# Database backend: Only rocksdb and sqlite are supported. Please note that sqlite +# will perform significantly worse than rocksdb as it is not intended to be used the +# way it is by conduwuit. sqlite only exists for historical reasons. +database_backend = "rocksdb" + + +### Network + +# The port(s) conduwuit will be running on. You need to set up a reverse proxy such as +# Caddy or Nginx so all requests to /_matrix on port 443 and 8448 will be +# forwarded to the conduwuit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this. # To listen on multiple ports, specify a vector e.g. [8080, 8448] -# -#port = 8008 +port = 6167 -# The UNIX socket conduwuit will listen on. -# -# conduwuit cannot listen on both an IP address and a UNIX socket. If -# listening on a UNIX socket, you MUST remove/comment the `address` key. -# -# Remember to make sure that your reverse proxy has access to this socket -# file, either by adding your reverse proxy to the 'conduwuit' group or -# granting world R/W permissions with `unix_socket_perms` (666 minimum). -# -# example: "/run/conduwuit/conduwuit.sock" -# -#unix_socket_path = +# default address (IPv4 or IPv6) conduwuit will listen on. Generally you want this to be +# localhost (127.0.0.1 / ::1). 
If you are using Docker or a container NAT networking setup, you +# likely need this to be 0.0.0.0. +address = "127.0.0.1" -# The default permissions (in octal) to create the UNIX socket with. +# How many requests conduwuit sends to other servers at the same time concurrently. Default is 500 +# Note that because conduwuit is very fast unlike other homeserver implementations, setting this too +# high could inadvertently result in ratelimits kicking in, or overloading lower-end homeservers out there. # +# A valid use-case for enabling this is if you have a significant amount of overall federation activity +# such as many rooms joined/tracked, and many servers in the true destination cache caused by that. Upon +# rebooting conduwuit, depending on how fast your resources are, client and incoming federation requests +# may timeout or be "stalled" for a period of time due to hitting the max concurrent requests limit from +# refreshing federation/destination caches and such. +# +# If you have a lot of active users on your homeserver, you will definitely need to raise this. +# +# No this will not speed up room joins. +#max_concurrent_requests = 500 + +# Max request size for file uploads +max_request_size = 20_000_000 # in bytes + +# Uncomment unix_socket_path to listen on a UNIX socket at the specified path. +# If listening on a UNIX socket, you must remove/comment the 'address' key if defined and add your +# reverse proxy to the 'conduwuit' group, unless world RW permissions are specified with unix_socket_perms (666 minimum). +#unix_socket_path = "/run/conduwuit/conduwuit.sock" #unix_socket_perms = 660 -# This is the only directory where conduwuit will save its data, including -# media. Note: this was previously "/var/lib/matrix-conduit". -# -# YOU NEED TO EDIT THIS. -# -# example: "/var/lib/conduwuit" -# -#database_path = +# Set this to true for conduwuit to compress HTTP response bodies using zstd. 
+# This option does nothing if conduwuit was not built with `zstd_compression` feature. +# Please be aware that enabling HTTP compression may weaken TLS. +# Most users should not need to enable this. +# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this. +zstd_compression = false -# conduwuit supports online database backups using RocksDB's Backup engine -# API. To use this, set a database backup path that conduwuit can write -# to. -# -# For more information, see: -# https://conduwuit.puppyirl.gay/maintenance.html#backups -# -# example: "/opt/conduwuit-db-backups" -# -#database_backup_path = +# Set this to true for conduwuit to compress HTTP response bodies using gzip. +# This option does nothing if conduwuit was not built with `gzip_compression` feature. +# Please be aware that enabling HTTP compression may weaken TLS. +# Most users should not need to enable this. +# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this. +gzip_compression = false -# The amount of online RocksDB database backups to keep/retain, if using -# "database_backup_path", before deleting the oldest one. -# -#database_backups_to_keep = 1 +# Set this to true for conduwuit to compress HTTP response bodies using brotli. +# This option does nothing if conduwuit was not built with `brotli_compression` feature. +# Please be aware that enabling HTTP compression may weaken TLS. +# Most users should not need to enable this. +# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before deciding to enable this. +brotli_compression = false -# Text which will be added to the end of the user's displayname upon -# registration with a space before the text. In Conduit, this was the -# lightning bolt emoji. +# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you do not want conduwuit to send outbound requests to. 
+# Defaults to RFC1918, unroutable, loopback, multicast, and testnet addresses for security. # -# To disable, set this to "" (an empty string). +# To disable, set this to be an empty vector (`[]`). +# Please be aware that this is *not* a guarantee. You should be using a firewall with zones as doing this on the application layer may have bypasses. # -# The default is the trans pride flag. -# -# example: "🏳️‍⚧️" -# -#new_user_displayname_suffix = "🏳️‍⚧️" +# Currently this does not account for proxies in use like Synapse does. +ip_range_denylist = [ + "127.0.0.0/8", + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + "100.64.0.0/10", + "192.0.0.0/24", + "169.254.0.0/16", + "192.88.99.0/24", + "198.18.0.0/15", + "192.0.2.0/24", + "198.51.100.0/24", + "203.0.113.0/24", + "224.0.0.0/4", + "::1/128", + "fe80::/10", + "fc00::/7", + "2001:db8::/32", + "ff00::/8", + "fec0::/10", +] -# If enabled, conduwuit will send a simple GET request periodically to -# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new -# announcements or major updates. This is not an update check endpoint. -# -#allow_announcements_check = true -# Set this to any float value to multiply conduwuit's in-memory LRU caches -# with such as "auth_chain_cache_capacity". -# -# May be useful if you have significant memory to spare to increase -# performance. -# -# If you have low memory, reducing this may be viable. -# -# By default, the individual caches such as "auth_chain_cache_capacity" -# are scaled by your CPU core count. -# -#cache_capacity_modifier = 1.0 +### Moderation / Privacy / Security -# Set this to any float value in megabytes for conduwuit to tell the -# database engine that this much memory is available for database read -# caches. -# -# May be useful if you have significant memory to spare to increase -# performance. -# -# Similar to the individual LRU caches, this is scaled up with your CPU -# core count. -# -# This defaults to 128.0 + (64.0 * CPU core count). 
-# -#db_cache_capacity_mb = varies by system +# Set to true to allow user type "guest" registrations. Element attempts to register guest users automatically. +# Defaults to false +allow_guest_registration = false -# Set this to any float value in megabytes for conduwuit to tell the -# database engine that this much memory is available for database write -# caches. -# -# May be useful if you have significant memory to spare to increase -# performance. -# -# Similar to the individual LRU caches, this is scaled up with your CPU -# core count. -# -# This defaults to 48.0 + (4.0 * CPU core count). -# -#db_write_buffer_capacity_mb = varies by system +# Set to true to log guest registrations in the admin room. +# Defaults to false as it may be noisy or unnecessary. +log_guest_registrations = false -# This item is undocumented. Please contribute documentation for it. -# -#pdu_cache_capacity = varies by system +# Set to true to allow guest registrations/users to auto join any rooms specified in `auto_join_rooms` +# Defaults to false +allow_guests_auto_join_rooms = false -# This item is undocumented. Please contribute documentation for it. -# -#auth_chain_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#shorteventid_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#eventidshort_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#eventid_pdu_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#shortstatekey_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#statekeyshort_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#servernameevent_data_cache_capacity = varies by system - -# This item is undocumented. 
Please contribute documentation for it. -# -#stateinfo_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#roomid_spacehierarchy_cache_capacity = varies by system - -# Maximum entries stored in DNS memory-cache. The size of an entry may -# vary so please take care if raising this value excessively. Only -# decrease this when using an external DNS cache. Please note that -# systemd-resolved does *not* count as an external cache, even when -# configured to do so. -# -#dns_cache_entries = 32768 - -# Minimum time-to-live in seconds for entries in the DNS cache. The -# default may appear high to most administrators; this is by design as the -# majority of NXDOMAINs are correct for a long time (e.g. the server is no -# longer running Matrix). Only decrease this if you are using an external -# DNS cache. -# -#dns_min_ttl = 10800 - -# Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. -# This value is critical for the server to federate efficiently. -# NXDOMAIN's are assumed to not be returning to the federation and -# aggressively cached rather than constantly rechecked. -# -# Defaults to 3 days as these are *very rarely* false negatives. -# -#dns_min_ttl_nxdomain = 259200 - -# Number of DNS nameserver retries after a timeout or error. -# -#dns_attempts = 10 - -# The number of seconds to wait for a reply to a DNS query. Please note -# that recursive queries can take up to several seconds for some domains, -# so this value should not be too low, especially on slower hardware or -# resolvers. -# -#dns_timeout = 10 - -# Fallback to TCP on DNS errors. Set this to false if unsupported by -# nameserver. -# -#dns_tcp_fallback = true - -# Enable to query all nameservers until the domain is found. Referred to -# as "trust_negative_responses" in hickory_resolver. This can avoid -# useless DNS queries if the first nameserver responds with NXDOMAIN or -# an empty NOERROR response. 
-# -#query_all_nameservers = true - -# Enable using *only* TCP for querying your specified nameservers instead -# of UDP. -# -# If you are running conduwuit in a container environment, this config -# option may need to be enabled. For more details, see: -# https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker -# -#query_over_tcp_only = false - -# DNS A/AAAA record lookup strategy -# -# Takes a number of one of the following options: -# 1 - Ipv4Only (Only query for A records, no AAAA/IPv6) -# -# 2 - Ipv6Only (Only query for AAAA records, no A/IPv4) -# -# 3 - Ipv4AndIpv6 (Query for A and AAAA records in parallel, uses whatever -# returns a successful response first) -# -# 4 - Ipv6thenIpv4 (Query for AAAA record, if that fails then query the A -# record) -# -# 5 - Ipv4thenIpv6 (Query for A record, if that fails then query the AAAA -# record) -# -# If you don't have IPv6 networking, then for better DNS performance it -# may be suitable to set this to Ipv4Only (1) as you will never ever use -# the AAAA record contents even if the AAAA record is successful instead -# of the A record. -# -#ip_lookup_strategy = 5 - -# Max request size for file uploads in bytes. Defaults to 20MB. -# -#max_request_size = 20971520 - -# This item is undocumented. Please contribute documentation for it. -# -#max_fetch_prev_events = 192 - -# Default/base connection timeout (seconds). This is used only by URL -# previews and update/news endpoint checks. -# -#request_conn_timeout = 10 - -# Default/base request timeout (seconds). The time waiting to receive more -# data from another server. This is used only by URL previews, -# update/news, and misc endpoint checks. -# -#request_timeout = 35 - -# Default/base request total timeout (seconds). The time limit for a whole -# request. This is set very high to not cancel healthy requests while -# serving as a backstop. This is used only by URL previews and update/news -# endpoint checks. 
-# -#request_total_timeout = 320 - -# Default/base idle connection pool timeout (seconds). This is used only -# by URL previews and update/news endpoint checks. -# -#request_idle_timeout = 5 - -# Default/base max idle connections per host. This is used only by URL -# previews and update/news endpoint checks. Defaults to 1 as generally the -# same open connection can be re-used. -# -#request_idle_per_host = 1 - -# Federation well-known resolution connection timeout (seconds). -# -#well_known_conn_timeout = 6 - -# Federation HTTP well-known resolution request timeout (seconds). -# -#well_known_timeout = 10 - -# Federation client request timeout (seconds). You most definitely want -# this to be high to account for extremely large room joins, slow -# homeservers, your own resources etc. -# -#federation_timeout = 300 - -# Federation client idle connection pool timeout (seconds). -# -#federation_idle_timeout = 25 - -# Federation client max idle connections per host. Defaults to 1 as -# generally the same open connection can be re-used. -# -#federation_idle_per_host = 1 - -# Federation sender request timeout (seconds). The time it takes for the -# remote server to process sent transactions can take a while. -# -#sender_timeout = 180 - -# Federation sender idle connection pool timeout (seconds). -# -#sender_idle_timeout = 180 - -# Federation sender transaction retry backoff limit (seconds). -# -#sender_retry_backoff_limit = 86400 - -# Appservice URL request connection timeout. Defaults to 35 seconds as -# generally appservices are hosted within the same network. -# -#appservice_timeout = 35 - -# Appservice URL idle connection pool timeout (seconds). -# -#appservice_idle_timeout = 300 - -# Notification gateway pusher idle connection pool timeout. -# -#pusher_idle_timeout = 15 - -# Maximum time to receive a request from a client (seconds). -# -#client_receive_timeout = 75 - -# Maximum time to process a request received from a client (seconds). 
-# -#client_request_timeout = 180 - -# Maximum time to transmit a response to a client (seconds) -# -#client_response_timeout = 120 - -# Grace period for clean shutdown of client requests (seconds). -# -#client_shutdown_timeout = 10 - -# Grace period for clean shutdown of federation requests (seconds). -# -#sender_shutdown_timeout = 5 +# Vector list of servers that conduwuit will refuse to download remote media from. +# No default. +# prevent_media_downloads_from = ["example.com", "example.local"] # Enables registration. If set to false, no users can register on this # server. -# -# If set to true without a token configured, users can register with no -# form of 2nd-step only if you set the following option to true: -# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` -# -# If you would like registration only via token reg, please configure -# `registration_token` or `registration_token_file`. -# -#allow_registration = false +# If set to true without a token configured, users can register with no form of 2nd- +# step only if you set +# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` to +# true in your config. If you would like +# registration only via token reg, please configure the `registration_token` key. +allow_registration = false +# Please note that an open registration homeserver with no second-step verification +# is highly prone to abuse and potential defederation by homeservers, including +# matrix.org. -# Enabling this setting opens registration to anyone without restrictions. -# This makes your server vulnerable to abuse -# -#yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = false +# A static registration token that new users will have to provide when creating +# an account. If unset and `allow_registration` is true, registration is open +# without any condition. YOU NEED TO EDIT THIS. 
+registration_token = "change this token for something specific to your server" -# A static registration token that new users will have to provide when -# creating an account. If unset and `allow_registration` is true, -# you must set -# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` -# to true to allow open registration without any conditions. -# -# YOU NEED TO EDIT THIS OR USE registration_token_file. -# -# example: "o&^uCtes4HPf0Vu@F20jQeeWE7" -# -#registration_token = +# controls whether federation is allowed or not +# defaults to true +# allow_federation = true -# Path to a file on the system that gets read for additional registration -# tokens. Multiple tokens can be added if you separate them with -# whitespace -# -# conduwuit must be able to access the file, and it must not be empty -# -# example: "/etc/conduwuit/.reg_token" -# -#registration_token_file = +# controls whether users are allowed to create rooms. +# appservices and admins are always allowed to create rooms +# defaults to true +# allow_room_creation = true -# Controls whether encrypted rooms and events are allowed. -# -#allow_encryption = true +# controls whether non-admin local users are forbidden from sending room invites (local and remote), +# and if non-admin users can receive remote room invites. admins are always allowed to send and receive all room invites. +# defaults to false +# block_non_admin_invites = false -# Controls whether federation is allowed or not. It is not recommended to -# disable this after the fact due to potential federation breakage. -# -#allow_federation = true +# List of forbidden username patterns/strings. Values in this list are matched as *contains*. +# This is checked upon username availability check, registration, and startup as warnings if any local users in your database +# have a forbidden username. +# No default. 
+# forbidden_usernames = [] -# Allows federation requests to be made to itself -# -# This isn't intended and is very likely a bug if federation requests are -# being sent to yourself. This currently mainly exists for development -# purposes. -# -#federation_loopback = false +# List of forbidden room aliases and room IDs as patterns/strings. Values in this list are matched as *contains*. +# This is checked upon room alias creation, custom room ID creation if used, and startup as warnings if any room aliases +# in your database have a forbidden room alias/ID. +# No default. +# forbidden_alias_names = [] -# Always calls /forget on behalf of the user if leaving a room. This is a -# part of MSC4267 "Automatically forgetting rooms on leave" -# -#forget_forced_upon_leave = false +# List of forbidden server names that we will block all client room joins, incoming federated room directory requests, incoming federated invites for, and incoming federated joins. This check is applied on the room ID, room alias, sender server name, and sender user's server name. +# Basically "global" ACLs. For our user (client) checks, admin users are allowed. +# No default. +# forbidden_remote_server_names = [] -# Set this to true to require authentication on the normally -# unauthenticated profile retrieval endpoints (GET) -# "/_matrix/client/v3/profile/{userId}". -# -# This can prevent profile scraping. -# -#require_auth_for_profile_requests = false +# List of forbidden server names that we will block all outgoing federated room directory requests for. Useful for preventing our users from wandering into bad servers or spaces. +# No default. +# forbidden_remote_room_directory_server_names = [] -# Set this to true to allow your server's public room directory to be -# federated. Set this to false to protect against /publicRooms spiders, -# but will forbid external users from viewing your server's public room -# directory. 
If federation is disabled entirely (`allow_federation`), this -# is inherently false. -# -#allow_public_room_directory_over_federation = false +# Set this to true to allow your server's public room directory to be federated. +# Set this to false to protect against /publicRooms spiders, but will forbid external users +# from viewing your server's public room directory. If federation is disabled entirely +# (`allow_federation`), this is inherently false. +allow_public_room_directory_over_federation = false -# Set this to true to allow your server's public room directory to be -# queried without client authentication (access token) through the Client -# APIs. Set this to false to protect against /publicRooms spiders. -# -#allow_public_room_directory_without_auth = false +# Set this to true to allow your server's public room directory to be queried without client +# authentication (access token) through the Client APIs. Set this to false to protect against /publicRooms spiders. +allow_public_room_directory_without_auth = false -# Allow guests/unauthenticated users to access TURN credentials. +# Set this to true to lock down your server's public room directory and only allow admins to publish rooms to the room directory. +# Unpublishing is still allowed by all users with this enabled. # -# This is the equivalent of Synapse's `turn_allow_guests` config option. -# This allows any unauthenticated user to call the endpoint -# `/_matrix/client/v3/voip/turnServer`. -# -# It is unlikely you need to enable this as all major clients support -# authentication for this endpoint and prevents misuse of your TURN server -# from potential bots. -# -#turn_allow_guests = false +# Defaults to false +lockdown_public_room_directory = false -# Set this to true to lock down your server's public room directory and -# only allow admins to publish rooms to the room directory. Unpublishing -# is still allowed by all users with this enabled. 
-# -#lockdown_public_room_directory = false +# Set this to true to allow federating device display names / allow external users to see your device display name. +# If federation is disabled entirely (`allow_federation`), this is inherently false. For privacy, this is best disabled. +allow_device_name_federation = false -# Set this to true to allow federating device display names / allow -# external users to see your device display name. If federation is -# disabled entirely (`allow_federation`), this is inherently false. For -# privacy reasons, this is best left disabled. -# -#allow_device_name_federation = false +# Vector list of domains allowed to send requests to for URL previews. Defaults to none. +# Note: this is a *contains* match, not an explicit match. Putting "google.com" will match "https://google.com" and "http://mymaliciousdomainexamplegoogle.com" +# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so. +url_preview_domain_contains_allowlist = [] -# Config option to allow or disallow incoming federation requests that -# obtain the profiles of our local users from -# `/_matrix/federation/v1/query/profile` -# -# Increases privacy of your local user's such as display names, but some -# remote users may get a false "this user does not exist" error when they -# try to invite you to a DM or room. Also can protect against profile -# spiders. +# Vector list of explicit domains allowed to send requests to for URL previews. Defaults to none. +# Note: This is an *explicit* match, not a contains match. Putting "google.com" will match "https://google.com", "http://google.com", but not "https://mymaliciousdomainexamplegoogle.com" +# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so. 
+url_preview_domain_explicit_allowlist = [] + +# Vector list of URLs allowed to send requests to for URL previews. Defaults to none. +# Note that this is a *contains* match, not an explicit match. Putting "google.com" will match "https://google.com/", "https://google.com/url?q=https://mymaliciousdomainexample.com", and "https://mymaliciousdomainexample.com/hi/google.com" +# Setting this to "*" will allow all URL previews. Please note that this opens up significant attack surface to your server, you are expected to be aware of the risks by doing so. +url_preview_url_contains_allowlist = [] + +# Vector list of explicit domains not allowed to send requests to for URL previews. Defaults to none. +# Note: This is an *explicit* match, not a contains match. Putting "google.com" will match "https://google.com", "http://google.com", but not "https://mymaliciousdomainexamplegoogle.com" +# The denylist is checked first before allowlist. Setting this to "*" will not do anything. +url_preview_domain_explicit_denylist = [] + +# Maximum amount of bytes allowed in a URL preview body size when spidering. Defaults to 384KB (384_000 bytes) +url_preview_max_spider_size = 384_000 + +# Option to decide whether you would like to run the domain allowlist checks (contains and explicit) on the root domain or not. Does not apply to URL contains allowlist. Defaults to false. +# Example: If this is enabled and you have "wikipedia.org" allowed in the explicit and/or contains domain allowlist, it will allow all subdomains under "wikipedia.org" such as "en.m.wikipedia.org" as the root domain is checked and matched. +# Useful if the domain contains allowlist is still too broad for you but you still want to allow all the subdomains under a root domain. 
+url_preview_check_root_domain = false + +# Config option to allow or disallow incoming federation requests that obtain the profiles +# of our local users from `/_matrix/federation/v1/query/profile` # # This is inherently false if `allow_federation` is disabled # -#allow_inbound_profile_lookup_federation_requests = true +# Defaults to true +allow_profile_lookup_federation_requests = true -# Allow standard users to create rooms. Appservices and admins are always -# allowed to create rooms -# -#allow_room_creation = true -# Set to false to disable users from joining or creating room versions -# that aren't officially supported by conduwuit. -# -# conduwuit officially supports room versions 6 - 11. -# -# conduwuit has slightly experimental (though works fine in practice) -# support for versions 3 - 5. +### Misc + +# max log level for conduwuit. allows debug, info, warn, or error +# see also: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives +# **Caveat**: +# For release builds, the tracing crate is configured to only implement levels higher than error to avoid unnecessary overhead in the compiled binary from trace macros. +# For debug builds, this restriction is not applied. # +# Defaults to "warn" +#log = "warn" + +# controls whether encrypted rooms and events are allowed (default true) +#allow_encryption = false + +# if enabled, conduwuit will send a simple GET request periodically to `https://pupbrain.dev/check-for-updates/stable` +# for any new announcements made. Despite the name, this is not an update check +# endpoint, it is simply an announcement check endpoint. +# Defaults to false. +#allow_check_for_updates = false + +# Set to false to disable users from joining or creating room versions that aren't 100% officially supported by conduwuit. +# conduwuit officially supports room versions 6 - 10. conduwuit has experimental/unstable support for 3 - 5, and 11. +# Defaults to true. 
#allow_unstable_room_versions = true -# Default room version conduwuit will create rooms with. -# -# Per spec, room version 11 is the default. -# -#default_room_version = 11 +# Option to control adding arbitrary text to the end of the user's displayname upon registration with a space before the text. +# This was the lightning bolt emoji option, just replaced with support for adding your own custom text or emojis. +# To disable, set this to "" (an empty string) +# Defaults to "🏳️‍⚧️" (trans pride flag) +#new_user_displayname_suffix = "🏳️‍⚧️" -# This item is undocumented. Please contribute documentation for it. +# Option to control whether conduwuit will query your list of trusted notary key servers (`trusted_servers`) for +# remote homeserver signing keys it doesn't know *first*, or query the individual servers first before falling back to the trusted +# key servers. # -#allow_jaeger = false +# The former/default behaviour makes federated/remote rooms joins generally faster because we're querying a single (or list of) server +# that we know works, is reasonably fast, and is reliable for just about all the homeserver signing keys in the room. Querying individual +# servers may take longer depending on the general infrastructure of everyone in there, how many dead servers there are, etc. +# +# However, this does create an increased reliance on one single or multiple large entities as `trusted_servers` should generally +# contain long-term and large servers who know a very large number of homeservers. +# +# If you don't know what any of this means, leave this and `trusted_servers` alone to their defaults. +# +# Defaults to true as this is the fastest option for federation. +#query_trusted_key_servers_first = true -# This item is undocumented. Please contribute documentation for it. -# -#jaeger_filter = "info" - -# If the 'perf_measurements' compile-time feature is enabled, enables -# collecting folded stack trace profile of tracing spans using -# tracing_flame. 
The resulting profile can be visualized with inferno[1], -# speedscope[2], or a number of other tools. -# -# [1]: https://github.com/jonhoo/inferno -# [2]: www.speedscope.app -# -#tracing_flame = false - -# This item is undocumented. Please contribute documentation for it. -# -#tracing_flame_filter = "info" - -# This item is undocumented. Please contribute documentation for it. -# -#tracing_flame_output_path = "./tracing.folded" - -# Examples: -# -# - No proxy (default): -# -# proxy = "none" -# -# - For global proxy, create the section at the bottom of this file: -# -# [global.proxy] -# global = { url = "socks5h://localhost:9050" } -# -# - To proxy some domains: -# -# [global.proxy] -# [[global.proxy.by_domain]] -# url = "socks5h://localhost:9050" -# include = ["*.onion", "matrix.myspecial.onion"] -# exclude = ["*.myspecial.onion"] -# -# Include vs. Exclude: -# -# - If include is an empty list, it is assumed to be `["*"]`. -# -# - If a domain matches both the exclude and include list, the proxy will -# only be used if it was included because of a more specific rule than -# it was excluded. In the above example, the proxy would be used for -# `ordinary.onion`, `matrix.myspecial.onion`, but not -# `hello.myspecial.onion`. -# -#proxy = "none" - -# Servers listed here will be used to gather public keys of other servers -# (notary trusted key servers). -# -# Currently, conduwuit doesn't support inbound batched key requests, so -# this list should only contain other Synapse servers. -# -# example: ["matrix.org", "tchncs.de"] -# -#trusted_servers = ["matrix.org"] - -# Whether to query the servers listed in trusted_servers first or query -# the origin server first. For best security, querying the origin server -# first is advised to minimize the exposure to a compromised trusted -# server. 
For maximum federation/join performance this can be set to true, -# however other options exist to query trusted servers first under -# specific high-load circumstances and should be evaluated before setting -# this to true. -# -#query_trusted_key_servers_first = false - -# Whether to query the servers listed in trusted_servers first -# specifically on room joins. This option limits the exposure to a -# compromised trusted server to room joins only. The join operation -# requires gathering keys from many origin servers which can cause -# significant delays. Therefor this defaults to true to mitigate -# unexpected delays out-of-the-box. The security-paranoid or those willing -# to tolerate delays are advised to set this to false. Note that setting -# query_trusted_key_servers_first to true causes this option to be -# ignored. -# -#query_trusted_key_servers_first_on_join = true - -# Only query trusted servers for keys and never the origin server. This is -# intended for clusters or custom deployments using their trusted_servers -# as forwarding-agents to cache and deduplicate requests. Notary servers -# do not act as forwarding-agents by default, therefor do not enable this -# unless you know exactly what you are doing. -# -#only_query_trusted_key_servers = false - -# Maximum number of keys to request in each trusted server batch query. -# -#trusted_server_batch_size = 1024 - -# Max log level for conduwuit. Allows debug, info, warn, or error. -# -# See also: -# https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives -# -# **Caveat**: -# For release builds, the tracing crate is configured to only implement -# levels higher than error to avoid unnecessary overhead in the compiled -# binary from trace macros. For debug builds, this restriction is not -# applied. -# -#log = "info" - -# Output logs with ANSI colours. -# -#log_colors = true - -# Configures the span events which will be outputted with the log. 
-# -#log_span_events = "none" - -# Configures whether CONDUWUIT_LOG EnvFilter matches values using regular -# expressions. See the tracing_subscriber documentation on Directives. -# -#log_filter_regex = true - -# Toggles the display of ThreadId in tracing log output. -# -#log_thread_ids = false - -# OpenID token expiration/TTL in seconds. -# -# These are the OpenID tokens that are primarily used for Matrix account -# integrations (e.g. Vector Integrations in Element), *not* OIDC/OpenID -# Connect/etc. -# -#openid_token_ttl = 3600 - -# Allow an existing session to mint a login token for another client. -# This requires interactive authentication, but has security ramifications -# as a malicious client could use the mechanism to spawn more than one -# session. -# Enabled by default. -# -#login_via_existing_session = true - -# Login token expiration/TTL in milliseconds. -# -# These are short-lived tokens for the m.login.token endpoint. -# This is used to allow existing sessions to create new sessions. -# see login_via_existing_session. -# -#login_token_ttl = 120000 - -# Static TURN username to provide the client if not using a shared secret -# ("turn_secret"), It is recommended to use a shared secret over static -# credentials. -# -#turn_username = false - -# Static TURN password to provide the client if not using a shared secret -# ("turn_secret"). It is recommended to use a shared secret over static -# credentials. -# -#turn_password = false - -# Vector list of TURN URIs/servers to use. -# -# Replace "example.turn.uri" with your TURN domain, such as the coturn -# "realm" config option. If using TURN over TLS, replace the URI prefix -# "turn:" with "turns:". -# -# example: ["turn:example.turn.uri?transport=udp", -# "turn:example.turn.uri?transport=tcp"] -# -#turn_uris = [] - -# TURN secret to use for generating the HMAC-SHA1 hash apart of username -# and password generation. 
-# -# This is more secure, but if needed you can use traditional static -# username/password credentials. -# -#turn_secret = false - -# TURN secret to use that's read from the file path specified. -# -# This takes priority over "turn_secret" first, and falls back to -# "turn_secret" if invalid or failed to open. -# -# example: "/etc/conduwuit/.turn_secret" -# -#turn_secret_file = - -# TURN TTL, in seconds. -# -#turn_ttl = 86400 - -# List/vector of room IDs or room aliases that conduwuit will make newly -# registered users join. The rooms specified must be rooms that you have -# joined at least once on the server, and must be public. -# -# example: ["#conduwuit:puppygock.gay", -# "!eoIzvAvVwY23LPDay8:puppygock.gay"] +# List/vector of room **IDs** that conduwuit will make newly registered users join. +# The room IDs specified must be rooms that you have joined at least once on the server, and must be public. # +# No default. #auto_join_rooms = [] -# Config option to automatically deactivate the account of any user who -# attempts to join a: -# - banned room -# - forbidden room alias -# - room alias or ID with a forbidden server name -# -# This may be useful if all your banned lists consist of toxic rooms or -# servers that no good faith user would ever attempt to join, and -# to automatically remediate the problem without any admin user -# intervention. -# -# This will also make the user leave all rooms. Federation (e.g. remote -# room invites) are ignored here. -# -# Defaults to false as rooms can be banned for non-moderation-related -# reasons and this performs a full user deactivation. -# -#auto_deactivate_banned_room_attempts = false +# Retry failed and incomplete messages to remote servers immediately upon startup. This is called bursting. +# If this is disabled, said messages may not be delivered until more messages are queued for that server. 
+# Do not change this option unless server resources are extremely limited or the scale of the server's +# deployment is huge. Do not disable this unless you know what you are doing. +#startup_netburst = true -# RocksDB log level. This is not the same as conduwuit's log level. This -# is the log level for the RocksDB engine/library which show up in your -# database folder/path as `LOG` files. conduwuit will log RocksDB errors -# as normal through tracing or panics if severe for safety. -# -#rocksdb_log_level = "error" +# Limit the startup netburst to the most recent (default: 50) messages queued for each remote server. All older +# messages are dropped and not reattempted. The `startup_netburst` option must be enabled for this value to have +# any effect. Do not change this value unless you know what you are doing. Set this value to -1 to reattempt +# every message without trimming the queues; this may consume significant disk. Set this value to 0 to drop all +# messages without any attempt at redelivery. +#startup_netburst_keep = 50 -# This item is undocumented. Please contribute documentation for it. -# -#rocksdb_log_stderr = false -# Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB in -# bytes. -# -#rocksdb_max_log_file_size = 4194304 +### Generic database options -# Time in seconds before RocksDB will forcibly rotate logs. -# -#rocksdb_log_time_to_roll = 0 +# Set this to any float value to multiply conduwuit's in-memory LRU caches with. +# May be useful if you have significant memory to spare to increase performance. +# Defaults to 1.0. +#conduit_cache_capacity_modifier = 1.0 -# Set this to true to use RocksDB config options that are tailored to HDDs -# (slower device storage). +# Set this to any float value in megabytes for conduwuit to tell the database engine that this much memory is available for database-related caches. +# May be useful if you have significant memory to spare to increase performance. 
+# Defaults to 256.0 +#db_cache_capacity_mb = 256.0 + +# Interval in seconds when conduwuit will run database cleanup operations. # -# It is worth noting that by default, conduwuit will use RocksDB with -# Direct IO enabled. *Generally* speaking this improves performance as it -# bypasses buffered I/O (system page cache). However there is a potential -# chance that Direct IO may cause issues with database operations if your -# setup is uncommon. This has been observed with FUSE filesystems, and -# possibly ZFS filesystem. RocksDB generally deals/corrects these issues -# but it cannot account for all setups. If you experience any weird -# RocksDB issues, try enabling this option as it turns off Direct IO and -# feel free to report in the conduwuit Matrix room if this option fixes -# your DB issues. +# For SQLite: this will flush the WAL by executing `PRAGMA wal_checkpoint(RESTART)` (https://www.sqlite.org/pragma.html#pragma_wal_checkpoint) +# For RocksDB: this will run `flush_opt` to flush database memtables to SST files on disk (https://docs.rs/rocksdb/latest/rocksdb/struct.DBCommon.html#method.flush_opt) +# These operations always run on shutdown. # -# For more information, see: -# https://github.com/facebook/rocksdb/wiki/Direct-IO +# Defaults to 30 minutes (1800 seconds) to avoid IO amplification from too frequent cleanups +#cleanup_second_interval = 1800 + + +### RocksDB options + +# Set this to true to use RocksDB config options that are tailored to HDDs (slower device storage) # +# It is worth noting that by default, conduwuit will use RocksDB with Direct IO enabled. *Generally* speaking this improves performance as it bypasses buffered I/O (system page cache). +# However there is a potential chance that Direct IO may cause issues with database operations if your setup is uncommon. This has been observed with FUSE filesystems, and possibly ZFS filesystem. +# RocksDB generally deals/corrects these issues but it cannot account for all setups. 
+# If you experience any weird RocksDB issues, try enabling this option as it turns off Direct IO and feel free to report in the conduwuit Matrix room if this option fixes your DB issues.
+# See https://github.com/facebook/rocksdb/wiki/Direct-IO for more information.
+#
+# Defaults to false
#rocksdb_optimize_for_spinning_disks = false

-# Enables direct-io to increase database performance via unbuffered I/O.
-#
-# For more details about direct I/O and RockDB, see:
-# https://github.com/facebook/rocksdb/wiki/Direct-IO
-#
-# Set this option to false if the database resides on a filesystem which
-# does not support direct-io like FUSE, or any form of complex filesystem
-# setup such as possibly ZFS.
-#
-#rocksdb_direct_io = true
+# RocksDB log level. This is not the same as conduwuit's log level. This is the log level for the RocksDB engine/library
+# which show up in your database folder/path as `LOG` files. Defaults to error. conduwuit will typically log RocksDB errors as normal.
+#rocksdb_log_level = "error"

-# Amount of threads that RocksDB will use for parallelism on database
-# operations such as cleanup, sync, flush, compaction, etc. Set to 0 to
-# use all your logical threads. Defaults to your CPU logical thread count.
-#
-#rocksdb_parallelism_threads = varies by system
+# Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB.
+#rocksdb_max_log_file_size = 4194304

-# Maximum number of LOG files RocksDB will keep. This must *not* be set to
-# 0. It must be at least 1. Defaults to 3 as these are not very useful
-# unless troubleshooting/debugging a RocksDB bug.
+# Time in seconds before RocksDB will forcibly rotate logs. Defaults to 0.
+#rocksdb_log_time_to_roll = 0
+
+# Amount of threads that RocksDB will use for parallelism on database operations such as cleanup, sync, flush, compaction, etc. Set to 0 to use all your physical cores.
#
+# Defaults to your CPU physical core count (not logical threads).
+#rocksdb_parallelism_threads = 0 + +# Maximum number of LOG files RocksDB will keep. This must *not* be set to 0. It must be at least 1. +# Defaults to 3 as these are not very useful. #rocksdb_max_log_files = 3 # Type of RocksDB database compression to use. +# Available options are "zstd", "zlib", "bz2" and "lz4" +# It is best to use ZSTD as an overall good balance between speed/performance, storage, IO amplification, and CPU usage. +# For more performance but less compression (more storage used) and less CPU usage, use LZ4. +# See https://github.com/facebook/rocksdb/wiki/Compression for more details. # -# Available options are "zstd", "bz2", "lz4", or "none". -# -# It is best to use ZSTD as an overall good balance between -# speed/performance, storage, IO amplification, and CPU usage. For more -# performance but less compression (more storage used) and less CPU usage, -# use LZ4. -# -# For more details, see: -# https://github.com/facebook/rocksdb/wiki/Compression -# -# "none" will disable compression. -# +# Defaults to "zstd" #rocksdb_compression_algo = "zstd" -# Level of compression the specified compression algorithm for RocksDB to -# use. -# -# Default is 32767, which is internally read by RocksDB as the default -# magic number and translated to the library's default compression level -# as they all differ. See their `kDefaultCompressionLevel`. -# -# Note when using the default value we may override it with a setting -# tailored specifically conduwuit. +# Level of compression the specified compression algorithm for RocksDB to use. +# Default is 32767, which is internally read by RocksDB as the default magic number and +# translated to the library's default compression level as they all differ. +# See their `kDefaultCompressionLevel`. # #rocksdb_compression_level = 32767 -# Level of compression the specified compression algorithm for the -# bottommost level/data for RocksDB to use. 
Default is 32767, which is
-# internally read by RocksDB as the default magic number and translated to
-# the library's default compression level as they all differ. See their
-# `kDefaultCompressionLevel`.
+# Level of compression the specified compression algorithm for the bottommost level/data for RocksDB to use.
+# Default is 32767, which is internally read by RocksDB as the default magic number and
+# translated to the library's default compression level as they all differ.
+# See their `kDefaultCompressionLevel`.
#
-# Since this is the bottommost level (generally old and least used data),
-# it may be desirable to have a very high compression level here as it's
-# less likely for this data to be used. Research your chosen compression
-# algorithm.
-#
-# Note when using the default value we may override it with a setting
-# tailored specifically conduwuit.
+# Since this is the bottommost level (generally old and least used data), it may be desirable to have a very
+# high compression level here as it's less likely for this data to be used. Research your chosen compression algorithm.
#
#rocksdb_bottommost_compression_level = 32767

-# Whether to enable RocksDB's "bottommost_compression".
-#
-# At the expense of more CPU usage, this will further compress the
-# database to reduce more storage. It is recommended to use ZSTD
-# compression with this for best compression results. This may be useful
-# if you're trying to reduce storage usage from the database.
-#
+# Whether to enable RocksDB "bottommost_compression".
+# At the expense of more CPU usage, this will further compress the database to reduce more storage.
+# It is recommended to use ZSTD compression with this for best compression results.
# See https://github.com/facebook/rocksdb/wiki/Compression for more details.
#
-#rocksdb_bottommost_compression = true
+# Defaults to false as this uses more CPU when compressing.
+#rocksdb_bottommost_compression = false -# Database recovery mode (for RocksDB WAL corruption). +# Database recovery mode (for RocksDB WAL corruption) # -# Use this option when the server reports corruption and refuses to start. -# Set mode 2 (PointInTime) to cleanly recover from this corruption. The -# server will continue from the last good state, several seconds or -# minutes prior to the crash. Clients may have to run "clear-cache & -# reload" to account for the rollback. Upon success, you may reset the -# mode back to default and restart again. Please note in some cases the -# corruption error may not be cleared for at least 30 minutes of operation -# in PointInTime mode. +# Use this option when the server reports corruption and refuses to start. Set mode 2 (PointInTime) +# to cleanly recover from this corruption. The server will continue from the last good state, +# several seconds or minutes prior to the crash. Clients may have to run "clear-cache & reload" to +# account for the rollback. Upon success, you may reset the mode back to default and restart again. +# Please note in some cases the corruption error may not be cleared for at least 30 minutes of +# operation in PointInTime mode. # -# As a very last ditch effort, if PointInTime does not fix or resolve -# anything, you can try mode 3 (SkipAnyCorruptedRecord) but this will -# leave the server in a potentially inconsistent state. +# As a very last ditch effort, if PointInTime does not fix or resolve anything, you can try mode +# 3 (SkipAnyCorruptedRecord) but this will leave the server in a potentially inconsistent state. # -# The default mode 1 (TolerateCorruptedTailRecords) will automatically -# drop the last entry in the database if corrupted during shutdown, but -# nothing more. It is extraordinarily unlikely this will desynchronize -# clients. To disable any form of silent rollback set mode 0 -# (AbsoluteConsistency). 
+# The default mode 1 (TolerateCorruptedTailRecords) will automatically drop the last entry in the
+# database if corrupted during shutdown, but nothing more. It is extraordinarily unlikely this will
+# desynchronize clients. To disable any form of silent rollback set mode 0 (AbsoluteConsistency).
#
# The options are:
# 0 = AbsoluteConsistency
@@ -898,778 +457,206 @@
# 2 = PointInTime (use me if trying to recover)
# 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty)
#
-# For more information on these modes, see:
-# https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes
-#
-# For more details on recovering a corrupt database, see:
-# https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption
+# See https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes for more information
#
+# Defaults to 1 (TolerateCorruptedTailRecords)
#rocksdb_recovery_mode = 1

-# Enables or disables paranoid SST file checks. This can improve RocksDB
-# database consistency at a potential performance impact due to further
-# safety checks ran.
-#
-# For more information, see:
-# https://github.com/facebook/rocksdb/wiki/Online-Verification#columnfamilyoptionsparanoid_file_checks
-#
-#rocksdb_paranoid_file_checks = false
+# Controls whether memory buffers are written to storage at the fixed interval set by `cleanup_second_interval`
+# even when they are not full. Setting this will increase load on the storage backplane and is never advised
+# under normal circumstances.
+#rocksdb_periodic_cleanup = false

-# Enables or disables checksum verification in rocksdb at runtime.
-# Checksums are usually hardware accelerated with low overhead; they are
-# enabled in rocksdb by default. Older or slower platforms may see gains
-# from disabling.
-#
-#rocksdb_checksums = true

-# Enables the "atomic flush" mode in rocksdb. This option is not intended
-# for users. It may be removed or ignored in future versions. 
Atomic flush -# may be enabled by the paranoid to possibly improve database integrity at -# the cost of performance. -# -#rocksdb_atomic_flush = false +### Domain Name Resolution and Caching -# Database repair mode (for RocksDB SST corruption). -# -# Use this option when the server reports corruption while running or -# panics. If the server refuses to start use the recovery mode options -# first. Corruption errors containing the acronym 'SST' which occur after -# startup will likely require this option. -# -# - Backing up your database directory is recommended prior to running the -# repair. -# -# - Disabling repair mode and restarting the server is recommended after -# running the repair. -# -# See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. -# -#rocksdb_repair = false +# Maximum entries stored in DNS memory-cache. The size of an entry may vary so please take care if +# raising this value excessively. Only decrease this when using an external DNS cache. Please note +# that systemd does *not* count as an external cache, even when configured to do so. +#dns_cache_entries = 12288 -# This item is undocumented. Please contribute documentation for it. -# -#rocksdb_read_only = false +# Minimum time-to-live in seconds for entries in the DNS cache. The default may appear high to most +# administrators; this is by design. Only decrease this if you are using an external DNS cache. +#dns_min_ttl = 10800 -# This item is undocumented. Please contribute documentation for it. -# -#rocksdb_secondary = false +# Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. This value is critical for +# the server to federate efficiently. NXDOMAIN's are assumed to not be returning to the federation +# and aggressively cached rather than constantly rechecked. +#dns_min_ttl_nxdomain = 86400 -# Enables idle CPU priority for compaction thread. 
This is not enabled by -# default to prevent compaction from falling too far behind on busy -# systems. -# -#rocksdb_compaction_prio_idle = false +# The number of seconds to wait for a reply to a DNS query. Please note that recursive queries can +# take up to several seconds for some domains, so this value should not be too low. +#dns_timeout = 10 -# Enables idle IO priority for compaction thread. This prevents any -# unexpected lag in the server's operation and is usually a good idea. -# Enabled by default. -# -#rocksdb_compaction_ioprio_idle = true +# Number of retries after a timeout. +#dns_attempts = 10 -# Enables RocksDB compaction. You should never ever have to set this -# option to false. If you for some reason find yourself needing to use -# this option as part of troubleshooting or a bug, please reach out to us -# in the conduwuit Matrix room with information and details. -# -# Disabling compaction will lead to a significantly bloated and -# explosively large database, gradually poor performance, unnecessarily -# excessive disk read/writes, and slower shutdowns and startups. -# -#rocksdb_compaction = true +# Fallback to TCP on DNS errors. Set this to false if unsupported by nameserver. +#dns_tcp_fallback = true -# Level of statistics collection. Some admin commands to display database -# statistics may require this option to be set. Database performance may -# be impacted by higher settings. +# Enable to query all nameservers until the domain is found. Referred to as "trust_negative_responses" in hickory_resolver. +# This can avoid useless DNS queries if the first nameserver responds with NXDOMAIN or an empty NOERROR response. # -# Option is a number ranging from 0 to 6: -# 0 = No statistics. -# 1 = No statistics in release mode (default). -# 2 to 3 = Statistics with no performance impact. -# 3 to 5 = Statistics with possible performance impact. -# 6 = All statistics. -# -#rocksdb_stats_level = 1 +# The default is to query one nameserver and stop (false). 
+#query_all_nameservers = true -# This is a password that can be configured that will let you login to the -# server bot account (currently `@conduit`) for emergency troubleshooting -# purposes such as recovering/recreating your admin room, or inviting -# yourself back. -# -# See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. -# -# Once this password is unset, all sessions will be logged out for -# security purposes. -# -# example: "F670$2CP@Hw8mG7RY1$%!#Ic7YA" -# -#emergency_password = -# This item is undocumented. Please contribute documentation for it. -# -#notification_push_path = "/_matrix/push/v1/notify" +### Request Timeouts, Connection Timeouts, and Connection Pooling -# Allow local (your server only) presence updates/requests. +## Request Timeouts are HTTP response timeouts +## Connection Timeouts are TCP connection timeouts +## +## Connection Pooling Timeouts are timeouts for keeping an open idle connection alive. +## Connection pooling and keepalive is very useful for federation or other places where for performance reasons, +## we want to keep connections open that we will re-use frequently due to TCP and TLS 1.3 overhead/expensiveness. +## +## Generally these defaults are the best, but if you find a reason to need to change these they are here. + +# Default/base connection timeout +# This is used only by URL previews and update/news endpoint checks # -# Note that presence on conduwuit is very fast unlike Synapse's. If using -# outgoing presence, this MUST be enabled. 
+# Defaults to 10 seconds +#request_conn_timeout = 10 + +# Default/base request timeout +# This is used only by URL previews and update/news endpoint checks +# +# Defaults to 35 seconds +#request_timeout = 35 + +# Default/base max idle connections per host +# This is used only by URL previews and update/news endpoint checks +# +# Defaults to 1 as generally the same open connection can be re-used +#request_idle_per_host = 1 + +# Default/base idle connection pool timeout +# This is used only by URL previews and update/news endpoint checks +# +# Defaults to 5 seconds +#request_idle_timeout = 5 + +# Federation well-known resolution connection timeout +# +# Defaults to 6 seconds +#well_known_conn_timeout = 6 + +# Federation HTTP well-known resolution request timeout +# +# Defaults to 10 seconds +#well_known_timeout = 10 + +# Federation client/server request timeout +# You most definitely want this to be high to account for extremely large room joins, slow homeservers, your own resources etc. +# +# Defaults to 300 seconds +#federation_timeout = 300 + +# Federation client/sender max idle connections per host +# +# Defaults to 1 as generally the same open connection can be re-used +#federation_idle_per_host = 1 + +# Federation client/sender idle connection pool timeout +# +# Defaults to 25 seconds +#federation_idle_timeout = 25 + +# Appservice URL request connection timeout +# +# Defaults to 120 seconds +#appservice_timeout = 120 + +# Appservice URL idle connection pool timeout +# +# Defaults to 300 seconds +#appservice_idle_timeout = 300 + +# Notification gateway pusher idle connection pool timeout +# +# Defaults to 15 seconds +#pusher_idle_timeout = 15 + + +### Presence / Typing Indicators / Read Receipts + +# Config option to control local (your server only) presence updates/requests. Defaults to true. +# Note that presence on conduwuit is very fast unlike Synapse's. +# If using outgoing presence, this MUST be enabled. 
# #allow_local_presence = true -# Allow incoming federated presence updates/requests. -# -# This option receives presence updates from other servers, but does not -# send any unless `allow_outgoing_presence` is true. Note that presence on -# conduwuit is very fast unlike Synapse's. +# Config option to control incoming federated presence updates/requests. Defaults to true. +# This option receives presence updates from other servers, but does not send any unless `allow_outgoing_presence` is true. +# Note that presence on conduwuit is very fast unlike Synapse's. # #allow_incoming_presence = true -# Allow outgoing presence updates/requests. -# -# This option sends presence updates to other servers, but does not -# receive any unless `allow_incoming_presence` is true. Note that presence -# on conduwuit is very fast unlike Synapse's. If using outgoing presence, -# you MUST enable `allow_local_presence` as well. +# Config option to control outgoing presence updates/requests. Defaults to true. +# This option sends presence updates to other servers, but does not receive any unless `allow_incoming_presence` is true. +# Note that presence on conduwuit is very fast unlike Synapse's. +# If using outgoing presence, you MUST enable `allow_local_presence` as well. # #allow_outgoing_presence = true -# How many seconds without presence updates before you become idle. -# Defaults to 5 minutes. -# +# Config option to control how many seconds before presence updates that you are idle. Defaults to 5 minutes. #presence_idle_timeout_s = 300 -# How many seconds without presence updates before you become offline. -# Defaults to 30 minutes. -# +# Config option to control how many seconds before presence updates that you are offline. Defaults to 30 minutes. #presence_offline_timeout_s = 1800 -# Enable the presence idle timer for remote users. -# -# Disabling is offered as an optimization for servers participating in -# many large rooms or when resources are limited. 
Disabling it may cause -# incorrect presence states (i.e. stuck online) to be seen for some remote -# users. -# -#presence_timeout_remote_users = true - -# Allow receiving incoming read receipts from remote servers. -# +# Config option to control whether we should receive remote incoming read receipts. +# Defaults to true. #allow_incoming_read_receipts = true -# Allow sending read receipts to remote servers. -# +# Config option to control whether we should send read receipts to remote servers. +# Defaults to true. #allow_outgoing_read_receipts = true -# Allow outgoing typing updates to federation. -# +# Config option to control outgoing typing updates to federation. Defaults to true. #allow_outgoing_typing = true -# Allow incoming typing updates from federation. -# +# Config option to control incoming typing updates from federation. Defaults to true. #allow_incoming_typing = true -# Maximum time federation user can indicate typing. -# +# Config option to control maximum time federation user can indicate typing. #typing_federation_timeout_s = 30 -# Minimum time local client can indicate typing. This does not override a -# client's request to stop typing. It only enforces a minimum value in -# case of no stop request. -# +# Config option to control minimum time local client can indicate typing. This does not override +# a client's request to stop typing. It only enforces a minimum value in case of no stop request. #typing_client_timeout_min_s = 15 -# Maximum time local client can indicate typing. -# +# Config option to control maximum time local client can indicate typing. #typing_client_timeout_max_s = 45 -# Set this to true for conduwuit to compress HTTP response bodies using -# zstd. This option does nothing if conduwuit was not built with -# `zstd_compression` feature. Please be aware that enabling HTTP -# compression may weaken TLS. Most users should not need to enable this. 
-# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH -# before deciding to enable this. -# -#zstd_compression = false -# Set this to true for conduwuit to compress HTTP response bodies using -# gzip. This option does nothing if conduwuit was not built with -# `gzip_compression` feature. Please be aware that enabling HTTP -# compression may weaken TLS. Most users should not need to enable this. -# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before -# deciding to enable this. +# Other options not in [global]: # -# If you are in a large amount of rooms, you may find that enabling this -# is necessary to reduce the significantly large response bodies. # -#gzip_compression = false - -# Set this to true for conduwuit to compress HTTP response bodies using -# brotli. This option does nothing if conduwuit was not built with -# `brotli_compression` feature. Please be aware that enabling HTTP -# compression may weaken TLS. Most users should not need to enable this. -# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH -# before deciding to enable this. +# Enables running conduwuit with direct TLS support +# It is strongly recommended you use a reverse proxy instead. This is primarily relevant for test suites like complement that require a private CA setup. +# [global.tls] +# certs = "/path/to/my/certificate.crt" +# key = "/path/to/my/private_key.key" # -#brotli_compression = false - -# Set to true to allow user type "guest" registrations. Some clients like -# Element attempt to register guest users automatically. -# -#allow_guest_registration = false - -# Set to true to log guest registrations in the admin room. Note that -# these may be noisy or unnecessary if you're a public homeserver. -# -#log_guest_registrations = false - -# Set to true to allow guest registrations/users to auto join any rooms -# specified in `auto_join_rooms`. 
-# -#allow_guests_auto_join_rooms = false - -# Enable the legacy unauthenticated Matrix media repository endpoints. -# These endpoints consist of: -# - /_matrix/media/*/config -# - /_matrix/media/*/upload -# - /_matrix/media/*/preview_url -# - /_matrix/media/*/download/* -# - /_matrix/media/*/thumbnail/* -# -# The authenticated equivalent endpoints are always enabled. -# -# Defaults to true for now, but this is highly subject to change, likely -# in the next release. -# -#allow_legacy_media = true - -# This item is undocumented. Please contribute documentation for it. -# -#freeze_legacy_media = true - -# Check consistency of the media directory at startup: -# 1. When `media_compat_file_link` is enabled, this check will upgrade -# media when switching back and forth between Conduit and conduwuit. -# Both options must be enabled to handle this. -# 2. When media is deleted from the directory, this check will also delete -# its database entry. -# -# If none of these checks apply to your use cases, and your media -# directory is significantly large setting this to false may reduce -# startup time. -# -#media_startup_check = true - -# Enable backward-compatibility with Conduit's media directory by creating -# symlinks of media. -# -# This option is only necessary if you plan on using Conduit again. -# Otherwise setting this to false reduces filesystem clutter and overhead -# for managing these symlinks in the directory. This is now disabled by -# default. You may still return to upstream Conduit but you have to run -# conduwuit at least once with this set to true and allow the -# media_startup_check to take place before shutting down to return to -# Conduit. -# -#media_compat_file_link = false - -# Prune missing media from the database as part of the media startup -# checks. -# -# This means if you delete files from the media directory the -# corresponding entries will be removed from the database. 
This is -# disabled by default because if the media directory is accidentally moved -# or inaccessible, the metadata entries in the database will be lost with -# sadness. -# -#prune_missing_media = false - -# List of forbidden server names via regex patterns that we will block -# incoming AND outgoing federation with, and block client room joins / -# remote user invites. -# -# Note that your messages can still make it to forbidden servers through -# backfilling. Events we receive from forbidden servers via backfill -# from servers we *do* federate with will be stored in the database. -# -# This check is applied on the room ID, room alias, sender server name, -# sender user's server name, inbound federation X-Matrix origin, and -# outbound federation handler. -# -# You can set this to ["*"] to block all servers by default, and then -# use `allowed_remote_server_names` to allow only specific servers. -# -# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] -# -#forbidden_remote_server_names = [] - -# List of allowed server names via regex patterns that we will allow, -# regardless of if they match `forbidden_remote_server_names`. -# -# This option has no effect if `forbidden_remote_server_names` is empty. -# -# example: ["goodserver\\.tld$", "goodphrase"] -# -#allowed_remote_server_names = [] - -# Vector list of regex patterns of server names that conduwuit will refuse -# to download remote media from. -# -# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] -# -#prevent_media_downloads_from = [] - -# List of forbidden server names via regex patterns that we will block all -# outgoing federated room directory requests for. Useful for preventing -# our users from wandering into bad servers or spaces. -# -# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] -# -#forbidden_remote_room_directory_server_names = [] - -# Vector list of regex patterns of server names that conduwuit will not -# send messages to the client from. 
-# -# Note that there is no way for clients to receive messages once a server -# has become unignored without doing a full sync. This is a protocol -# limitation with the current sync protocols. This means this is somewhat -# of a nuclear option. -# -# example: ["reallybadserver\.tld$", "reallybadphrase", -# "69dollarfortnitecards"] -# -#ignore_messages_from_server_names = [] - -# Send messages from users that the user has ignored to the client. -# -# There is no way for clients to receive messages sent while a user was -# ignored without doing a full sync. This is a protocol limitation with -# the current sync protocols. Disabling this option will move -# responsibility of ignoring messages to the client, which can avoid this -# limitation. -# -#send_messages_from_ignored_users_to_client = false - -# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you -# do not want conduwuit to send outbound requests to. Defaults to -# RFC1918, unroutable, loopback, multicast, and testnet addresses for -# security. -# -# Please be aware that this is *not* a guarantee. You should be using a -# firewall with zones as doing this on the application layer may have -# bypasses. -# -# Currently this does not account for proxies in use like Synapse does. -# -# To disable, set this to be an empty vector (`[]`). -# -# Defaults to: -# ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", -# "192.168.0.0/16", "100.64.0.0/10", "192.0.0.0/24", "169.254.0.0/16", -# "192.88.99.0/24", "198.18.0.0/15", "192.0.2.0/24", "198.51.100.0/24", -# "203.0.113.0/24", "224.0.0.0/4", "::1/128", "fe80::/10", "fc00::/7", -# "2001:db8::/32", "ff00::/8", "fec0::/10"] -# -#ip_range_denylist = - -# Optional IP address or network interface-name to bind as the source of -# URL preview requests. If not set, it will not bind to a specific -# address or interface. -# -# Interface names only supported on Linux, Android, and Fuchsia platforms; -# all other platforms can specify the IP address. 
To list the interfaces -# on your system, use the command `ip link show`. -# -# example: `"eth0"` or `"1.2.3.4"` -# -#url_preview_bound_interface = - -# Vector list of domains allowed to send requests to for URL previews. -# -# This is a *contains* match, not an explicit match. Putting "google.com" -# will match "https://google.com" and -# "http://mymaliciousdomainexamplegoogle.com" Setting this to "*" will -# allow all URL previews. Please note that this opens up significant -# attack surface to your server, you are expected to be aware of the risks -# by doing so. -# -#url_preview_domain_contains_allowlist = [] - -# Vector list of explicit domains allowed to send requests to for URL -# previews. -# -# This is an *explicit* match, not a contains match. Putting "google.com" -# will match "https://google.com", "http://google.com", but not -# "https://mymaliciousdomainexamplegoogle.com". Setting this to "*" will -# allow all URL previews. Please note that this opens up significant -# attack surface to your server, you are expected to be aware of the risks -# by doing so. -# -#url_preview_domain_explicit_allowlist = [] - -# Vector list of explicit domains not allowed to send requests to for URL -# previews. -# -# This is an *explicit* match, not a contains match. Putting "google.com" -# will match "https://google.com", "http://google.com", but not -# "https://mymaliciousdomainexamplegoogle.com". The denylist is checked -# first before allowlist. Setting this to "*" will not do anything. -# -#url_preview_domain_explicit_denylist = [] - -# Vector list of URLs allowed to send requests to for URL previews. -# -# Note that this is a *contains* match, not an explicit match. Putting -# "google.com" will match "https://google.com/", -# "https://google.com/url?q=https://mymaliciousdomainexample.com", and -# "https://mymaliciousdomainexample.com/hi/google.com" Setting this to "*" -# will allow all URL previews. 
Please note that this opens up significant -# attack surface to your server, you are expected to be aware of the risks -# by doing so. -# -#url_preview_url_contains_allowlist = [] - -# Maximum amount of bytes allowed in a URL preview body size when -# spidering. Defaults to 256KB in bytes. -# -#url_preview_max_spider_size = 256000 - -# Option to decide whether you would like to run the domain allowlist -# checks (contains and explicit) on the root domain or not. Does not apply -# to URL contains allowlist. Defaults to false. -# -# Example usecase: If this is enabled and you have "wikipedia.org" allowed -# in the explicit and/or contains domain allowlist, it will allow all -# subdomains under "wikipedia.org" such as "en.m.wikipedia.org" as the -# root domain is checked and matched. Useful if the domain contains -# allowlist is still too broad for you but you still want to allow all the -# subdomains under a root domain. -# -#url_preview_check_root_domain = false - -# List of forbidden room aliases and room IDs as strings of regex -# patterns. -# -# Regex can be used or explicit contains matches can be done by just -# specifying the words (see example). -# -# This is checked upon room alias creation, custom room ID creation if -# used, and startup as warnings if any room aliases in your database have -# a forbidden room alias/ID. -# -# example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] -# -#forbidden_alias_names = [] - -# List of forbidden username patterns/strings. -# -# Regex can be used or explicit contains matches can be done by just -# specifying the words (see example). -# -# This is checked upon username availability check, registration, and -# startup as warnings if any local users in your database have a forbidden -# username. -# -# example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] -# -#forbidden_usernames = [] - -# Retry failed and incomplete messages to remote servers immediately upon -# startup. This is called bursting. 
If this is disabled, said messages may -# not be delivered until more messages are queued for that server. Do not -# change this option unless server resources are extremely limited or the -# scale of the server's deployment is huge. Do not disable this unless you -# know what you are doing. -# -#startup_netburst = true - -# Messages are dropped and not reattempted. The `startup_netburst` option -# must be enabled for this value to have any effect. Do not change this -# value unless you know what you are doing. Set this value to -1 to -# reattempt every message without trimming the queues; this may consume -# significant disk. Set this value to 0 to drop all messages without any -# attempt at redelivery. -# -#startup_netburst_keep = 50 - -# Block non-admin local users from sending room invites (local and -# remote), and block non-admin users from receiving remote room invites. -# -# Admins are always allowed to send and receive all room invites. -# -#block_non_admin_invites = false - -# Allow admins to enter commands in rooms other than "#admins" (admin -# room) by prefixing your message with "\!admin" or "\\!admin" followed up -# a normal conduwuit admin command. The reply will be publicly visible to -# the room, originating from the sender. -# -# example: \\!admin debug ping puppygock.gay -# -#admin_escape_commands = true - -# Automatically activate the conduwuit admin room console / CLI on -# startup. This option can also be enabled with `--console` conduwuit -# argument. -# -#admin_console_automatic = false - -# List of admin commands to execute on startup. -# -# This option can also be configured with the `--execute` conduwuit -# argument and can take standard shell commands and environment variables -# -# For example: `./conduwuit --execute "server admin-notice conduwuit has -# started up at $(date)"` -# -# example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` -# -#admin_execute = [] - -# Ignore errors in startup commands. 
-# -# If false, conduwuit will error and fail to start if an admin execute -# command (`--execute` / `admin_execute`) fails. -# -#admin_execute_errors_ignore = false - -# List of admin commands to execute on SIGUSR2. -# -# Similar to admin_execute, but these commands are executed when the -# server receives SIGUSR2 on supporting platforms. -# -#admin_signal_execute = [] - -# Controls the max log level for admin command log captures (logs -# generated from running admin commands). Defaults to "info" on release -# builds, else "debug" on debug builds. -# -#admin_log_capture = "info" - -# The default room tag to apply on the admin room. -# -# On some clients like Element, the room tag "m.server_notice" is a -# special pinned room at the very bottom of your room list. The conduwuit -# admin room can be pinned here so you always have an easy-to-access -# shortcut dedicated to your admin room. -# -#admin_room_tag = "m.server_notice" - -# Sentry.io crash/panic reporting, performance monitoring/metrics, etc. -# This is NOT enabled by default. conduwuit's default Sentry reporting -# endpoint domain is `o4506996327251968.ingest.us.sentry.io`. -# -#sentry = false - -# Sentry reporting URL, if a custom one is desired. -# -#sentry_endpoint = "" - -# Report your conduwuit server_name in Sentry.io crash reports and -# metrics. -# -#sentry_send_server_name = false - -# Performance monitoring/tracing sample rate for Sentry.io. -# -# Note that too high values may impact performance, and can be disabled by -# setting it to 0.0 (0%) This value is read as a percentage to Sentry, -# represented as a decimal. Defaults to 15% of traces (0.15) -# -#sentry_traces_sample_rate = 0.15 - -# Whether to attach a stacktrace to Sentry reports. -# -#sentry_attach_stacktrace = false - -# Send panics to Sentry. This is true by default, but Sentry has to be -# enabled. The global `sentry` config option must be enabled to send any -# data. -# -#sentry_send_panic = true - -# Send errors to sentry. 
This is true by default, but sentry has to be -# enabled. This option is only effective in release-mode; forced to false -# in debug-mode. -# -#sentry_send_error = true - -# Controls the tracing log level for Sentry to send things like -# breadcrumbs and transactions -# -#sentry_filter = "info" - -# Enable the tokio-console. This option is only relevant to developers. -# -# For more information, see: -# https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console -# -#tokio_console = false - -# This item is undocumented. Please contribute documentation for it. -# -#test = false - -# Controls whether admin room notices like account registrations, password -# changes, account deactivations, room directory publications, etc will be -# sent to the admin room. Update notices and normal admin command -# responses will still be sent. -# -#admin_room_notices = true - -# Enable database pool affinity support. On supporting systems, block -# device queue topologies are detected and the request pool is optimized -# for the hardware; db_pool_workers is determined automatically. -# -#db_pool_affinity = true - -# Sets the number of worker threads in the frontend-pool of the database. -# This number should reflect the I/O capabilities of the system, -# such as the queue-depth or the number of simultaneous requests in -# flight. Defaults to 32 or four times the number of CPU cores, whichever -# is greater. -# -# Note: This value is only used if db_pool_affinity is disabled or not -# detected on the system, otherwise it is determined automatically. -# -#db_pool_workers = 32 - -# When db_pool_affinity is enabled and detected, the size of any worker -# group will not exceed the determined value. This is necessary when -# thread-pooling approach does not scale to the full capabilities of -# high-end hardware; using detected values without limitation could -# degrade performance. 
-# -# The value is multiplied by the number of cores which share a device -# queue, since group workers can be scheduled on any of those cores. -# -#db_pool_workers_limit = 64 - -# Determines the size of the queues feeding the database's frontend-pool. -# The size of the queue is determined by multiplying this value with the -# number of pool workers. When this queue is full, tokio tasks conducting -# requests will yield until space is available; this is good for -# flow-control by avoiding buffer-bloat, but can inhibit throughput if -# too low. -# -#db_pool_queue_mult = 4 - -# Sets the initial value for the concurrency of streams. This value simply -# allows overriding the default in the code. The default is 32, which is -# the same as the default in the code. Note this value is itself -# overridden by the computed stream_width_scale, unless that is disabled; -# this value can serve as a fixed-width instead. -# -#stream_width_default = 32 - -# Scales the stream width starting from a base value detected for the -# specific system. The base value is the database pool worker count -# determined from the hardware queue size (e.g. 32 for SSD or 64 or 128+ -# for NVMe). This float allows scaling the width up or down by multiplying -# it (e.g. 1.5, 2.0, etc). The maximum result can be the size of the pool -# queue (see: db_pool_queue_mult) as any larger value will stall the tokio -# task. The value can also be scaled down (e.g. 0.5) to improve -# responsiveness for many users at the cost of throughput for each. -# -# Setting this value to 0.0 causes the stream width to be fixed at the -# value of stream_width_default. The default scale is 1.0 to match the -# capabilities detected for the system. -# -#stream_width_scale = 1.0 - -# Sets the initial amplification factor. This controls batch sizes of -# requests made by each pool worker, multiplying the throughput of each -# stream. 
This value is somewhat abstract from specific hardware -# characteristics and can be significantly larger than any thread count or -# queue size. This is because each database query may require several -# index lookups, thus many database queries in a batch may make progress -# independently while also sharing index and data blocks which may or may -# not be cached. It is worthwhile to submit huge batches to reduce -# complexity. The maximum value is 32768, though sufficient hardware is -# still advised for that. -# -#stream_amplification = 1024 - -# Number of sender task workers; determines sender parallelism. Default is -# '0' which means the value is determined internally, likely matching the -# number of tokio worker-threads or number of cores, etc. Override by -# setting a non-zero value. -# -#sender_workers = 0 - -# Enables listener sockets; can be set to false to disable listening. This -# option is intended for developer/diagnostic purposes only. -# -#listening = true - -# Enables configuration reload when the server receives SIGUSR1 on -# supporting platforms. -# -#config_reload_signal = true - -[global.tls] - -# Path to a valid TLS certificate file. -# -# example: "/path/to/my/certificate.crt" -# -#certs = - -# Path to a valid TLS certificate private key. -# -# example: "/path/to/my/certificate.key" -# -#key = - # Whether to listen and allow for HTTP and HTTPS connections (insecure!) -# +# This config option is only available if conduwuit was built with `axum_dual_protocol` feature (not default feature) +# Defaults to false #dual_protocol = false -[global.well_known] -# The server URL that the client well-known file will serve. This should -# not contain a port, and should just be a valid HTTPS URL. +# If you are using delegation via well-known files and you cannot serve them from your reverse proxy, you can +# uncomment these to serve them directly from conduwuit. This requires proxying all requests to conduwuit, not just `/_matrix` to work. 
# -# example: "https://matrix.example.com" +#[global.well_known] +#server = "matrix.example.com:443" +#client = "https://matrix.example.com" # -#client = - -# The server base domain of the URL with a specific port that the server -# well-known file will serve. This should contain a port at the end, and -# should not be a URL. +# A single contact and/or support page for /.well-known/matrix/support +# All options here are strings. Currently only supports 1 single contact. +# No default. # -# example: "matrix.example.com:443" -# -#server = - -# This item is undocumented. Please contribute documentation for it. -# -#support_page = - -# This item is undocumented. Please contribute documentation for it. -# -#support_role = - -# This item is undocumented. Please contribute documentation for it. -# -#support_email = - -# This item is undocumented. Please contribute documentation for it. -# -#support_mxid = - -[global.blurhashing] - -# blurhashing x component, 4 is recommended by https://blurha.sh/ -# -#components_x = 4 - -# blurhashing y component, 3 is recommended by https://blurha.sh/ -# -#components_y = 3 - -# Max raw size that the server will blurhash, this is the size of the -# image after converting it to raw data, it should be higher than the -# upload limit but not too high. The higher it is the higher the -# potential load will be for clients requesting blurhashes. The default -# is 33.55MB. Setting it to 0 disables blurhashing. -# -#blurhash_max_raw_size = 33554432 +#support_page = "" +#support_role = "" +#support_email = "" +#support_mxid = "" diff --git a/debian/README.md b/debian/README.md index 800a2e09..063982de 100644 --- a/debian/README.md +++ b/debian/README.md @@ -1,29 +1,37 @@ -# conduwuit for Debian +conduwuit for Debian +================== -Information about downloading and deploying the Debian package. This may also be -referenced for other `apt`-based distros such as Ubuntu. 
+Installation +------------ -### Installation +Information about downloading, building and deploying the Debian package, see +the "Installing Conduit" section in the Deploying docs. +All following sections until "Setting up the Reverse Proxy" be ignored because +this is handled automatically by the packaging. -It is recommended to see the [generic deployment guide](../deploying/generic.md) -for further information if needed as usage of the Debian package is generally -related. +Configuration +------------- -No `apt` repository is currently offered yet, it is in the works/development. +When installed, Debconf generates the configuration of the homeserver +(host)name, the address and port it listens on. This configuration ends up in +`/etc/matrix-conduit/conduit.toml`. -### Configuration +You can tweak more detailed settings by uncommenting and setting the variables +in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum +file size for download/upload, enabling federation, etc. -When installed, the example config is placed at `/etc/conduwuit/conduwuit.toml` -as the default config. The config mentions things required to be changed before -starting. +Running +------- -You can tweak more detailed settings by uncommenting and setting the config -options in `/etc/conduwuit/conduwuit.toml`. +The package uses the `matrix-conduit.service` systemd unit file to start and +stop Conduit. It loads the configuration file mentioned above to set up the +environment before running the server. -### Running +This package assumes by default that Conduit will be placed behind a reverse +proxy such as Apache or nginx. This default deployment entails just listening +on `127.0.0.1` and the free port `6167` and is reachable via a client using the URL +. -The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop conduwuit. The binary is installed at `/usr/sbin/conduwuit`. 
- -This package assumes by default that conduwuit will be placed behind a reverse proxy. The default config options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS, so you will need to set up TLS certificates and renewal for it to work properly if you intend to federate. - -Consult various online documentation and guides on setting up a reverse proxy and TLS. Caddy is documented at the [generic deployment guide](../deploying/generic.md#setting-up-the-reverse-proxy) as it's the easiest and most user friendly. +At a later stage this packaging may support also setting up TLS and running +stand-alone. In this case, however, you need to set up some certificates and +renewal, for it to work properly. diff --git a/debian/config b/debian/config index ec84aaa1..8710ef97 100644 --- a/debian/config +++ b/debian/config @@ -1,18 +1,17 @@ #!/bin/sh set -e -# TODO: implement debconf support that is maintainable without duplicating the config # Source debconf library. -#. /usr/share/debconf/confmodule -# -## Ask for the Matrix homeserver name, address and port. -#db_input high conduwuit/hostname || true -#db_go -# -#db_input low conduwuit/address || true -#db_go -# -#db_input medium conduwuit/port || true -#db_go +. /usr/share/debconf/confmodule + +# Ask for the Matrix homeserver name, address and port. 
+db_input high matrix-conduit/hostname || true +db_go + +db_input low matrix-conduit/address || true +db_go + +db_input medium matrix-conduit/port || true +db_go exit 0 diff --git a/debian/conduwuit.service b/debian/matrix-conduit.service similarity index 65% rename from debian/conduwuit.service rename to debian/matrix-conduit.service index 3d2fbc9b..bc1347af 100644 --- a/debian/conduwuit.service +++ b/debian/matrix-conduit.service @@ -1,22 +1,13 @@ [Unit] Description=conduwuit Matrix homeserver -Wants=network-online.target After=network-online.target -Alias=matrix-conduwuit.service -Documentation=https://continuwuity.org/ [Service] DynamicUser=yes -User=conduwuit -Group=conduwuit +User=_matrix-conduit +Group=_matrix-conduit Type=notify -Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" - -ExecStart=/usr/sbin/conduwuit - -ReadWritePaths=/var/lib/conduwuit /etc/conduwuit - AmbientCapabilities= CapabilityBoundingSet= @@ -24,7 +15,7 @@ DevicePolicy=closed LockPersonality=yes MemoryDenyWriteExecute=yes NoNewPrivileges=yes -#ProcSubset=pid +ProcSubset=pid ProtectClock=yes ProtectControlGroups=yes ProtectHome=yes @@ -45,19 +36,22 @@ RestrictNamespaces=yes RestrictRealtime=yes RestrictSUIDSGID=yes SystemCallArchitectures=native -SystemCallFilter=@system-service @resources -SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc +SystemCallFilter=@system-service +SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @resources @privileged @keyring @ipc SystemCallErrorNumber=EPERM -#StateDirectory=conduwuit +StateDirectory=matrix-conduit -RuntimeDirectory=conduwuit +RuntimeDirectory=conduit RuntimeDirectoryMode=0750 +Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" + +ExecStart=/usr/sbin/matrix-conduit Restart=on-failure RestartSec=5 -TimeoutStopSec=2m -TimeoutStartSec=2m +TimeoutStopSec=4m +TimeoutStartSec=4m 
StartLimitInterval=1m StartLimitBurst=5 diff --git a/debian/postinst b/debian/postinst index 4eae4573..cf73fe1d 100644 --- a/debian/postinst +++ b/debian/postinst @@ -1,43 +1,28 @@ #!/bin/sh set -e -# TODO: implement debconf support that is maintainable without duplicating the config -#. /usr/share/debconf/confmodule +. /usr/share/debconf/confmodule -CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit -CONDUWUIT_CONFIG_PATH=/etc/conduwuit +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/ case "$1" in configure) - # Create the `conduwuit` user if it does not exist yet. - if ! getent passwd conduwuit > /dev/null ; then - echo 'Adding system user for the conduwuit Matrix homeserver' 1>&2 + # Create the `_matrix-conduit` user if it does not exist yet. + if ! getent passwd _matrix-conduit > /dev/null ; then + echo 'Adding system user for the Conduwuit Matrix homeserver' 1>&2 adduser --system --group --quiet \ - --home "$CONDUWUIT_DATABASE_PATH" \ + --home "$CONDUIT_DATABASE_PATH" \ --disabled-login \ --shell "/usr/sbin/nologin" \ - conduwuit + --force-badname \ + _matrix-conduit fi # Create the database path if it does not exist yet and fix up ownership - # and permissions for the config. - mkdir -v -p "$CONDUWUIT_DATABASE_PATH" - - # symlink the previous location for compatibility if it does not exist yet. - if ! test -L "/var/lib/matrix-conduit" ; then - ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit" - fi - - chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH" - chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH" - - chmod -v 740 "$CONDUWUIT_DATABASE_PATH" - - echo '' - echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!' - echo 'To start the server, run: systemctl start conduwuit.service' - echo '' - + # and permissions. 
+ mkdir -p "$CONDUIT_DATABASE_PATH" + chown _matrix-conduit:_matrix-conduit -R "$CONDUIT_DATABASE_PATH" + chmod 700 "$CONDUIT_DATABASE_PATH" ;; esac diff --git a/debian/postrm b/debian/postrm index 3c0b1c09..28949091 100644 --- a/debian/postrm +++ b/debian/postrm @@ -1,42 +1,25 @@ #!/bin/sh set -e -#. /usr/share/debconf/confmodule +. /usr/share/debconf/confmodule -CONDUWUIT_CONFIG_PATH=/etc/conduwuit -CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit -CONDUWUIT_DATABASE_PATH_SYMLINK=/var/lib/matrix-conduit +CONDUIT_CONFIG_PATH=/etc/matrix-conduit +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit case $1 in purge) # Remove debconf changes from the db - #db_purge + db_purge # Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior # "configuration files must be preserved when the package is removed, and # only deleted when the package is purged." - - # - - if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then - if test -L "$CONDUWUIT_CONFIG_PATH"; then - echo "Deleting conduwuit configuration files" - rm -v -r "$CONDUWUIT_CONFIG_PATH" - fi + if [ -d "$CONDUIT_CONFIG_PATH" ]; then + rm -r "$CONDUIT_CONFIG_PATH" fi - if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then - if test -L "$CONDUWUIT_DATABASE_PATH"; then - echo "Deleting conduwuit database directory" - rm -r "$CONDUWUIT_DATABASE_PATH" - fi - fi - - if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then - if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then - echo "Removing matrix-conduit symlink" - rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK" - fi + if [ -d "$CONDUIT_DATABASE_PATH" ]; then + rm -r "$CONDUIT_DATABASE_PATH" fi ;; esac diff --git a/debian/templates b/debian/templates new file mode 100644 index 00000000..c4281ad3 --- /dev/null +++ b/debian/templates @@ -0,0 +1,21 @@ +Template: matrix-conduit/hostname +Type: string +Default: localhost +Description: The server (host)name of the Matrix homeserver + This is the hostname the homeserver will be reachable at via a client. + . 
+ If set to "localhost", you can connect with a client locally and clients + from other hosts and also other homeservers will not be able to reach you! + +Template: matrix-conduit/address +Type: string +Default: 127.0.0.1 +Description: The listen address of the Matrix homeserver + This is the address the homeserver will listen on. Leave it set to 127.0.0.1 + when using a reverse proxy. + +Template: matrix-conduit/port +Type: string +Default: 6167 +Description: The port of the Matrix homeserver + This port is most often just accessed by a reverse proxy. diff --git a/development.md b/development.md deleted file mode 120000 index 35e9aab8..00000000 --- a/development.md +++ /dev/null @@ -1 +0,0 @@ -docs/development.md \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 536af632..00000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,208 +0,0 @@ -ARG RUST_VERSION=1 - -FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx -FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS base -FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS toolchain - -# Prevent deletion of apt cache -RUN rm -f /etc/apt/apt.conf.d/docker-clean - -# Match Rustc version as close as possible -# rustc -vV -ARG LLVM_VERSION=19 -# ENV RUSTUP_TOOLCHAIN=${RUST_VERSION} - -# Install repo tools -# Line one: compiler tools -# Line two: curl, for downloading binaries -# Line three: for xx-verify -RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ -apt-get update && apt-get install -y \ - clang-${LLVM_VERSION} lld-${LLVM_VERSION} pkg-config make jq \ - curl git \ - file - -# Create symlinks for LLVM tools -RUN <> /etc/environment - -# Configure pkg-config -RUN <> /etc/environment - echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment - echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment -EOF - -# Configure cc to use clang 
version -RUN <> /etc/environment - echo "CXX=clang++" >> /etc/environment -EOF - -# Cross-language LTO -RUN <> /etc/environment - echo "CXXFLAGS=-flto" >> /etc/environment - # Linker is set to target-compatible clang by xx - echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment -EOF - -# Apply CPU-specific optimizations if TARGET_CPU is provided -ARG TARGET_CPU= -RUN <> /etc/environment - echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment - echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment - fi -EOF - -# Prepare output directories -RUN mkdir /out - -FROM toolchain AS builder - -# Conduwuit version info -ARG COMMIT_SHA= -ARG CONDUWUIT_VERSION_EXTRA= -ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA -RUN <> /etc/environment -fi -EOF - -ARG TARGETPLATFORM - -# Verify environment configuration -RUN cat /etc/environment -RUN xx-cargo --print-target-triple - -# Get source -COPY . . - -# Build the binary -RUN --mount=type=cache,target=/usr/local/cargo/registry \ - --mount=type=cache,target=/usr/local/cargo/git/db \ - --mount=type=cache,target=/app/target \ - bash <<'EOF' - set -o allexport - . /etc/environment - TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \ - jq -r ".target_directory")) - mkdir /out/sbin - PACKAGE=conduwuit - xx-cargo build --locked --release \ - -p $PACKAGE; - BINARIES=($(cargo metadata --no-deps --format-version 1 | \ - jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. 
== \"bin\") | any ) | .name")) - for BINARY in "${BINARIES[@]}"; do - echo $BINARY - xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY - cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY - done -EOF - -# Generate Software Bill of Materials (SBOM) -RUN --mount=type=cache,target=/usr/local/cargo/registry \ - --mount=type=cache,target=/usr/local/cargo/git/db \ - bash <<'EOF' - mkdir /out/sbom - typeset -A PACKAGES - for BINARY in /out/sbin/*; do - BINARY_BASE=$(basename ${BINARY}) - package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. == \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name") - if [ -z "$package" ]; then - continue - fi - PACKAGES[$package]=1 - done - for PACKAGE in $(echo ${!PACKAGES[@]}); do - echo $PACKAGE - cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json - done -EOF - -# Extract dynamically linked dependencies -RUN <` +`@conduit:your.server.name: appservices unregister ` where `` one of the output of `appservices list`. + +### Tested appservices + +These appservices have been tested and work with Conduit without any extra steps: + +- [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord) +- [mautrix-hangouts](https://github.com/mautrix/hangouts/) +- [mautrix-telegram](https://github.com/mautrix/telegram/) +- [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward. 
+- [heisenbridge](https://github.com/hifi/heisenbridge/) diff --git a/docs/assets/conduwuit_logo.svg b/docs/assets/conduwuit_logo.svg deleted file mode 100644 index 9be5b453..00000000 --- a/docs/assets/conduwuit_logo.svg +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - diff --git a/docs/assets/gay dog anarchists.png b/docs/assets/gay dog anarchists.png deleted file mode 100644 index 871cf302..00000000 Binary files a/docs/assets/gay dog anarchists.png and /dev/null differ diff --git a/docs/community.md b/docs/community.md deleted file mode 100644 index a6852c0f..00000000 --- a/docs/community.md +++ /dev/null @@ -1,139 +0,0 @@ -# Continuwuity Community Guidelines - -Welcome to the Continuwuity commuwunity! We're excited to have you here. Continuwuity is a -continuation of the conduwuit homeserver, which in turn is a hard-fork of the Conduit homeserver, -aimed at making Matrix more accessible and inclusive for everyone. - -This space is dedicated to fostering a positive, supportive, and welcoming environment for everyone. -These guidelines apply to all Continuwuity spaces, including our Matrix rooms and any other -community channels that reference them. We've written these guidelines to help us all create an -environment where everyone feels safe and respected. - -For code and contribution guidelines, please refer to the -[Contributor's Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md). -Below are additional guidelines specific to the Continuwuity community. - -## Our Values and Expected Behaviors - -We strive to create a community based on mutual respect, collaboration, and inclusivity. We expect -all members to: - -1. **Be Respectful and Inclusive**: Treat everyone with respect. We're committed to a community - where everyone feels safe, regardless of background, identity, or experience. Discrimination, - harassment, or hate speech won't be tolerated. 
Remember that each person experiences the world - differently; share your own perspective and be open to learning about others'. - -2. **Be Positive and Constructive**: Engage in discussions constructively and support each other. - If you feel angry or frustrated, take a break before participating. Approach disagreements with - the goal of understanding, not winning. Focus on the issue, not the person. - -3. **Communicate Clearly and Kindly**: Our community includes neurodivergent individuals and those - who may not appreciate sarcasm or subtlety. Communicate clearly and kindly. Avoid ambiguity and - ensure your messages can be easily understood by all. Avoid placing the burden of education on - marginalized groups; please make an effort to look into your questions before asking others for - detailed explanations. - -4. **Be Open to Improving Inclusivity**: Actively participate in making our community more inclusive. - Report behaviour that contradicts these guidelines (see Reporting and Enforcement below) and be - open to constructive feedback aimed at improving our community. Understand that discussing - negative experiences can be emotionally taxing; focus on the message, not the tone. - -5. **Commit to Our Values**: Building an inclusive community requires ongoing effort from everyone. - Recognise that addressing bias and discrimination is a continuous process that needs commitment - and action from all members. - -## Unacceptable Behaviors - -To ensure everyone feels safe and welcome, the following behaviors are considered unacceptable -within the Continuwuity community: - -* **Harassment and Discrimination**: Avoid offensive comments related to background, family status, - gender, gender identity or expression, marital status, sex, sexual orientation, native language, - age, ability, race and/or ethnicity, caste, national origin, socioeconomic status, religion, - geographic location, or any other dimension of diversity. 
Don't deliberately misgender someone or - question the legitimacy of their gender identity. - -* **Violence and Threats**: Do not engage in any form of violence or threats, including inciting - violence towards anyone or encouraging self-harm. Posting or threatening to post someone else's - personally identifying information ("doxxing") is also forbidden. - -* **Personal Attacks**: Disagreements happen, but they should never turn into personal attacks. - Don't insult, demean, or belittle others. - -* **Unwelcome Attention or Contact**: Avoid unwelcome sexual attention, inappropriate physical - contact (or simulation thereof), sexualized comments, jokes, or imagery. - -* **Disruption**: Do not engage in sustained disruption of discussions, events, or other - community activities. - -* **Bad Faith Actions**: Do not intentionally make false reports or otherwise abuse the reporting - process. - -This is not an exhaustive list. Any behaviour that makes others feel unsafe or unwelcome may be -subject to enforcement action. - -## Matrix Community - -These Community Guidelines apply to the entire -[Continuwuity Matrix Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, including: - -### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) - -This room is for support and discussions about Continuwuity. Ask questions, share insights, and help -each other out while adhering to these guidelines. - -We ask that this room remain focused on the Continuwuity software specifically: the team are -typically happy to engage in conversations about related subjects in the off-topic room. - -### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org) - -For off-topic community conversations about any subject. While this room allows for a wide range of -topics, the same guidelines apply. 
Please keep discussions respectful and inclusive, and avoid -divisive or stressful subjects like specific country/world politics unless handled with exceptional -care and respect for diverse viewpoints. - -General topics, such as world events, are welcome as long as they follow the guidelines. If a member -of the team asks for the conversation to end, please respect their decision. - -### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org) - -This room is dedicated to discussing active development of Continuwuity, including ongoing issues or -code development. Collaboration here must follow these guidelines, and please consider raising -[an issue](https://forgejo.ellis.link/continuwuation/continuwuity/issues) on the repository to help -track progress. - -## Reporting and Enforcement - -We take these Community Guidelines seriously to protect our community members. If you witness or -experience unacceptable behaviour, or have any other concerns, please report it. - -**How to Report:** - -* **Alert Moderators in the Room:** If you feel comfortable doing so, you can address the issue - publicly in the relevant room by mentioning the moderation bot, `@rock:continuwuity.org`, which - will immediately alert all available moderators. -* **Direct Message:** If you're not comfortable raising the issue publicly, please send a direct - message (DM) to one of the room moderators. - -Reports will be handled with discretion. We will investigate promptly and thoroughly. - -**Enforcement Actions:** - -Anyone asked to stop unacceptable behaviour is expected to comply immediately. Failure to do so, or -engaging in prohibited behaviour, may result in enforcement action. Moderators may take actions they -deem appropriate, including but not limited to: - -1. **Warning**: A direct message or public warning identifying the violation and requesting - corrective action. -2. **Temporary Mute**: Temporary restriction from participating in discussions for a specified - period. 
-3. **Kick or Ban**: Removal from a room (kick) or the entire community space (ban). Egregious or - repeated violations may result in an immediate ban. Bans are typically permanent and reviewed - only in exceptional circumstances. - -Retaliation against those who report concerns in good faith will not be tolerated and will be -subject to the same enforcement actions. - -Together, let's build and maintain a community where everyone feels valued, safe, and respected. - -— The Continuwuity Moderation Team diff --git a/docs/configuration.md b/docs/configuration.md index 778e5c56..70069af0 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,64 +1,5 @@ -# Configuration +# Example configuration -This chapter describes various ways to configure Continuwuity. - -## Basics - -Continuwuity uses a config file for the majority of the settings, but also supports -setting individual config options via commandline. - -Please refer to the [example config -file](./configuration/examples.md#example-configuration) for all of those -settings. - -The config file to use can be specified on the commandline when running -Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use -the environment variable `CONDUWUIT_CONFIG` to specify the config file to used. -Conduit's environment variables are supported for backwards compatibility. - -## Option commandline flag - -Continuwuity supports setting individual config options in TOML format from the -`-O` / `--option` flag. For example, you can set your server name via `-O -server_name=\"example.com\"`. - -Note that the config is parsed as TOML, and shells like bash will remove quotes. -So unfortunately it is required to escape quotes if the config option takes a -string. 
This does not apply to options that take booleans or numbers: -- `--option allow_registration=true` works ✅ -- `-O max_request_size=99999999` works ✅ -- `-O server_name=example.com` does not work ❌ -- `--option log=\"debug\"` works ✅ -- `--option server_name='"example.com'"` works ✅ - -## Execute commandline flag - -Continuwuity supports running admin commands on startup using the commandline -argument `--execute`. The most notable use for this is to create an admin user -on first startup. - -The syntax of this is a standard admin command without the prefix such as -`./conduwuit --execute "users create_user june"` - -An example output of a success is: +``` toml +{{#include ../conduwuit-example.toml}} ``` -INFO conduwuit_service::admin::startup: Startup command #0 completed: -Created user with user_id: @june:girlboss.ceo and password: `` -``` - -This commandline argument can be paired with the `--option` flag. - -## Environment variables - -All of the settings that are found in the config file can be specified by using -environment variables. The environment variable names should be all caps and -prefixed with `CONDUWUIT_`. - -For example, if the setting you are changing is `max_request_size`, then the -environment variable to set is `CONDUWUIT_MAX_REQUEST_SIZE`. - -To modify config options not in the `[global]` context such as -`[global.well_known]`, use the `__` suffix split: `CONDUWUIT_WELL_KNOWN__SERVER` - -Conduit's environment variables are supported for backwards compatibility (e.g. -`CONDUIT_SERVER_NAME`). diff --git a/docs/configuration/examples.md b/docs/configuration/examples.md deleted file mode 100644 index 54aa8bd7..00000000 --- a/docs/configuration/examples.md +++ /dev/null @@ -1,32 +0,0 @@ -## Example configuration - -
-Example configuration - -```toml -{{#include ../../conduwuit-example.toml}} -``` - -
- -## Debian systemd unit file - -
-Debian systemd unit file - -``` -{{#include ../../debian/conduwuit.service}} -``` - -
- -## Arch Linux systemd unit file - -
-Arch Linux systemd unit file - -``` -{{#include ../../arch/conduwuit.service}} -``` - -
diff --git a/docs/contributing.md b/docs/contributing.md deleted file mode 120000 index 44fcc634..00000000 --- a/docs/contributing.md +++ /dev/null @@ -1 +0,0 @@ -../CONTRIBUTING.md \ No newline at end of file diff --git a/docs/deploying.md b/docs/deploying.md index be1bf736..fecf7647 100644 --- a/docs/deploying.md +++ b/docs/deploying.md @@ -1,3 +1,3 @@ # Deploying -This chapter describes various ways to deploy Continuwuity. +This chapter describes various ways to deploy Conduwuit. diff --git a/docs/deploying/arch-linux.md b/docs/deploying/arch-linux.md deleted file mode 100644 index a14201e3..00000000 --- a/docs/deploying/arch-linux.md +++ /dev/null @@ -1,3 +0,0 @@ -# Continuwuity for Arch Linux - -Continuwuity does not have any Arch Linux packages at this time. diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 04142e0c..aa4c149c 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -1,49 +1,55 @@ -# Continuwuity - Behind Traefik Reverse Proxy +# Conduit - Behind Traefik Reverse Proxy +version: '2.4' # uses '2.4' for cpuset services: - homeserver: - ### If you already built the conduduwit image with 'docker build' or want to use the Docker Hub image, - ### then you are ready to go. - image: forgejo.ellis.link/continuwuation/continuwuity:latest - restart: unless-stopped - volumes: - - db:/var/lib/conduwuit - - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. 
- #- ./conduwuit.toml:/etc/conduwuit.toml - networks: - - proxy - environment: - CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label - CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - CONDUWUIT_ALLOW_REGISTRATION: 'true' - CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. - #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' - CONDUWUIT_ALLOW_FEDERATION: 'true' - CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUWUIT_LOG: warn,state_res=warn - CONDUWUIT_ADDRESS: 0.0.0.0 - #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above - - # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN - # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a seperate - # see the override file for more information about delegation - CONDUWUIT_WELL_KNOWN: | - { - client=https://your.server.name.example, - server=your.server.name.example:443 - } - #cpuset: "0-4" # Uncomment to limit to specific CPU cores - ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it - nofile: - soft: 1048567 - hard: 1048567 + homeserver: + ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, + ### then you are ready to go. + image: girlbossceo/conduwuit:latest + ### If you want to build a fresh image from the sources, then comment the image line and uncomment the + ### build lines. 
If you want meaningful labels in your built Conduit image, you should run docker-compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + # build: + # context: . + # args: + # CREATED: '2021-03-16T08:18:27Z' + # VERSION: '0.1.0' + # LOCAL: 'false' + # GIT_REF: origin/master + restart: unless-stopped + volumes: + - db:/var/lib/matrix-conduit + #- ./conduwuit.toml:/etc/conduit.toml + networks: + - proxy + environment: + CONDUIT_SERVER_NAME: your.server.name # EDIT THIS + CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit + CONDUIT_DATABASE_BACKEND: rocksdb + CONDUIT_PORT: 6167 + CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + CONDUIT_ALLOW_REGISTRATION: 'true' + CONDUIT_ALLOW_FEDERATION: 'true' + CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 + #CONDUIT_LOG: warn,state_res=warn + CONDUIT_ADDRESS: 0.0.0.0 + #CONDUIT_CONFIG: './conduwuit.toml' # Uncomment if you mapped config toml above + #cpuset: "0-4" # Uncomment to limit to specific CPU cores + # We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container + # to serve those two as static files. If you want to use a different way, delete or comment the below service, here + # and in the docker-compose override file. + well-known: + image: nginx:latest + restart: unless-stopped + volumes: + - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files + - ./nginx/www:/var/www/ # location of the client and server .well-known-files ### Uncomment if you want to use your own Element-Web App.
### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and Continuwuity + ### Domain or Subdomain for the communication between Element and Conduit ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest @@ -56,12 +62,10 @@ services: # - homeserver volumes: - db: + db: networks: - # This is the network Traefik listens to, if your network has a different - # name, don't forget to change it here and in the docker-compose.override.yml - proxy: - external: true - -# vim: ts=2:sw=2:expandtab + # This is the network Traefik listens to, if your network has a different + # name, don't forget to change it here and in the docker-compose.override.yml + proxy: + external: true diff --git a/docs/deploying/docker-compose.override.yml b/docs/deploying/docker-compose.override.yml index ec82fac3..a41d0202 100644 --- a/docs/deploying/docker-compose.override.yml +++ b/docs/deploying/docker-compose.override.yml @@ -1,37 +1,45 @@ -# Continuwuity - Traefik Reverse Proxy Labels +# Conduit - Traefik Reverse Proxy Labels +version: '2.4' # uses '2.4' for cpuset services: - homeserver: - labels: - - "traefik.enable=true" - - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network + homeserver: + labels: + - "traefik.enable=true" + - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - - "traefik.http.routers.to-conduwuit.rule=Host(`.`)" # Change to the address on which Continuwuity is hosted - - "traefik.http.routers.to-conduwuit.tls=true" - - "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt" - - "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker" - - "traefik.http.services.to_conduwuit.loadbalancer.server.port=6167" + - "traefik.http.routers.to-conduit.rule=Host(`.`)" # Change to the address on which Conduit 
is hosted + - "traefik.http.routers.to-conduit.tls=true" + - "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt" + - "traefik.http.routers.to-conduit.middlewares=cors-headers@docker" - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" - # If you want to have your account on , but host Continuwuity on a subdomain, - # you can let it only handle the well known file on that domain instead - #- "traefik.http.routers.to-matrix-wellknown.rule=Host(``) && PathPrefix(`/.well-known/matrix`)" - #- "traefik.http.routers.to-matrix-wellknown.tls=true" - #- "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt" - #- "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker" + # We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container + # to serve those two as static files. If you want to use a different way, delete or comment the below service, here + # and in the docker-compose file. 
+ well-known: + labels: + - "traefik.enable=true" + - "traefik.docker.network=proxy" - ### Uncomment this if you uncommented Element-Web App in the docker-compose.yml - # element-web: - # labels: - # - "traefik.enable=true" - # - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network + - "traefik.http.routers.to-matrix-wellknown.rule=Host(`.`) && PathPrefix(`/.well-known/matrix`)" + - "traefik.http.routers.to-matrix-wellknown.tls=true" + - "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt" + - "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker" - # - "traefik.http.routers.to-element-web.rule=Host(`.`)" # Change to the address on which Element-Web is hosted - # - "traefik.http.routers.to-element-web.tls=true" - # - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" + - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" -# vim: ts=2:sw=2:expandtab + ### Uncomment this if you uncommented Element-Web App in the docker-compose.yml + # element-web: + # labels: + # - "traefik.enable=true" + # - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network + + # - "traefik.http.routers.to-element-web.rule=Host(`.`)" # Change to the address on which Element-Web is hosted + # - "traefik.http.routers.to-element-web.tls=true" + # - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt" diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml deleted file mode 100644 index 9ee98428..00000000 --- a/docs/deploying/docker-compose.with-caddy.yml +++ /dev/null @@ -1,56 +0,0 @@ -services: - caddy: - # This compose file uses 
caddy-docker-proxy as the reverse proxy for Continuwuity! - # For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy - image: lucaslorentz/caddy-docker-proxy:ci-alpine - ports: - - 80:80 - - 443:443 - environment: - - CADDY_INGRESS_NETWORKS=caddy - networks: - - caddy - volumes: - - /var/run/docker.sock:/var/run/docker.sock - - ./data:/data - restart: unless-stopped - labels: - caddy: example.com - caddy.0_respond: /.well-known/matrix/server {"m.server":"matrix.example.com:443"} - caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}} - - homeserver: - ### If you already built the Continuwuity image with 'docker build' or want to use a registry image, - ### then you are ready to go. - image: forgejo.ellis.link/continuwuation/continuwuity:latest - restart: unless-stopped - volumes: - - db:/var/lib/conduwuit - - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. - #- ./conduwuit.toml:/etc/conduwuit.toml - environment: - CONDUWUIT_SERVER_NAME: example.com # EDIT THIS - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_PORT: 6167 - CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - CONDUWUIT_ALLOW_REGISTRATION: 'true' - CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. 
- #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' - CONDUWUIT_ALLOW_FEDERATION: 'true' - CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUWUIT_LOG: warn,state_res=warn - CONDUWUIT_ADDRESS: 0.0.0.0 - #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above - networks: - - caddy - labels: - caddy: matrix.example.com - caddy.reverse_proxy: "{{upstreams 6167}}" - -volumes: - db: - -networks: - caddy: - external: true diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 9083b796..a7a0274e 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -1,57 +1,59 @@ -# Continuwuity - Behind Traefik Reverse Proxy +# Conduit - Behind Traefik Reverse Proxy +version: '2.4' # uses '2.4' for cpuset services: - homeserver: - ### If you already built the Continuwuity image with 'docker build' or want to use the Docker Hub image, - ### then you are ready to go. - image: forgejo.ellis.link/continuwuation/continuwuity:latest - restart: unless-stopped - volumes: - - db:/var/lib/conduwuit - - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. 
- #- ./conduwuit.toml:/etc/conduwuit.toml - networks: - - proxy - environment: - CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - CONDUWUIT_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this - CONDUWUIT_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server - #CONDUWUIT_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read - CONDUWUIT_ADDRESS: 0.0.0.0 - CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - #CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above - ### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example example config too - # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUWUIT_LOG: info # default is: "warn,state_res=warn" - # CONDUWUIT_ALLOW_ENCRYPTION: 'true' - # CONDUWUIT_ALLOW_FEDERATION: 'true' - # CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - # CONDUWUIT_ALLOW_INCOMING_PRESENCE: true - # CONDUWUIT_ALLOW_OUTGOING_PRESENCE: true - # CONDUWUIT_ALLOW_LOCAL_PRESENCE: true - # CONDUWUIT_WORKERS: 10 - # CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - # CONDUWUIT_NEW_USER_DISPLAYNAME_SUFFIX = "🏳<200d>⚧" + homeserver: + ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, + ### then you are ready to go. + image: girlbossceo/conduwuit:latest + ### If you want to build a fresh image from the sources, then comment the image line and uncomment the + ### build lines. 
If you want meaningful labels in your built Conduit image, you should run docker-compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + # build: + # context: . + # args: + # CREATED: '2021-03-16T08:18:27Z' + # VERSION: '0.1.0' + # LOCAL: 'false' + # GIT_REF: origin/master + restart: unless-stopped + volumes: + - db:/srv/conduit/.local/share/conduit + #- ./conduwuit.toml:/etc/conduit.toml + networks: + - proxy + environment: + CONDUIT_SERVER_NAME: your.server.name # EDIT THIS + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + CONDUIT_ALLOW_REGISTRATION : 'true' + #CONDUIT_CONFIG: './conduwuit.toml' # Uncomment if you mapped config toml above + ### Uncomment and change values as desired + # CONDUIT_ADDRESS: 0.0.0.0 + # CONDUIT_PORT: 6167 + # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging + # CONDUIT_LOG: info # default is: "warn,state_res=warn" + # CONDUIT_ALLOW_JAEGER: 'false' + # CONDUIT_ALLOW_ENCRYPTION: 'true' + # CONDUIT_ALLOW_FEDERATION: 'true' + # CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' + # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit + # CONDUIT_WORKERS: 10 + # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + #cpuset: "0-4" # Uncomment to limit to specific CPU cores - # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN - # variable / config option, there are multiple ways to do this, e.g. 
in the conduwuit.toml file, and in a seperate - reverse proxy, but since you do not have a reverse proxy and following this guide, this example is included - CONDUWUIT_WELL_KNOWN: | - { - client=https://your.server.name.example, - server=your.server.name.example:443 - } - #cpuset: "0-4" # Uncomment to limit to specific CPU cores - ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it - nofile: - soft: 1048567 - hard: 1048567 + # We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container + # to serve those two as static files. If you want to use a different way, delete or comment the below service, here + # and in the docker-compose override file. + well-known: + image: nginx:latest + restart: unless-stopped + volumes: + - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files + - ./nginx/www:/var/www/ # location of the client and server .well-known-files ### Uncomment if you want to use your own Element-Web App.
### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and Continuwuity + ### Domain or Subdomain for the communication between Element and Conduit ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest @@ -63,85 +65,33 @@ services: # depends_on: # - homeserver - traefik: - image: "traefik:latest" - container_name: "traefik" - restart: "unless-stopped" - ports: - - "80:80" - - "443:443" - volumes: - - "/var/run/docker.sock:/var/run/docker.sock:z" - - "acme:/etc/traefik/acme" - #- "./traefik_config:/etc/traefik:z" - labels: - - "traefik.enable=true" + traefik: + image: "traefik:latest" + container_name: "traefik" + restart: "unless-stopped" + ports: + - "80:80" + - "443:443" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + # - "./traefik_config:/etc/traefik" + - "acme:/etc/traefik/acme" + labels: + - "traefik.enable=true" - # middleware redirect - - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" - # global redirect to https - - "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)" - - "traefik.http.routers.redirs.entrypoints=web" - - "traefik.http.routers.redirs.middlewares=redirect-to-https" + # middleware redirect + - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" + # global redirect to https + - "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)" + - "traefik.http.routers.redirs.entrypoints=http" + - "traefik.http.routers.redirs.middlewares=redirect-to-https" - configs: - - source: dynamic.yml - target: /etc/traefik/dynamic.yml - - environment: - TRAEFIK_LOG_LEVEL: DEBUG - TRAEFIK_ENTRYPOINTS_WEB: true - TRAEFIK_ENTRYPOINTS_WEB_ADDRESS: ":80" - TRAEFIK_ENTRYPOINTS_WEB_HTTP_REDIRECTIONS_ENTRYPOINT_TO: websecure - - TRAEFIK_ENTRYPOINTS_WEBSECURE: true - TRAEFIK_ENTRYPOINTS_WEBSECURE_ADDRESS: ":443" - 
TRAEFIK_ENTRYPOINTS_WEBSECURE_HTTP_TLS_CERTRESOLVER: letsencrypt - #TRAEFIK_ENTRYPOINTS_WEBSECURE_HTTP_MIDDLEWARES: secureHeaders@file # if you want to enabled STS - - TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT: true - TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_EMAIL: # Set this to the email you want to receive certificate expiration emails for - TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_KEYTYPE: EC384 - TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_HTTPCHALLENGE: true - TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_HTTPCHALLENGE_ENTRYPOINT: web - TRAEFIK_CERTIFICATESRESOLVERS_LETSENCRYPT_ACME_STORAGE: "/etc/traefik/acme/acme.json" - - TRAEFIK_PROVIDERS_DOCKER: true - TRAEFIK_PROVIDERS_DOCKER_ENDPOINT: "unix:///var/run/docker.sock" - TRAEFIK_PROVIDERS_DOCKER_EXPOSEDBYDEFAULT: false - - TRAEFIK_PROVIDERS_FILE: true - TRAEFIK_PROVIDERS_FILE_FILENAME: "/etc/traefik/dynamic.yml" - -configs: - dynamic.yml: - content: | - # Optionally set STS headers, like in https://hstspreload.org - # http: - # middlewares: - # secureHeaders: - # headers: - # forceSTSHeader: true - # stsIncludeSubdomains: true - # stsPreload: true - # stsSeconds: 31536000 - tls: - options: - default: - cipherSuites: - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - minVersion: VersionTLS12 + networks: + - proxy volumes: db: acme: networks: - proxy: - -# vim: ts=2:sw=2:expandtab + proxy: \ No newline at end of file diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index 1a3ab811..4bcfb79a 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -1,34 +1,46 @@ -# Continuwuity +# Conduit +version: '2.4' # uses '2.4' for cpuset services: homeserver: - ### If you already built the Continuwuity image with 'docker build' or want 
to use a registry image, + ### If you already built the Conduit image with 'docker build' or want to use a registry image, ### then you are ready to go. - image: forgejo.ellis.link/continuwuation/continuwuity:latest + image: girlbossceo/conduwuit:latest + ### If you want to build a fresh image from the sources, then comment the image line and uncomment the + ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + # build: + # context: . + # args: + # CREATED: '2021-03-16T08:18:27Z' + # VERSION: '0.1.0' + # LOCAL: 'false' + # GIT_REF: origin/master restart: unless-stopped ports: - 8448:6167 volumes: - - db:/var/lib/conduwuit - #- ./conduwuit.toml:/etc/conduwuit.toml + - db:/var/lib/matrix-conduit + #- ./conduwuit.toml:/etc/conduit.toml environment: - CONDUWUIT_SERVER_NAME: your.server.name # EDIT THIS - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_PORT: 6167 - CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - CONDUWUIT_ALLOW_REGISTRATION: 'true' - CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. 
- #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' - CONDUWUIT_ALLOW_FEDERATION: 'true' - CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUWUIT_LOG: warn,state_res=warn - CONDUWUIT_ADDRESS: 0.0.0.0 - #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above + CONDUIT_SERVER_NAME: your.server.name # EDIT THIS + CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit + CONDUIT_DATABASE_BACKEND: rocksdb + CONDUIT_PORT: 6167 + CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + CONDUIT_ALLOW_REGISTRATION: 'true' + CONDUIT_ALLOW_FEDERATION: 'true' + CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + #CONDUIT_MAX_CONCURRENT_REQUESTS: 400 + #CONDUIT_LOG: warn,state_res=warn + CONDUIT_ADDRESS: 0.0.0.0 + #CONDUIT_CONFIG: './conduwuit.toml' # Uncomment if you mapped config toml above + #cpuset: "0-4" # Uncomment to limit to specific CPU cores # ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and Continuwuity + ### Domain or Subdomain for the communication between Element and Conduit ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index 08a0dc4f..a6ba52bb 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -1,28 +1,52 @@ -# Continuwuity for Docker +# Conduwuit for Docker ## Docker -To run Continuwuity with Docker you can either build the image yourself or pull it -from a registry. +To run conduwuit with Docker you can either build the image yourself or pull it from a registry. + ### Use a registry -OCI images for Continuwuity are available in the registries listed below. 
+OCI images for conduwuit are available in the registries listed below. -| Registry | Image | Notes | -| --------------- | --------------------------------------------------------------- | -----------------------| -| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest][fj] | Latest tagged image. | -| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main][fj] | Main branch image. | +| Registry | Image | Size | Notes | +| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- | +| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable image. | +| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable image. | +| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh] | ![Image Size][shield-main] | Development version. | +| Docker Hub | [docker.io/girlbossceo/conduwuit:main][dh] | ![Image Size][shield-main] | Development version. | -[fj]: https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity -Use +[dh]: https://hub.docker.com/repository/docker/girlbossceo/conduwuit +[gh]: https://github.com/girlbossceo/conduwuit/pkgs/container/conduwuit +[shield-latest]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest +[shield-main]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main + + +Use +```bash +docker image pull +``` +to pull it to your machine. + + + +### Build using a Dockerfile + +The Dockerfile provided by Conduit has two stages, each of which creates an image. + +1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository. +2. **Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. 
+ +To build the image you can use the following command ```bash -docker image pull $LINK +docker build --tag girlbossceo/conduwuit:main . ``` -to pull it to your machine. +which also will tag the resulting image as `girlbossceo/conduwuit:main`. + + ### Run @@ -30,115 +54,163 @@ When you have the image you can simply run it with ```bash docker run -d -p 8448:6167 \ - -v db:/var/lib/conduwuit/ \ - -e CONDUWUIT_SERVER_NAME="your.server.name" \ - -e CONDUWUIT_ALLOW_REGISTRATION=false \ - --name conduwuit $LINK + -v db:/var/lib/matrix-conduit/ \ + -e CONDUIT_SERVER_NAME="your.server.name" \ + -e CONDUIT_DATABASE_BACKEND="rocksdb" \ + -e CONDUIT_ALLOW_REGISTRATION=true \ + -e CONDUIT_ALLOW_FEDERATION=true \ + -e CONDUIT_MAX_REQUEST_SIZE="20000000" \ + -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ + -e CONDUIT_MAX_CONCURRENT_REQUESTS="500" \ + -e CONDUIT_LOG="warn,ruma_state_res=warn" \ + --name conduit ``` -or you can use [docker compose](#docker-compose). +or you can use [docker-compose](#docker-compose). -The `-d` flag lets the container run in detached mode. You may supply an -optional `conduwuit.toml` config file, the example config can be found -[here](../configuration/examples.md). You can pass in different env vars to -change config values on the fly. You can even configure Continuwuity completely by -using env vars. For an overview of possible values, please take a look at the -[`docker-compose.yml`](docker-compose.yml) file. +The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../configuration.md). +You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need +to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file. 
-If you just want to test Continuwuity for a short time, you can use the `--rm` -flag, which will clean up everything related to your container after you stop -it. +If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. ### Docker-compose -If the `docker run` command is not for you or your setup, you can also use one -of the provided `docker-compose` files. +If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, you can use one of the following files; - -- If you already have a `traefik` instance set up, use -[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) -- If you don't have a `traefik` instance set up and would like to use it, use -[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml) -- If you want a setup that works out of the box with `caddy-docker-proxy`, use -[`docker-compose.with-caddy.yml`](docker-compose.with-caddy.yml) and replace all -`example.com` placeholders with your own domain +- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) +- If you don't have a `traefik` instance set up (or any other reverse proxy), use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml) - For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml) -When picking the traefik-related compose file, rename it so it matches -`docker-compose.yml`, and rename the override file to -`docker-compose.override.yml`. Edit the latter with the values you want for your -server. +When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and +rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want +for your server. 
-When picking the `caddy-docker-proxy` compose file, it's important to first -create the `caddy` network before spinning up the containers: - -```bash -docker network create caddy -``` - -After that, you can rename it so it matches `docker-compose.yml` and spin up the -containers! - -Additional info about deploying Continuwuity can be found [here](generic.md). +Additional info about deploying Conduit can be found [here](generic.md). ### Build -Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables multi-platform builds efficiently. - -The resulting images are broadly compatible with Docker and other container runtimes like Podman or containerd. - -The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition. - -To build an image locally using Docker Buildx, you can typically run a command like: +To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with: ```bash -# Build for the current platform and load into the local Docker daemon -docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile . - -# Example: Build for specific platforms and push to a registry. -# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push - -# Example: Build binary optimized for the current CPU -# docker buildx build --load --tag continuwuity:latest --build-arg TARGET_CPU=native -f docker/Dockerfile . +docker-compose up ``` -Refer to the Docker Buildx documentation for more advanced build options. 
- -[dockerfile-path]: ../../docker/Dockerfile +This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. ### Run -If you already have built the image or want to use one from the registries, you -can just start the container and everything else in the compose file in detached -mode with: +If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with: ```bash -docker compose up -d +docker-compose up -d ``` > **Note:** Don't forget to modify and adjust the compose file to your needs. ### Use Traefik as Proxy -As a container user, you probably know about Traefik. It is a easy to use -reverse proxy for making containerized app and services available through the -web. With the two provided files, +As a container user, you probably know about Traefik. It is a easy to use reverse proxy for making +containerized app and services available through the web. With the two provided files, [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and -[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy -to deploy and use Continuwuity, with a little caveat. If you already took a look at -the files, then you should have seen the `well-known` service, and that is the -little caveat. Traefik is simply a proxy and loadbalancer and is not able to -serve any kind of content, but for Continuwuity to federate, we need to either -expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` -and `.well-known/matrix/server`. +[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy +and use Conduit, with a little caveat. If you already took a look at the files, then you should have +seen the `well-known` service, and that is the little caveat. 
Traefik is simply a proxy and +loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to +either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and +`.well-known/matrix/server`. + +With the service `well-known` we use a single `nginx` container that will serve those two files. + +So...step by step: + +1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or +[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename. +2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs. +3. Create the `conduit.toml` config file, an example can be found [here](../configuration.md), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. +4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`. +5. Create the files needed by the `well-known` service. + + - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping) + + ```nginx + server { + server_name .; + listen 80 default_server; + + location /.well-known/matrix/server { + return 200 '{"m.server": ".:443"}'; + types { } default_type "application/json; charset=utf-8"; + } + + location /.well-known/matrix/client { + return 200 '{"m.homeserver": {"base_url": "https://."}}'; + types { } default_type "application/json; charset=utf-8"; + add_header "Access-Control-Allow-Origin" *; + } + + location / { + return 404; + } + } + ``` + +6. Run `docker-compose up -d` +7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. 
+ + -With the service `well-known` we use a single `nginx` container that will serve -those two files. ## Voice communication -See the [TURN](../turn.md) page. +In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place. + +### Configuration + +Create a configuration file called `coturn.conf` containing: + +```conf +use-auth-secret +static-auth-secret= +realm= +``` +A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`. + +These same values need to be set in conduit. You can either modify conduit.toml to include these lines: +``` +turn_uris = ["turn:?transport=udp", "turn:?transport=tcp"] +turn_secret = "" +``` +or append the following to the docker environment variables dependig on which configuration method you used earlier: +```yml +CONDUIT_TURN_URIS: '["turn:?transport=udp", "turn:?transport=tcp"]' +CONDUIT_TURN_SECRET: "" +``` +Restart Conduit to apply these changes. + +### Run +Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using +```bash +docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn +``` + +or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml` +and run `docker-compose up -d` in the same directory. + +```yml +version: 3 +services: + turn: + container_name: coturn-server + image: docker.io/coturn/coturn + restart: unless-stopped + network_mode: "host" + volumes: + - ./coturn.conf:/etc/coturn/turnserver.conf +``` + +To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md. 
+For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration). -[nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage diff --git a/docs/deploying/freebsd.md b/docs/deploying/freebsd.md deleted file mode 100644 index 3764ffa8..00000000 --- a/docs/deploying/freebsd.md +++ /dev/null @@ -1,5 +0,0 @@ -# Continuwuity for FreeBSD - -Continuwuity at the moment does not provide FreeBSD builds or have FreeBSD packaging, however Continuwuity does build and work on FreeBSD using the system-provided RocksDB. - -Contributions for getting Continuwuity packaged are welcome. diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 46b9b439..b676af7b 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -1,246 +1,158 @@ # Generic deployment documentation -> ### Getting help +### Please note that this documentation is not fully representative of conduwuit at the moment. Assume majority of it is outdated. + +> ## Getting help > -> If you run into any problems while setting up Continuwuity, ask us in -> `#continuwuity:continuwuity.org` or [open an issue on -> Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). +> If you run into any problems while setting up conduwuit, ask us +> in `#conduwuit:puppygock.gay` or [open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new). -## Installing Continuwuity +## Installing conduwuit -### Static prebuilt binary +You may simply download the binary that fits your machine. Run `uname -m` to see what you need. -You may simply download the binary that fits your machine architecture (x86_64 -or aarch64). Run `uname -m` to see what you need. 
+Prebuilt binaries can be downloaded from the latest successful CI workflow on the main branch here: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=branch%3Amain+actor%3Agirlbossceo+is%3Asuccess+event%3Apush -Prebuilt fully static musl binaries can be downloaded from the latest tagged -release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or -`main` CI branch workflow artifact output. These also include Debian/Ubuntu -packages. - -These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit -hash/revision, and `releases` are tagged releases. Sort by descending last -modified for the latest. - -These binaries have jemalloc and io_uring statically linked and included with -them, so no additional dynamic dependencies need to be installed. - -For the **best** performance; if using an `x86_64` CPU made in the last ~15 years, -we recommend using the `-haswell-` optimised binaries. This sets -`-march=haswell` which is the most compatible and highest performance with -optimised binaries. The database backend, RocksDB, most benefits from this as it -will then use hardware accelerated CRC32 hashing/checksumming which is critical -for performance. - -### Compiling - -Alternatively, you may compile the binary yourself. We recommend using -Nix (or [Lix](https://lix.systems)) to build Continuwuity as this has the most -guaranteed reproducibiltiy and easiest to get a build environment and output -going. This also allows easy cross-compilation. - -You can run the `nix build -L .#static-x86_64-linux-musl-all-features` or -`nix build -L .#static-aarch64-linux-musl-all-features` commands based -on architecture to cross-compile the necessary static binary located at -`result/bin/conduwuit`. This is reproducible with the static binaries produced -in our CI. 
- -If wanting to build using standard Rust toolchains, make sure you install: -- `liburing-dev` on the compiling machine, and `liburing` on the target host -- LLVM and libclang for RocksDB - -You can build Continuwuity using `cargo build --release --all-features` - -## Adding a Continuwuity user - -While Continuwuity can run as any user it is better to use dedicated users for -different services. This also allows you to make sure that the file permissions -are correctly set up. - -In Debian, you can use this command to create a Continuwuity user: +Alternatively, you may compile the binary yourself. First, install any dependencies: ```bash -sudo adduser --system continuwuity --group --disabled-login --no-create-home +# Debian +$ sudo apt install libclang-dev build-essential + +# RHEL +$ sudo dnf install clang +``` +Then, `cd` into the source tree of conduit-next and run: +```bash +$ cargo build --release ``` -For distros without `adduser` (or where it's a symlink to `useradd`): +## Adding a Conduit user + +While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows +you to make sure that the file permissions are correctly set up. + +In Debian or RHEL, you can use this command to create a Conduit user: ```bash -sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity +sudo adduser --system conduit --group --disabled-login --no-create-home ``` ## Forwarding ports in the firewall or the router -Matrix's default federation port is port 8448, and clients must be using port 443. -If you would like to use only port 443, or a different port, you will need to setup -delegation. Continuwuity has config options for doing delegation, or you can configure -your reverse proxy to manually serve the necessary JSON files to do delegation -(see the `[global.well_known]` config section). +Conduit uses the ports 443 and 8448 both of which need to be open in the firewall. 
-If Continuwuity runs behind a router or in a container and has a different public -IP address than the host system these public ports need to be forwarded directly -or indirectly to the port mentioned in the config. - -Note for NAT users; if you have trouble connecting to your server from the inside -of your network, you need to research your router and see if it supports "NAT -hairpinning" or "NAT loopback". - -If your router does not support this feature, you need to research doing local -DNS overrides and force your Matrix DNS records to use your local IP internally. -This can be done at the host level using `/etc/hosts`. If you need this to be -on the network level, consider something like NextDNS or Pi-Hole. +If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. ## Setting up a systemd service -Two example systemd units for Continuwuity can be found -[on the configuration page](../configuration/examples.md#debian-systemd-unit-file). -You may need to change the `ExecStart=` path to where you placed the Continuwuity -binary if it is not `/usr/bin/conduwuit`. +Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your +server reboots. Simply paste the default systemd service you can find below into +`/etc/systemd/system/conduit.service`. -On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros -and OpenSUSE), put `$EscapeControlCharactersOnReceive off` inside -`/etc/rsyslog.conf` to allow color in logs. +```systemd +[Unit] +Description=Conduwuit Matrix Server +After=network.target -If you are using a different `database_path` other than the systemd unit -configured default `/var/lib/conduwuit`, you need to add your path to the -systemd unit's `ReadWritePaths=`. 
This can be done by either directly editing -`conduwuit.service` and reloading systemd, or running `systemctl edit conduwuit.service` -and entering the following: - -``` [Service] -ReadWritePaths=/path/to/custom/database/path +Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" +User=conduit +Group=conduit +RuntimeDirectory=conduit +RuntimeDirectoryMode=0750 +Restart=always +ExecStart=/usr/local/bin/matrix-conduit + +[Install] +WantedBy=multi-user.target ``` -## Creating the Continuwuity configuration file +Finally, run -Now we need to create the Continuwuity's config file in -`/etc/conduwuit/conduwuit.toml`. The example config can be found at -[conduwuit-example.toml](../configuration/examples.md). +```bash +$ sudo systemctl daemon-reload +``` -**Please take a moment to read the config. You need to change at least the -server name.** +## Creating the Conduit configuration file -RocksDB is the only supported database backend. +Now we need to create the Conduit's config file in `/etc/conduwuit/conduwuit.toml`. Paste this in **and take a moment +to read it. You need to change at least the server name.** +RocksDB (`rocksdb`) is the only supported database backend. SQLite only exists for historical reasons and is not recommended. Any performance issues, storage issues, database issues, etc will not be assisted if using SQLite and you will be asked to migrate to RocksDB first. + +See the following example config at [conduwuit-example.toml](../configuration.md) ## Setting the correct file permissions -If you are using a dedicated user for Continuwuity, you will need to allow it to -read the config. To do that you can run this: +As we are using a Conduit specific user we need to allow it to read the config. 
To do that you can run this command on +Debian or RHEL: ```bash -sudo chown -R root:root /etc/conduwuit -sudo chmod -R 755 /etc/conduwuit +sudo chown -R root:root /etc/matrix-conduit +sudo chmod 755 /etc/matrix-conduit ``` If you use the default database path you also need to run this: ```bash -sudo mkdir -p /var/lib/conduwuit/ -sudo chown -R continuwuity:continuwuity /var/lib/conduwuit/ -sudo chmod 700 /var/lib/conduwuit/ +sudo mkdir -p /var/lib/matrix-conduit/ +sudo chown -R conduit:conduit /var/lib/matrix-conduit/ +sudo chmod 700 /var/lib/matrix-conduit/ ``` ## Setting up the Reverse Proxy -We recommend Caddy as a reverse proxy, as it is trivial to use, handling TLS certificates, reverse proxy headers, etc transparently with proper defaults. -For other software, please refer to their respective documentation or online guides. +Refer to the documentation or various guides online of your chosen reverse proxy software. A Caddy example will be provided as this is the recommended reverse proxy for new users and is very trivial. ### Caddy -After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile` -and enter this (substitute for your server name). +Create `/etc/caddy/conf.d/conduwuit_caddyfile` and enter this (substitute for your server name). -```caddyfile +```caddy your.server.name, your.server.name:8448 { - # TCP reverse_proxy - reverse_proxy 127.0.0.1:6167 - # UNIX socket - #reverse_proxy unix//run/conduwuit/conduwuit.sock + # TCP + reverse_proxy 127.0.0.1:6167 + + # UNIX socket + #reverse_proxy unix//run/conduit/conduit.sock } ``` -That's it! Just start and enable the service and you're set. +That's it! Just start or enable the service and you're set. ```bash -sudo systemctl enable --now caddy +$ sudo systemctl enable caddy ``` -### Other Reverse Proxies +## You're done! -As we would prefer our users to use Caddy, we will not provide configuration files for other proxys. 
- -You will need to reverse proxy everything under following routes: -- `/_matrix/` - core Matrix C-S and S-S APIs -- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and -`/server_version` - -You can optionally reverse proxy the following individual routes: -- `/.well-known/matrix/client` and `/.well-known/matrix/server` if using -Continuwuity to perform delegation (see the `[global.well_known]` config section) -- `/.well-known/matrix/support` if using Continuwuity to send the homeserver admin -contact and support page (formerly known as MSC1929) -- `/` if you would like to see `hewwo from conduwuit woof!` at the root - -See the following spec pages for more details on these files: -- [`/.well-known/matrix/server`](https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixserver) -- [`/.well-known/matrix/client`](https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient) -- [`/.well-known/matrix/support`](https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixsupport) - -Examples of delegation: -- -- - -For Apache and Nginx there are many examples available online. - -Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization -header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here. - -If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). - -If using Nginx, you need to give Continuwuity the request URI using `$request_uri`, or like so: -- `proxy_pass http://127.0.0.1:6167$request_uri;` -- `proxy_pass http://127.0.0.1:6167;` - -Nginx users need to increase `client_max_body_size` (default is 1M) to match -`max_request_size` defined in conduwuit.toml. 
- -## You're done - -Now you can start Continuwuity with: +Now you can start Conduit with: ```bash -sudo systemctl start conduwuit +$ sudo systemctl start conduit ``` Set it to start automatically when your system boots with: ```bash -sudo systemctl enable conduwuit +$ sudo systemctl enable conduit ``` ## How do I know it works? -You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your -homeserver and try to register. +You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your homeserver and try to register. -You can also use these commands as a quick health check (replace -`your.server.name`). +You can also use these commands as a quick health check. ```bash -curl https://your.server.name/_conduwuit/server_version +$ curl https://your.server.name/_conduwuit/server_version # If using port 8448 -curl https://your.server.name:8448/_conduwuit/server_version - -# If federation is enabled -curl https://your.server.name:8448/_matrix/federation/v1/version +$ curl https://your.server.name:8448/_conduwuit/server_version ``` -- To check if your server can talk with other homeservers, you can use the -[Matrix Federation Tester](https://federationtester.matrix.org/). If you can -register but cannot join federated rooms check your config again and also check -if the port 8448 is open and forwarded correctly. +- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/). + If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly. # What's next? @@ -250,5 +162,4 @@ For Audio/Video call functionality see the [TURN Guide](../turn.md). ## Appservices -If you want to set up an appservice, take a look at the [Appservice -Guide](../appservices.md). +If you want to set up an appservice, take a look at the [Appservice Guide](../appservices.md). 
diff --git a/docs/deploying/kubernetes.md b/docs/deploying/kubernetes.md deleted file mode 100644 index 0cbfbbc0..00000000 --- a/docs/deploying/kubernetes.md +++ /dev/null @@ -1,9 +0,0 @@ -# Continuwuity for Kubernetes - -Continuwuity doesn't support horizontal scalability or distributed loading -natively, however a community maintained Helm Chart is available here to run -conduwuit on Kubernetes: - -This should be compatible with continuwuity, but you will need to change the image reference. - -Should changes need to be made, please reach out to the maintainer as this is not maintained/controlled by the Continuwuity maintainers. diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md index cf2c09e4..2f73cf03 100644 --- a/docs/deploying/nixos.md +++ b/docs/deploying/nixos.md @@ -1,75 +1,30 @@ -# Continuwuity for NixOS +# Conduwuit for NixOS -Continuwuity can be acquired by Nix (or [Lix][lix]) from various places: +Conduwuit can be acquired by Nix from various places: * The `flake.nix` at the root of the repo * The `default.nix` at the root of the repo -* From Continuwuity's binary cache +* From Conduwuit's binary cache -### NixOS module +A binary cache for conduwuit that the CI/CD publishes to is available at the +following places (both are the same just different names): +``` +https://attic.kennel.juneis.dog/conduit +conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg= -The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions -welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure -Continuwuity. +https://attic.kennel.juneis.dog/conduwuit +conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw= +``` -### Conduit NixOS Config Module and SQLite +If specifying a URL in your flake, please use the GitHub remote: `github:girlbossceo/conduwuit` -Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend. 
-Continuwuity dropped SQLite support in favor of exclusively supporting the much faster RocksDB. -Make sure that you are using the RocksDB backend before migrating! +The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so +(for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to +configure Conduit. -There is a [tool to migrate a Conduit SQLite database to -RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/). - -If you want to run the latest code, you should get Continuwuity from the `flake.nix` +If you want to run the latest code, you should get Conduwuit from the `flake.nix` or `default.nix` and set [`services.matrix-conduit.package`][package] -appropriately to use Continuwuity instead of Conduit. +appropriately. -### UNIX sockets - -Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module -a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX -socket option does not exist in Conduit, and the module forcibly sets the `address` and -`port` config options. - -```nix -options.services.matrix-conduit.settings = lib.mkOption { - apply = old: old // ( - if (old.global ? "unix_socket_path") - then { global = builtins.removeAttrs old.global [ "address" "port" ]; } - else { } - ); -}; - -``` - -Additionally, the [`matrix-conduit` systemd unit][systemd-unit] in the module does not allow -the `AF_UNIX` socket address family in their systemd unit's `RestrictAddressFamilies=` which -disallows the namespace from accessing or creating UNIX sockets and has to be enabled like so: - -```nix -systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ]; -``` - -Even though those workarounds are feasible a Continuwuity NixOS configuration module, developed and -published by the community, would be appreciated. - -### jemalloc and hardened profile - -Continuwuity uses jemalloc by default. 
This may interfere with the [`hardened.nix` profile][hardened.nix] -due to them using `scudo` by default. You must either disable/hide `scudo` from Continuwuity, or -disable jemalloc like so: - -```nix -let - conduwuit = pkgs.unstable.conduwuit.override { - enableJemalloc = false; - }; -in -``` - -[lix]: https://lix.systems/ [module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit [package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package -[hardened.nix]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/profiles/hardened.nix#L22 -[systemd-unit]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/conduit.nix#L132 diff --git a/docs/development.md b/docs/development.md deleted file mode 100644 index 1e344f41..00000000 --- a/docs/development.md +++ /dev/null @@ -1,131 +0,0 @@ -# Development - -Information about developing the project. If you are only interested in using -it, you can safely ignore this page. If you plan on contributing, see the -[contributor's guide](./contributing.md). - -## Continuwuity project layout - -Continuwuity uses a collection of sub-crates, packages, or workspace members -that indicate what each general area of code is for. All of the workspace -members are under `src/`. The workspace definition is at the top level / root -`Cargo.toml`. 
- -The crate names are generally self-explanatory: -- `admin` is the admin room -- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc -- `core` is core Continuwuity functionality like config loading, error definitions, -global utilities, logging infrastructure, etc -- `database` is RocksDB methods, helpers, RocksDB config, and general database definitions, -utilities, or functions -- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging -and error handling macros, and [syn][syn] and [procedural macros][proc-macro] -used for admin room commands and others -- `main` is the "primary" sub-crate. This is where the `main()` function lives, -tokio worker and async initialisation, Sentry initialisation, [clap][clap] init, -and signal handling. If you are adding new [Rust features][features], they *must* -go here. -- `router` is the webserver and request handling bits, using axum, tower, tower-http, -hyper, etc, and the [global server state][state] to access `services`. -- `service` is the high-level database definitions and functions for data, -outbound/sending code, and other business logic such as media fetching. - -It is highly unlikely you will ever need to add a new workspace member, but -if you truly find yourself needing to, we recommend reaching out to us in -the Matrix room for discussions about it beforehand. - -The primary inspiration for this design was a part of hot reloadable development, -to support "Continuwuity as a library" where specific parts can simply be swapped out. -There is evidence Conduit wanted to go this route too as `axum` is technically an -optional feature in Conduit, and can be compiled without the binary or axum library -for handling inbound web requests; but it was never completed or made to work. - -See the Rust documentation on [Workspaces][workspaces] for general questions -and information on Cargo workspaces.
- -## Adding compile-time [features][features] - -If you'd like to add a compile-time feature, you must first define it in -the `main` workspace crate located in `src/main/Cargo.toml`. The feature must -enable a feature in the other workspace crate(s) you intend to use it in. Then -the said workspace crate(s) must define the feature there in its `Cargo.toml`. - -So, if this is adding a feature to the API such as `woof`, you define the feature -in the `api` crate's `Cargo.toml` as `woof = []`. The feature definition in `main`'s -`Cargo.toml` will be `woof = ["conduwuit-api/woof"]`. - -The rationale for this is due to Rust / Cargo not supporting -["workspace level features"][9], we must make a choice of; either scattering -features all over the workspace crates, making it difficult for anyone to add -or remove default features; or define all the features in one central workspace -crate that propagate down/up to the other workspace crates. It is a Cargo pitfall, -and we'd like to see better developer UX in Rust's Workspaces. - -Additionally, the definition of one single place makes "feature collection" in our -Nix flake a million times easier instead of collecting and deduping them all from -searching in all the workspace crates' `Cargo.toml`s. Though we wouldn't need to -do this if Rust supported workspace-level features to begin with. - -## List of forked dependencies - -During Continuwuity development, we have had to fork -some dependencies to support our use-cases in some areas. This ranges from -things said upstream project won't accept for any reason, faster-paced -development (unresponsive or slow upstream), Continuwuity-specific usecases, or -lack of time to upstream some things. 
- -- [ruma/ruma][1]: - various performance -improvements, more features, faster-paced development, better client/server interop -hacks upstream won't accept, etc -- [facebook/rocksdb][2]: - liburing -build fixes and GCC debug build fix -- [tikv/jemallocator][3]: - musl -builds seem to be broken on upstream, fixes some broken/suspicious code in -places, additional safety measures, and support redzones for Valgrind -- [zyansheep/rustyline-async][4]: - - tab completion callback and -`CTRL+\` signal quit event for Continuwuity console CLI -- [rust-rocksdb/rust-rocksdb][5]: - - [`@zaidoon1`][8]'s fork -has quicker updates, more up to date dependencies, etc. Our fork fixes musl build -issues, removes unnecessary `gtest` include, and uses our RocksDB and jemallocator -forks. -- [tokio-rs/tracing][6]: - Implements -`Clone` for `EnvFilter` to support dynamically changing tracing envfilter's -alongside other logging/metrics things - -## Debugging with `tokio-console` - -[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a -`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature, -disable the default `release_max_log_level` feature, and set the `--cfg -tokio_unstable` flag to enable experimental tokio APIs. A build might look like -this: - -```bash -RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \ - --release \ - --no-default-features \ - --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console -``` - -You will also need to enable the `tokio_console` config option in Continuwuity when -starting it. This was due to tokio-console causing gradual memory leak/usage -if left enabled. 
- -[1]: https://github.com/ruma/ruma/ -[2]: https://github.com/facebook/rocksdb/ -[3]: https://github.com/tikv/jemallocator/ -[4]: https://github.com/zyansheep/rustyline-async/ -[5]: https://github.com/rust-rocksdb/rust-rocksdb/ -[6]: https://github.com/tokio-rs/tracing/ -[7]: https://docs.rs/tokio-console/latest/tokio_console/ -[8]: https://github.com/zaidoon1/ -[9]: https://github.com/rust-lang/cargo/issues/12162 -[workspaces]: https://doc.rust-lang.org/cargo/reference/workspaces.html -[macros]: https://doc.rust-lang.org/book/ch19-06-macros.html -[syn]: https://docs.rs/syn/latest/syn/ -[proc-macro]: https://doc.rust-lang.org/reference/procedural-macros.html -[clap]: https://docs.rs/clap/latest/clap/ -[features]: https://doc.rust-lang.org/cargo/reference/features.html -[state]: https://docs.rs/axum/latest/axum/extract/struct.State.html diff --git a/docs/development/assets/libraries.png b/docs/development/assets/libraries.png deleted file mode 100644 index e8edf878..00000000 Binary files a/docs/development/assets/libraries.png and /dev/null differ diff --git a/docs/development/assets/reload_order.png b/docs/development/assets/reload_order.png deleted file mode 100644 index 18eaa3e4..00000000 Binary files a/docs/development/assets/reload_order.png and /dev/null differ diff --git a/docs/development/hot_reload.md b/docs/development/hot_reload.md deleted file mode 100644 index ecfb6396..00000000 --- a/docs/development/hot_reload.md +++ /dev/null @@ -1,200 +0,0 @@ -# Hot Reloading ("Live" Development) - -Note that hot reloading has not been refactored in quite a while and is not -guaranteed to work at this time. - -### Summary - -When developing in debug-builds with the nightly toolchain, Continuwuity is modular -using dynamic libraries and various parts of the application are hot-reloadable -while the server is running: http api handlers, admin commands, services, -database, etc. These are all split up into individual workspace crates as seen -in the `src/` directory. 
Changes to sourcecode in a crate rebuild that crate and -subsequent crates depending on it. Reloading then occurs for the changed crates. - -Release builds still produce static binaries which are unaffected. Rust's -soundness guarantees are in full force. Thus you cannot hot-reload release -binaries. - -### Requirements - -Currently, this development setup only works on x86_64 and aarch64 Linux glibc. -[musl explicitly does not support hot reloadable libraries, and does not -implement `dlclose`][2]. macOS does not fully support our usage of `RTLD_GLOBAL` -possibly due to some thread-local issues. [This Rust issue][3] may be of -relevance, specifically [this comment][4]. It may be possible to get it working -on only very modern macOS versions such as at least Sonoma, as currently loading -dylibs is supported, but not unloading them in our setup, and the cited comment -mentions an Apple WWDC confirming there have been TLS changes to somewhat make -this possible. - -As mentioned above this requires the nightly toolchain. This is due to reliance -on various Cargo.toml features that are only available on nightly, most -specifically `RUSTFLAGS` in Cargo.toml. Some of the implementation could also be -simpler based on other various nightly features. We hope lots of nightly -features start making it out of nightly sooner as there have been dozens of very -helpful features that have been stuck in nightly ("unstable") for at least 5+ -years that would make this simpler. We encourage greater community consensus to -move these features into stability. - -This currently only works on x86_64/aarch64 Linux with a glibc C library. musl C -library, macOS, and likely other host architectures are not supported (if other -architectures work, feel free to let us know and/or make a PR updating this). 
-This should work on GNU ld and lld (rust-lld) and gcc/clang, however if you -happen to have linker issues it's recommended to try using `mold` or `gold` -linkers, and please let us know in the [Continuwuity Matrix room][7] the linker -error and what linker solved this issue so we can figure out a solution. Ideally -there should be minimal friction to using this, and in the future a build script -(`build.rs`) may be suitable to making this easier to use if the capabilities -allow us. - -### Usage - -As of 19 May 2024, the instructions for using this are: - -0. Have patience. Don't hesitate to join the [Continuwuity Matrix room][7] to - receive help using this. As indicated by the various rustflags used and some -of the interesting issues linked at the bottom, this is definitely not something -the Rust ecosystem or toolchain is used to doing. - -1. Install the nightly toolchain using rustup. You may need to use `rustup - override set nightly` in your local Continuwuity directory, or use `cargo -+nightly` for all actions. - -2. Uncomment `cargo-features` at the top level / root Cargo.toml - -3. Scroll down to the `# Developer profile` section and uncomment ALL the - rustflags for each dev profile and their respective packages. - -4. In each workspace crate's Cargo.toml (everything under `src/*` AND - `deps/rust-rocksdb/Cargo.toml`), uncomment the `dylib` crate type under -`[lib]`. - -5. Due to [this rpath issue][5], you must export the `LD_LIBRARY_PATH` - environment variable to your nightly Rust toolchain library directory. If -using rustup (hopefully), use this: `export -LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/` - -6. Start the server. You can use `cargo +nightly run` for this along with the - standard. - -7. Make some changes where you need to. - -8. In a separate terminal window in the same directory (or using a terminal - multiplexer like tmux), run the *build* Cargo command `cargo +nightly build`. 
- Cargo should only rebuild what was changed / what's necessary, so it should - not be rebuilding all the crates. - -9. In your Continuwuity server terminal, hit/send `CTRL+C` signal. This will tell - Continuwuity to find which libraries need to be reloaded, and reloads them as - necessary. - -10. If there were no errors, it will tell you it successfully reloaded `#` - modules, and your changes should now be visible. Repeat 7 - 9 as needed. - -To shutdown Continuwuity in this setup, hit/send `CTRL+\`. Normal builds still -shutdown with `CTRL+C` as usual. - -Steps 1 - 5 are the initial first-time steps for using this. To remove the hot -reload setup, revert/comment all the Cargo.toml changes. - -As mentioned in the requirements section, if you happen to have some linker -issues, try using the `-fuse-ld=` rustflag and specify mold or gold in all the -`rustflags` definitions in the top level Cargo.toml, and please let us know in -the [Continuwuity Matrix room][7] the problem. mold can be installed typically -through your distro, and gold is provided by the binutils package. - -It's possible a helper script can be made to do all of this, or most preferably -a specially made build script (build.rs). `cargo watch` support will be -implemented soon which will eliminate the need to manually run `cargo build` all -together. - -### Addendum - -Conduit was inherited as a single crate without modularity or reloading in its -design. Reasonable partitioning and abstraction allowed a split into several -crates, though many circular dependencies had to be corrected. The resulting -crates now form a directed graph as depicted in figures below. The interfacing -between these crates is still extremely broad which is not mitigable. - -Initially [hot_lib_reload][6] was investigated but found appropriate for a -project designed with modularity through limited interfaces, not a large and -complex existing codebase. 
Instead a bespoke solution built directly on -[libloading][8] satisfied our constraints. This required relatively minimal -modifications and zero maintenance burden compared to what would be required -otherwise. The technical difference lies with relocation processing: we leverage -global bindings (`RTLD_GLOBAL`) in a very intentional way. Most libraries and -off-the-shelf module systems (such as [hot_lib_reload][6]) restrict themselves -to local bindings (`RTLD_LOCAL`). This allows them to release software to -multiple platforms with much greater consistency, but at the cost of burdening -applications to explicitly manage these bindings. In our case with an optional -feature for developers, we shrug any such requirement to enjoy the cost/benefit -on platforms where global relocations are properly cooperative. - -To make use of `RTLD_GLOBAL` the application has to be oriented as a directed -acyclic graph. The primary rule is simple and illustrated in the figure below: -**no crate is allowed to call a function or use a variable from a crate below -it.** - -![Continuwuity's dynamic library setup diagram - created by Jason -Volk](assets/libraries.png) - -When a symbol is referenced between crates they become bound: **crates cannot be -unloaded until their calling crates are first unloaded.** Thus we start the -reloading process from the crate which has no callers. There is a small problem -though: the first crate is called by the base executable itself! This is solved -by using an `RTLD_LOCAL` binding for just one link between the main executable -and the first crate, freeing the executable from all modules as no global -binding ever occurs between them. - -![Continuwuity's reload and load order diagram - created by Jason -Volk](assets/reload_order.png) - -Proper resource management is essential for reliable reloading to occur. 
This is -a very basic ask in RAII-idiomatic Rust and the exposure to reloading hazards is -remarkably low, generally stemming from poor patterns and practices. -Unfortunately static analysis doesn't enforce reload-safety programmatically -(though it could one day), for now hazards can be avoided by knowing a few basic -do's and dont's: - -1. Understand that code is memory. Just like one is forbidden from referencing - free'd memory, one must not transfer control to free'd code. Exposure to this -is primarily from two things: - - - Callbacks, which this project makes very little use of. - - Async tasks, which are addressed below. - -2. Tie all resources to a scope or object lifetime with greatest possible -symmetry (locality). For our purposes this applies to code resources, which -means async blocks and tokio tasks. - - - **Never spawn a task without receiving and storing its JoinHandle**. - - **Always wait on join handles** before leaving a scope or in another cleanup - function called by an owning scope. - -3. Know any minor specific quirks documented in code or here: - - - Don't use `tokio::spawn`, instead use our `Handle` in `core/server.rs`, which - is reachable in most of the codebase via `services()` or other state. This is - due to some bugs or assumptions made in tokio, as it happens in `unsafe {}` - blocks, which are mitigated by circumventing some thread-local variables. Using - runtime handles is good practice in any case. - -The initial implementation PR is available [here][1]. 
- -### Interesting related issues/bugs - -- [DT_RUNPATH produced in binary with rpath = true is wrong (cargo)][5] -- [Disabling MIR Optimization in Rust Compilation -(cargo)](https://internals.rust-lang.org/t/disabling-mir-optimization-in-rust-compilation/19066/5) -- [Workspace-level metadata -(cargo-deb)](https://github.com/kornelski/cargo-deb/issues/68) - -[1]: https://github.com/girlbossceo/conduwuit/pull/387 -[2]: https://wiki.musl-libc.org/functional-differences-from-glibc.html#Unloading-libraries -[3]: https://github.com/rust-lang/rust/issues/28794 -[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049 -[5]: https://github.com/rust-lang/cargo/issues/12746 -[6]: https://crates.io/crates/hot-lib-reloader/ -[7]: https://matrix.to/#/#continuwuity:continuwuity.org -[8]: https://crates.io/crates/libloading diff --git a/docs/development/testing.md b/docs/development/testing.md deleted file mode 100644 index a577698a..00000000 --- a/docs/development/testing.md +++ /dev/null @@ -1,31 +0,0 @@ -# Testing - -## Complement - -Have a look at [Complement's repository][complement] for an explanation of what -it is. - -To test against Complement, with Nix (or [Lix](https://lix.systems) and -[direnv installed and set up][direnv] (run `direnv allow` after setting up the hook), you can: - -* Run `./bin/complement "$COMPLEMENT_SRC"` to build a Complement image, run -the tests, and output the logs and results to the specified paths. This will also output the OCI image -at `result` -* Run `nix build .#complement` from the root of the repository to just build a -Complement OCI image outputted to `result` (it's a `.tar.gz` file) -* Or download the latest Complement OCI image from the CI workflow artifacts -output from the commit/revision you want to test (e.g. 
from main) -[here][ci-workflows] - -If you want to use your own prebuilt OCI image (such as from our CI) without needing -Nix installed, put the image at `complement_oci_image.tar.gz` in the root of the repo -and run the script. - -If you're on macOS and need to build an image, run `nix build .#linux-complement`. - -We have a Complement fork as some tests have needed to be fixed. This can be found -at: - -[ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo -[complement]: https://github.com/matrix-org/complement -[direnv]: https://direnv.net/docs/hook.html diff --git a/docs/differences.md b/docs/differences.md new file mode 100644 index 00000000..ac1a6f98 --- /dev/null +++ b/docs/differences.md @@ -0,0 +1,109 @@ +#### **Note: This list is not up to date. There are rapidly more and more improvements, fixes, changes, etc being made that it is becoming more difficult to maintain this list. I recommend that you give Conduwuit a try and see the differences for yourself. 
If you have any concerns, feel free to join the Conduwuit Matrix room and ask any pre-usage questions.** + +### list of features, bug fixes, etc that conduwuit does that upstream does not: + +- GitLab CI ported to GitHub Actions +- Fixed every single clippy (default lints) and rustc warnings, including some that were performance related or potential safety issues / unsoundness +- Add a **lot** of other clippy and rustc lints and a rustfmt.toml file +- Has Renovate and significantly updates all dependencies possible +- Uses proper argon2 crate instead of questionable rust-argon2 crate +- Improved and cleaned up logging (less noisy dead server logging, registration attempts, more useful troubleshooting logging, etc) +- Attempts and interest in removing extreme and unnecessary panics/unwraps/expects that can lead to denial of service or such (upstream and upstream contributors want this unusual behaviour for some reason) +- Merged and cleaned up upstream MRs that have been sitting for 6-12 months +- Configurable RocksDB logging (`LOG` files) with proper defaults (rotate, max size, verbosity, etc) to stop LOG files from accumulating so much +- Concurrency support for key fetching for faster remote room joins and room joins that will error less frequently (via upstream MR) +- Room version 11 support (via upstream MR) +- Explicit startup error/warning if your configuration allows open registration without a token or such like Synapse +- Improved RocksDB defaults to use new features that help with performance significantly, uses settings tailored to SSDs, various ways to tweak RocksDB, and a conduwuit setting to tell RocksDB to use settings that are tailored to HDDs or slow spinning rust storage. 
+- Revamped admin room infrastructure and commands (via upstream MR) +- Admin room commands to delete room aliases and unpublish rooms from our room directory (via upstream MR) +- Make spaces/hierarchy cache use cache_capacity_modifier instead of hardcoded small value +- Add *optional* feature flag to use SHA256 key names for media instead of base64 to overcome filesystem file name length limitations (OS error file name too long) (via upstream MR) +- Add feature flags and config options to enable/build with zstd, brotli, and/or gzip HTTP body compression (response and request) +- Add support for querying both Matrix SRV records, the deprecated `_matrix` record and `_matrix-fed` record if necessary +- Add config option for device name federation with a privacy-friendly default (disabled) +- Add config option for requiring authentication to the `/publicRooms` endpoint (room directory) with a default enabled for privacy +- Add config option for federating `/publicRooms` endpoint (room directory) to other servers with a default disabled for privacy +- Add support for listening on a UNIX socket for performance and host security with proper default permissions (660) +- Add missing `destination` key to all `X-Matrix` `Authorization` requests (spec compliance issue) +- Use aggressive build-time performance optimisations for release builds (1 codegen unit, no debug, fat LTO, etc, and optimise all crates with same) +- Raise various hardcoded timeouts in codebase that were way too short, making some things like room joins and client bugs error less or none at all than they should +- Add debug admin command to force update user device lists (could potentially resolve some E2EE flukes) (`ForceDeviceListUpdates`) +- Declare various missing Matrix versions and features at `/_matrix/client/versions` +- Add support for serving server and client well-known files from conduwuit using `well_known_client` and `well_known_server` options +- Send a User-Agent on all of our requests 
(`conduwuit/0.7.0-alpha+conduwuit-0.1.1`) which strangely was not done upstream since forever. Some providers consider no User-Agent suspicious and block said requests. +- Safer and cleaner shutdowns on both database side as we run cleanup on shutdown and exits database loop better (no potential hanging issues in database loop), overall cleaner shutdown logic +- Allow HEAD HTTP requests in CORS for clients (despite not being explicitly mentioned in Matrix spec, HTTP spec says all HEAD requests need to behave the same as GET requests, Synapse supports HEAD requests) +- Purge unmaintained/irrelevant/broken database backends (heed, sled, persy) +- webp support for images +- Support for suggesting servers to join at `/_matrix/client/v3/directory/room/{roomAlias}` +- Prevent admin credential commands like reset password and deactivate user from modifying non-local users (https://gitlab.com/famedly/conduit/-/issues/377) +- Fixed spec compliance issue with room version 8 - 11 joins (https://github.com/matrix-org/synapse/issues/16717 / https://github.com/matrix-org/matrix-spec/issues/1708) +- Add basic cache eviction for true destinations when requests fail if we use a cached destination (e.g. a server has modified their well-known and we're still connecting to the old destination) +- Generate passwords with 25 characters instead of 15 +- Add missing `reason` field to user ban events (`/ban`) +- For all [`/report`](https://spec.matrix.org/v1.9/client-server-api/#post_matrixclientv3roomsroomidreporteventid) requests: check if the reported event ID belongs to the reported room ID, raise report reasoning character limit to 750, fix broken formatting, make a small delayed random response per spec suggestion on privacy, and check if the sender user is in the reported room. 
+- Support blocking servers from downloading remote media from +- Support sending `well_known` response to client logins if using config option `well_known_client` +- Send `avatar_url` on invite room membership events/changes +- Revamp example config, adding a lot of config options available (still some missing) +- Return joined member count of rooms for push rules/conditions instead of a hardcoded value of 10 +- Respect *most* client parameters for `/media/` requests (`allow_redirect` still needs work) +- Config option `ip_range_denylist` to support refusing to send requests (typically federation) to specific IP ranges, typically RFC 1918, non-routable, testnet, etc addresses like Synapse for security (note: this is not a guaranteed protection, and you should be using a firewall with zones if you want guaranteed protection as doing this on the application level is prone to bypasses). +- Support for creating rooms with custom room IDs like Maunium Synapse (`room_id` request body field to `/createRoom`) +- Assume well-knowns are broken if they exceed past 10000 characters. 
+- Basic validation/checks on user-specified room aliases and custom room ID creations +- Warn on unknown config options specified +- URL preview support (via upstream MR) with various improvements +- Increased graceful shutdown timeout from a low 60 seconds to 180 seconds to avoid killing connections and let the remaining ones finish processing +- Query parameter `?format=event|content` for returning either the room state event's content (default) for the full room state event on ` /_matrix/client/v3/rooms/{roomId}/state/{eventType}[/{stateKey}]` requests (see https://github.com/matrix-org/matrix-spec/issues/1047) +- Add admin commands for banning (blocking) room IDs from our local users joining (admins are always allowed) and evicts all our local users from that room, in addition to bulk room banning support, and blocks room invites (remote and local) to the banned room, as a moderation feature +- Add admin command to delete media via a specific MXC. This deletes the MXC from our database, and the file locally. +- Replace the lightning bolt emoji option with support for setting any arbitrary text (e.g. another emoji) to suffix to all new user registrations +- Add admin command to bulk delete media via a codeblock list of MXC URLs. +- Add admin command to delete both the thumbnail and media MXC URLs from an event ID (e.g. from an abuse report) +- Add `!admin` as a way to call the Conduit admin bot +- Add support for listening on multiple TCP ports +- Add admin command to list all the rooms a local user is joined in +- Add admin command to delete all remote media in the past X minutes as a form of deleting media that you don't want on your server that a remote user posted in a room +- Config option to block non-admin users from sending room invites or receiving remote room invites. Admin users are still allowed. 
+- Startup check if conduwuit running in a container and is listening on 127.0.0.1 +- Make `CONDUIT_CONFIG` optional, relevant for container users that configure only by environment variables and no longer need to set `CONDUIT_CONFIG` to an empty string. +- Config option to change Conduit's behaviour of homeserver key fetching (`query_trusted_key_servers_first`). This option sets whether conduwuit will query trusted notary key servers first before the individual homeserver(s), or vice versa. +- Implement database flush and cleanup Conduit operations when using RocksDB +- Implement legacy Matrix `/v1/` media endpoints that some clients and servers may still call +- Commandline argument to specify the path to a config file +- Admin debug command to fetch a PDU from a remote server and inserts it into our database/timeline +- Update rusqlite/sqlite (not that you should be using it) +- Disable update check by default as it's not useful for conduwuit +- Config option to disable incoming remote read receipts if desired +- Extend clear cache admin command to support clearing DNS and TLS name override caches +- Responsive outgoing read receipt EDU support +- Eliminate all usage of the thread-blocking `getaddrinfo(3)` call upon DNS queries, significantly improving federation latency/ping and cache DNS results using hickory-dns / hickory-resolver +- Store the sender user with the MXC URL for all media uploads (`/upload`) (not for thumbnails or media requests which are unauthenticated) +- Perform connection pooling and keepalives where necessary to significantly improve federation performance and latency +- Implement RocksDB online backups via admin command +- Implement RocksDB write buffer corking and coalescing in database write-heavy areas +- Various config options to tweak connection pooling, request timeouts, connection timeouts, DNS timeouts and settings, etc with good defaults +- Implement config option to auto join rooms upon registration +- Overall significant 
database, Client-Server, and federation performance and latency improvements +- Outgoing read receipt and private read receipt support (EDU) +- Outgoing typing indicator support (EDU) +- Outgoing and local presence support (EDU) +- **Opt-in** Sentry.io telemetry and metrics, mainly used for crash reporting +- Add `/_conduwuit/server_version` route to return the version of Conduwuit without relying on the federation API `/_matrix/federation/v1/version` +- Add configurable RocksDB recovery modes to aid in recovering corrupted RocksDB databases +- Config option to forbid publishing rooms to the room directory (`lockdown_public_room_directory`) except for admins +- Don't allow `m.call.invite` events to be sent in public rooms (prevents calling the entire room) +- On new public room creations, only allow moderators to send `m.call.invite`, `org.matrix.msc3401.call`, and `org.matrix.msc3401.call.member` events +- Stop sending `make_join` requests on room joins if 15 servers respond with `M_UNSUPPORTED_ROOM_VERSION` or `M_INVALID_ROOM_VERSION` +- Stop sending `make_join` requests if 50 servers cannot provide `make_join` for us +- Admin debug command to send a federation request/ping to a server's `/_matrix/federation/v1/version` endpoint and measure the latency it took +- Implement building Conduwuit with jemalloc or hardened_malloc light variant, and produce CI builds with jemalloc or hardened_malloc, for performance and/or security +- Significant RocksDB tuning and improvements tailored to maximising Conduwuit performance with RocksDB +- Implement unstable MSC2666 support for querying mutual rooms with a user +- Add admin command to fetch a server's `/.well-known/matrix/support` file +- Send `Cache-Control` response header with immutable and 1 year cache length for all media requests to instruct clients to cache media, and reduce server load from media requests that could be otherwise cached +- Forbid the admin room from being made public +- Fix admin room handler to not 
panic/crash if the admin room command response fails (e.g. too large message) +- Implement `include_state` search criteria support for `/search` requests (response now can include room states) diff --git a/docs/introduction.md b/docs/introduction.md index d193f7c7..7b879aec 100644 --- a/docs/introduction.md +++ b/docs/introduction.md @@ -1,18 +1,17 @@ -# Continuwuity +# Conduwuit {{#include ../README.md:catchphrase}} {{#include ../README.md:body}} +#### What's different about your fork than upstream Conduit? + +See [differences.md](differences.md) + #### How can I deploy my own? - [Deployment options](deploying.md) -If you want to connect an appservice to Continuwuity, take a look at the -[appservices documentation](appservices.md). - -#### How can I contribute? - -See the [contributor's guide](contributing.md) +If you want to connect an Appservice to Conduwuit, take a look at the [appservices documentation](appservices.md). {{#include ../README.md:footer}} diff --git a/docs/maintenance.md b/docs/maintenance.md deleted file mode 100644 index b85a1971..00000000 --- a/docs/maintenance.md +++ /dev/null @@ -1,135 +0,0 @@ -# Maintaining your Continuwuity setup - -## Moderation - -Continuwuity has moderation through admin room commands. "binary commands" (medium -priority) and an admin API (low priority) is planned. Some moderation-related -config options are available in the example config such as "global ACLs" and -blocking media requests to certain servers. See the example config for the -moderation config options under the "Moderation / Privacy / Security" section. 
- -Continuwuity has moderation admin commands for: - -- managing room aliases (`!admin rooms alias`) -- managing room directory (`!admin rooms directory`) -- managing room banning/blocking and user removal (`!admin rooms moderation`) -- managing user accounts (`!admin users`) -- fetching `/.well-known/matrix/support` from servers (`!admin federation`) -- blocking incoming federation for certain rooms (not the same as room banning) -(`!admin federation`) -- deleting media (see [the media section](#media)) - -Any commands with `-list` in them will require a codeblock in the message with -each object being newline delimited. An example of doing this is: - -```` -!admin rooms moderation ban-list-of-rooms -``` -!roomid1:server.name -#badroomalias1:server.name -!roomid2:server.name -!roomid3:server.name -#badroomalias2:server.name -``` -```` - -## Database (RocksDB) - -Generally there is very little you need to do. [Compaction][rocksdb-compaction] -is ran automatically based on various defined thresholds tuned for Continuwuity to -be high performance with the least I/O amplifcation or overhead. Manually -running compaction is not recommended, or compaction via a timer, due to -creating unnecessary I/O amplification. RocksDB is built with io_uring support -via liburing for improved read performance. - -RocksDB troubleshooting can be found [in the RocksDB section of troubleshooting](troubleshooting.md). - -### Compression - -Some RocksDB settings can be adjusted such as the compression method chosen. See -the RocksDB section in the [example config](configuration/examples.md). - -btrfs users have reported that database compression does not need to be disabled -on Continuwuity as the filesystem already does not attempt to compress. This can be -validated by using `filefrag -v` on a `.SST` file in your database, and ensure -the `physical_offset` matches (no filesystem compression). 
It is very important -to ensure no additional filesystem compression takes place as this can render -unbuffered Direct IO inoperable, significantly slowing down read and write -performance. See - -> Compression is done using the COW mechanism so it’s incompatible with -> nodatacow. Direct IO read works on compressed files but will fall back to -> buffered writes and leads to no compression even if force compression is set. -> Currently nodatasum and compression don’t work together. - -### Files in database - -Do not touch any of the files in the database directory. This must be said due -to users being mislead by the `.log` files in the RocksDB directory, thinking -they're server logs or database logs, however they are critical RocksDB files -related to WAL tracking. - -The only safe files that can be deleted are the `LOG` files (all caps). These -are the real RocksDB telemetry/log files, however Continuwuity has already -configured to only store up to 3 RocksDB `LOG` files due to generall being -useless for average users unless troubleshooting something low-level. If you -would like to store nearly none at all, see the `rocksdb_max_log_files` -config option. - -## Backups - -Currently only RocksDB supports online backups. If you'd like to backup your -database online without any downtime, see the `!admin server` command for the -backup commands and the `database_backup_path` config options in the example -config. Please note that the format of the database backup is not the exact -same. This is unfortunately a bad design choice by Facebook as we are using the -database backup engine API from RocksDB, however the data is still there and can -still be joined together. 
- -To restore a backup from an online RocksDB backup: - -- shutdown Continuwuity -- create a new directory for merging together the data -- in the online backup created, copy all `.sst` files in -`$DATABASE_BACKUP_PATH/shared_checksum` to your new directory -- trim all the strings so instead of `######_sxxxxxxxxx.sst`, it reads -`######.sst`. A way of doing this with sed and bash is `for file in *.sst; do mv -"$file" "$(echo "$file" | sed 's/_s.*/.sst/')"; done` -- copy all the files in `$DATABASE_BACKUP_PATH/1` (or the latest backup number -if you have multiple) to your new directory -- set your `database_path` config option to your new directory, or replace your -old one with the new one you crafted -- start up Continuwuity again and it should open as normal - -If you'd like to do an offline backup, shutdown Continuwuity and copy your -`database_path` directory elsewhere. This can be restored with no modifications -needed. - -Backing up media is also just copying the `media/` directory from your database -directory. - -## Media - -Media still needs various work, however Continuwuity implements media deletion via: - -- MXC URI or Event ID (unencrypted and attempts to find the MXC URI in the -event) -- Delete list of MXC URIs -- Delete remote media in the past `N` seconds/minutes via filesystem metadata on -the file created time (`btime`) or file modified time (`mtime`) - -See the `!admin media` command for further information. All media in Continuwuity -is stored at `$DATABASE_DIR/media`. This will be configurable soon. - -If you are finding yourself needing extensive granular control over media, we -recommend looking into [Matrix Media -Repo](https://github.com/t2bot/matrix-media-repo). Continuwuity intends to -implement various utilities for media, but MMR is dedicated to extensive media -management. - -Built-in S3 support is also planned, but for now using a "S3 filesystem" on -`media/` works. 
Continuwuity also sends a `Cache-Control` header of 1 year and -immutable for all media requests (download and thumbnail) to reduce unnecessary -media requests from browsers, reduce bandwidth usage, and reduce load. - -[rocksdb-compaction]: https://github.com/facebook/rocksdb/wiki/Compaction diff --git a/docs/static/_headers b/docs/static/_headers deleted file mode 100644 index 6e52de9f..00000000 --- a/docs/static/_headers +++ /dev/null @@ -1,6 +0,0 @@ -/.well-known/matrix/* - Access-Control-Allow-Origin: * - Content-Type: application/json -/.well-known/continuwuity/* - Access-Control-Allow-Origin: * - Content-Type: application/json \ No newline at end of file diff --git a/docs/static/announcements.json b/docs/static/announcements.json deleted file mode 100644 index 9b97d091..00000000 --- a/docs/static/announcements.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "$schema": "https://continuwuity.org/schema/announcements.schema.json", - "announcements": [ - { - "id": 1, - "message": "Welcome to Continuwuity! Important announcements about the project will appear here." 
- } - ] -} \ No newline at end of file diff --git a/docs/static/announcements.schema.json b/docs/static/announcements.schema.json deleted file mode 100644 index 95b1d153..00000000 --- a/docs/static/announcements.schema.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "$id": "https://continwuity.org/schema/announcements.schema.json", - "type": "object", - "properties": { - "updates": { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "integer" - }, - "message": { - "type": "string" - }, - "date": { - "type": "string" - } - }, - "required": [ - "id", - "message" - ] - } - } - }, - "required": [ - "updates" - ] - } \ No newline at end of file diff --git a/docs/static/client b/docs/static/client deleted file mode 100644 index c2b70a14..00000000 --- a/docs/static/client +++ /dev/null @@ -1 +0,0 @@ -{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"}} diff --git a/docs/static/server b/docs/static/server deleted file mode 100644 index a3099f6e..00000000 --- a/docs/static/server +++ /dev/null @@ -1 +0,0 @@ -{"m.server":"matrix.continuwuity.org:443"} diff --git a/docs/static/support b/docs/static/support deleted file mode 100644 index 6b7a9860..00000000 --- a/docs/static/support +++ /dev/null @@ -1,24 +0,0 @@ -{ - "contacts": [ - { - "email_address": "security@continuwuity.org", - "role": "m.role.security" - }, - { - "matrix_id": "@tom:continuwuity.org", - "email_address": "tom@tcpip.uk", - "role": "m.role.admin" - }, - { - "matrix_id": "@jade:continuwuity.org", - "email_address": "jade@continuwuity.org", - "role": "m.role.admin" - }, - { - "matrix_id": "@nex:continuwuity.org", - "email_address": "nex@continuwuity.org", - "role": "m.role.admin" - } - ], - "support_page": "https://continuwuity.org/introduction#contact" -} \ No newline at end of file diff --git a/docs/troubleshooting.md 
b/docs/troubleshooting.md deleted file mode 100644 index d84dbc7a..00000000 --- a/docs/troubleshooting.md +++ /dev/null @@ -1,193 +0,0 @@ -# Troubleshooting Continuwuity - -> **Docker users ⚠️** -> -> Docker can be difficult to use and debug. It's common for Docker -> misconfigurations to cause issues, particularly with networking and permissions. -> Please check that your issues are not due to problems with your Docker setup. - -## Continuwuity and Matrix issues - -### Lost access to admin room - -You can reinvite yourself to the admin room through the following methods: - -- Use the `--execute "users make_user_admin "` Continuwuity binary -argument once to invite yourslf to the admin room on startup -- Use the Continuwuity console/CLI to run the `users make_user_admin` command -- Or specify the `emergency_password` config option to allow you to temporarily -log into the server account (`@conduit`) from a web client - -## General potential issues - -### Potential DNS issues when using Docker - -Docker's DNS setup for containers in a non-default network intercepts queries to -enable resolving of container hostnames to IP addresses. However, due to -performance issues with Docker's built-in resolver, this can cause DNS queries -to take a long time to resolve, resulting in federation issues. - -This is particularly common with Docker Compose, as custom networks are easily -created and configured. - -Symptoms of this include excessively long room joins (30+ minutes) from very -long DNS timeouts, log entries of "mismatching responding nameservers", -and/or partial or non-functional inbound/outbound federation. - -This is not a bug in continuwuity. Docker's default DNS resolver is not suitable -for heavy DNS activity, which is normal for federated protocols like Matrix. 
- -Workarounds: - -- Use DNS over TCP via the config option `query_over_tcp_only = true` -- Bypass Docker's default DNS setup and instead allow the container to use and communicate with your host's DNS servers. Typically, this can be done by mounting the host's `/etc/resolv.conf`. - -### DNS No connections available error message - -If you receive spurious amounts of error logs saying "DNS No connections -available", this is due to your DNS server (servers from `/etc/resolv.conf`) -being overloaded and unable to handle typical Matrix federation volume. Some -users have reported that the upstream servers are rate-limiting them as well -when they get this error (e.g. popular upstreams like Google DNS). - -Matrix federation is extremely heavy and sends wild amounts of DNS requests. -Unfortunately this is by design and has only gotten worse with more -server/destination resolution steps. Synapse also expects a very perfect DNS -setup. - -There are some ways you can reduce the amount of DNS queries, but ultimately -the best solution/fix is selfhosting a high quality caching DNS server like -[Unbound][unbound-arch] without any upstream resolvers, and without DNSSEC -validation enabled. - -DNSSEC validation is highly recommended to be **disabled** due to DNSSEC being -very computationally expensive, and is extremely susceptible to denial of -service, especially on Matrix. Many servers also strangely have broken DNSSEC -setups and will result in non-functional federation. - -Continuwuity cannot provide a "works-for-everyone" Unbound DNS setup guide, but -the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch] -may be of interest. Disabling DNSSEC on Unbound is commenting out trust-anchors -config options and removing the `validator` module. - -**Avoid** using `systemd-resolved` as it does **not** perform very well under -high load, and we have identified its DNS caching to not be very effective. 
- -dnsmasq can possibly work, but it does **not** support TCP fallback which can be -problematic when receiving large DNS responses such as from large SRV records. -If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback` -in Continuwuity config. - -Raising `dns_cache_entries` in Continuwuity config from the default can also assist -in DNS caching, but a full-fledged external caching resolver is better and more -reliable. - -If you don't have IPv6 connectivity, changing `ip_lookup_strategy` to match -your setup can help reduce unnecessary AAAA queries -(`1 - Ipv4Only (Only query for A records, no AAAA/IPv6)`). - -If your DNS server supports it, some users have reported enabling -`query_over_tcp_only` to force only TCP querying by default has improved DNS -reliability at a slight performance cost due to TCP overhead. - -## RocksDB / database issues - -### Database corruption - -If your database is corrupted *and* is failing to start (e.g. checksum -mismatch), it may be recoverable but careful steps must be taken, and there is -no guarantee it may be recoverable. - -The first thing that can be done is launching Continuwuity with the -`rocksdb_repair` config option set to true. This will tell RocksDB to attempt to -repair itself at launch. If this does not work, disable the option and continue -reading. - -RocksDB has the following recovery modes: - -- `TolerateCorruptedTailRecords` -- `AbsoluteConsistency` -- `PointInTime` -- `SkipAnyCorruptedRecord` - -By default, Continuwuity uses `TolerateCorruptedTailRecords` as generally these may -be due to bad federation and we can re-fetch the correct data over federation. -The RocksDB default is `PointInTime` which will attempt to restore a "snapshot" -of the data when it was last known to be good. This data can be either a few -seconds old, or multiple minutes prior. 
`PointInTime` may not be suitable for -default usage due to clients and servers possibly not being able to handle -sudden "backwards time travels", and `AbsoluteConsistency` may be too strict. - -`AbsoluteConsistency` will fail to start the database if any sign of corruption -is detected. `SkipAnyCorruptedRecord` will skip all forms of corruption unless -it forbids the database from opening (e.g. too severe). Usage of -`SkipAnyCorruptedRecord` voids any support as this may cause more damage and/or -leave your database in a permanently inconsistent state, but it may do something -if `PointInTime` does not work as a last ditch effort. - -With this in mind: - -- First start Continuwuity with the `PointInTime` recovery method. See the [example -config](configuration/examples.md) for how to do this using -`rocksdb_recovery_mode` -- If your database successfully opens, clients are recommended to clear their -client cache to account for the rollback -- Leave your Continuwuity running in `PointInTime` for at least 30-60 minutes so as -much possible corruption is restored -- If all goes will, you should be able to restore back to using -`TolerateCorruptedTailRecords` and you have successfully recovered your database - -## Debugging - -Note that users should not really be debugging things. If you find yourself -debugging and find the issue, please let us know and/or how we can fix it. -Various debug commands can be found in `!admin debug`. - -### Debug/Trace log level - -Continuwuity builds without debug or trace log levels at compile time by default -for substantial performance gains in CPU usage and improved compile times. If -you need to access debug/trace log levels, you will need to build without the -`release_max_log_level` feature or use our provided static debug binaries. - -### Changing log level dynamically - -Continuwuity supports changing the tracing log environment filter on-the-fly using -the admin command `!admin debug change-log-level `. 
This accepts -a string **without quotes** the same format as the `log` config option. - -Example: `!admin debug change-log-level debug` - -This can also accept complex filters such as: -`!admin debug change-log-level info,conduit_service[{dest="example.com"}]=trace,ruma_state_res=trace` -`!admin debug change-log-level info,conduit_service[{dest="example.com"}]=trace,conduit_service[send{dest="example.org"}]=trace` - -And to reset the log level to the one that was set at startup / last config -load, simply pass the `--reset` flag. - -`!admin debug change-log-level --reset` - -### Pinging servers - -Continuwuity can ping other servers using `!admin debug ping `. This takes -a server name and goes through the server discovery process and queries -`/_matrix/federation/v1/version`. Errors are outputted. - -While it does measure the latency of the request, it is not indicative of -server performance on either side as that endpoint is completely unauthenticated -and simply fetches a string on a static JSON endpoint. It is very low cost both -bandwidth and computationally. - -### Allocator memory stats - -When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you -can see Continuwuity's high-level allocator stats by using -`!admin server memory-usage` at the bottom. - -If you are a developer, you can also view the raw jemalloc statistics with -`!admin debug memory-stats`. Please note that this output is extremely large -which may only be visible in the Continuwuity console CLI due to PDU size limits, -and is not easy for non-developers to understand. - -[unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html -[unbound-arch]: https://wiki.archlinux.org/title/Unbound diff --git a/docs/turn.md b/docs/turn.md index 5dba823c..63c1e99f 100644 --- a/docs/turn.md +++ b/docs/turn.md @@ -1,70 +1,25 @@ # Setting up TURN/STURN -In order to make or receive calls, a TURN server is required. 
Continuwuity suggests -using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also -available as a Docker image. +## General instructions -### Configuration +* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md). -Create a configuration file called `coturn.conf` containing: +## Edit/Add a few settings to your existing conduit.toml -```conf -use-auth-secret -static-auth-secret= -realm= +``` +# Refer to your Coturn settings. +# `your.turn.url` has to match the REALM setting of your Coturn as well as `transport`. +turn_uris = ["turn:your.turn.url?transport=udp", "turn:your.turn.url?transport=tcp"] + +# static-auth-secret of your turnserver +turn_secret = "ADD SECRET HERE" + +# If you have your TURN server configured to use a username and password +# you can provide these information too. In this case comment out `turn_secret above`! +#turn_username = "" +#turn_password = "" ``` -A common way to generate a suitable alphanumeric secret key is by using `pwgen --s 64 1`. +## Apply settings -These same values need to be set in Continuwuity. See the [example -config](configuration/examples.md) in the TURN section for configuring these and -restart Continuwuity after. - -`turn_secret` or a path to `turn_secret_file` must have a value of your -coturn `static-auth-secret`, or use `turn_username` and `turn_password` -if using legacy username:password TURN authentication (not preferred). - -`turn_uris` must be the list of TURN URIs you would like to send to the client. -Typically you will just replace the example domain `example.turn.uri` with the -`realm` you set from the example config. - -If you are using TURN over TLS, you can replace `turn:` with `turns:` in the -`turn_uris` config option to instruct clients to attempt to connect to -TURN over TLS. This is highly recommended. 
- -If you need unauthenticated access to the TURN URIs, or some clients may be -having trouble, you can enable `turn_guest_access` in Continuwuity which disables -authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer` - -### Run - -Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using - -```bash -docker run -d --network=host -v -$(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn -``` - -or docker-compose. For the latter, paste the following section into a file -called `docker-compose.yml` and run `docker compose up -d` in the same -directory. - -```yml -version: 3 -services: - turn: - container_name: coturn-server - image: docker.io/coturn/coturn - restart: unless-stopped - network_mode: "host" - volumes: - - ./coturn.conf:/etc/coturn/turnserver.conf -``` - -To understand why the host networking mode is used and explore alternative -configuration options, please visit [Coturn's Docker -documentation](https://github.com/coturn/coturn/blob/master/docker/coturn/README.md). - -For security recommendations see Synapse's [Coturn -documentation](https://element-hq.github.io/synapse/latest/turn-howto.html). +Restart Conduit. 
\ No newline at end of file diff --git a/engage.toml b/engage.toml index 210bafd5..1f968467 100644 --- a/engage.toml +++ b/engage.toml @@ -18,12 +18,12 @@ script = "direnv --version" [[task]] name = "rustc" group = "versions" -script = "rustc --version -v" +script = "rustc --version" [[task]] name = "cargo" group = "versions" -script = "cargo --version -v" +script = "cargo --version" [[task]] name = "cargo-fmt" @@ -55,135 +55,47 @@ name = "lychee" group = "versions" script = "lychee --version" -[[task]] -name = "markdownlint" -group = "versions" -script = "markdownlint --version" - [[task]] name = "cargo-audit" group = "security" -script = "cargo audit --color=always -D warnings -D unmaintained -D unsound -D yanked" +script = "cargo audit -D warnings -D unmaintained -D unsound -D yanked" [[task]] name = "cargo-fmt" group = "lints" -script = """ -cargo fmt --check -- --color=always -""" +script = "cargo fmt --check -- --color=always" [[task]] name = "cargo-doc" group = "lints" script = """ -env DIRENV_DEVSHELL=all-features \ - RUSTDOCFLAGS="-D warnings" \ - direnv exec . \ - cargo doc \ - --workspace \ - --locked \ - --profile test \ - --all-features \ - --no-deps \ - --document-private-items \ - --color always -""" - -[[task]] -name = "clippy/default" -group = "lints" -script = """ -direnv exec . \ -cargo clippy \ +RUSTDOCFLAGS="-D warnings" cargo doc \ --workspace \ - --locked \ - --profile test \ - --color=always \ - -- \ - -D warnings + --all-features \ + --no-deps \ + --document-private-items \ + --color always """ [[task]] -name = "clippy/all" +name = "cargo-clippy" group = "lints" -script = """ -env DIRENV_DEVSHELL=all-features \ - direnv exec . \ - cargo clippy \ - --workspace \ - --locked \ - --profile test \ - --all-features \ - --color=always \ - -- \ - -D warnings -""" - -[[task]] -name = "clippy/no-features" -group = "lints" -script = """ -env DIRENV_DEVSHELL=no-features \ - direnv exec . 
\ - cargo clippy \ - --workspace \ - --locked \ - --profile test \ - --no-default-features \ - --color=always \ - -- \ - -D warnings -""" - -[[task]] -name = "clippy/other-features" -group = "lints" -script = """ -direnv exec . \ -cargo clippy \ - --workspace \ - --locked \ - --profile test \ - --no-default-features \ - --features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression,blurhashing \ - --color=always \ - -- \ - -D warnings -""" +script = "cargo clippy --workspace --all-targets --all-features --color=always -- -D warnings" [[task]] name = "lychee" group = "lints" -script = "lychee --verbose --offline docs *.md --exclude development.md --exclude contributing.md --exclude testing.md" +script = "lychee --offline docs" [[task]] -name = "markdownlint" -group = "lints" -script = "markdownlint docs *.md || true" # TODO: fix the ton of markdown lints so we can drop `|| true` - -[[task]] -name = "cargo/default" +name = "cargo" group = "tests" script = """ -env DIRENV_DEVSHELL=default \ - direnv exec . \ - cargo test \ - --workspace \ - --locked \ - --profile test \ - --all-targets \ - --no-fail-fast \ - --color=always \ - -- \ - --color=always -""" - -# Checks if the generated example config differs from the checked in repo's -# example config. 
-[[task]] -name = "example-config" -group = "tests" -depends = ["cargo/default"] -script = """ -git diff --exit-code conduwuit-example.toml +cargo test \ + --workspace \ + --all-targets \ + --all-features \ + --color=always \ + -- \ + --color=always """ diff --git a/flake.lock b/flake.lock index 1f87b9b6..07b37e26 100644 --- a/flake.lock +++ b/flake.lock @@ -4,17 +4,16 @@ "inputs": { "crane": "crane", "flake-compat": "flake-compat", - "flake-parts": "flake-parts", - "nix-github-actions": "nix-github-actions", + "flake-utils": "flake-utils", "nixpkgs": "nixpkgs", "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1738524606, - "narHash": "sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=", + "lastModified": 1707922053, + "narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=", "owner": "zhaofengli", "repo": "attic", - "rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e", + "rev": "6eabc3f02fae3683bffab483e614bebfcd476b21", "type": "github" }, "original": { @@ -24,76 +23,6 @@ "type": "github" } }, - "cachix": { - "inputs": { - "devenv": "devenv", - "flake-compat": "flake-compat_2", - "git-hooks": "git-hooks", - "nixpkgs": "nixpkgs_4" - }, - "locked": { - "lastModified": 1737621947, - "narHash": "sha256-8HFvG7fvIFbgtaYAY2628Tb89fA55nPm2jSiNs0/Cws=", - "owner": "cachix", - "repo": "cachix", - "rev": "f65a3cd5e339c223471e64c051434616e18cc4f5", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "master", - "repo": "cachix", - "type": "github" - } - }, - "cachix_2": { - "inputs": { - "devenv": [ - "cachix", - "devenv" - ], - "flake-compat": [ - "cachix", - "devenv" - ], - "git-hooks": [ - "cachix", - "devenv" - ], - "nixpkgs": "nixpkgs_2" - }, - "locked": { - "lastModified": 1728672398, - "narHash": "sha256-KxuGSoVUFnQLB2ZcYODW7AVPAh9JqRlD5BrfsC/Q4qs=", - "owner": "cachix", - "repo": "cachix", - "rev": "aac51f698309fd0f381149214b7eee213c66ef0a", - "type": "github" - }, - "original": { - "owner": "cachix", - "ref": "latest", - 
"repo": "cachix", - "type": "github" - } - }, - "complement": { - "flake": false, - "locked": { - "lastModified": 1741891349, - "narHash": "sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=", - "owner": "girlbossceo", - "repo": "complement", - "rev": "e587b3df569cba411aeac7c20b6366d03c143745", - "type": "github" - }, - "original": { - "owner": "girlbossceo", - "ref": "main", - "repo": "complement", - "type": "github" - } - }, "crane": { "inputs": { "nixpkgs": [ @@ -102,11 +31,11 @@ ] }, "locked": { - "lastModified": 1722960479, - "narHash": "sha256-NhCkJJQhD5GUib8zN9JrmYGMwt4lCRp6ZVNzIiYCl0Y=", + "lastModified": 1702918879, + "narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=", "owner": "ipetkov", "repo": "crane", - "rev": "4c6c77920b8d44cd6660c1621dea6b3fc4b4c4f4", + "rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb", "type": "github" }, "original": { @@ -116,49 +45,23 @@ } }, "crane_2": { - "locked": { - "lastModified": 1739936662, - "narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=", - "owner": "ipetkov", - "repo": "crane", - "rev": "19de14aaeb869287647d9461cbd389187d8ecdb7", - "type": "github" - }, - "original": { - "owner": "ipetkov", - "ref": "master", - "repo": "crane", - "type": "github" - } - }, - "devenv": { "inputs": { - "cachix": "cachix_2", - "flake-compat": [ - "cachix", - "flake-compat" - ], - "git-hooks": [ - "cachix", - "git-hooks" - ], - "nix": "nix", "nixpkgs": [ - "cachix", "nixpkgs" ] }, "locked": { - "lastModified": 1733323168, - "narHash": "sha256-d5DwB4MZvlaQpN6OQ4SLYxb5jA4UH5EtV5t5WOtjLPU=", - "owner": "cachix", - "repo": "devenv", - "rev": "efa9010b8b1cfd5dd3c7ed1e172a470c3b84a064", + "lastModified": 1707685877, + "narHash": "sha256-XoXRS+5whotelr1rHiZle5t5hDg9kpguS5yk8c8qzOc=", + "owner": "ipetkov", + "repo": "crane", + "rev": "2c653e4478476a52c6aa3ac0495e4dea7449ea0e", "type": "github" }, "original": { - "owner": "cachix", - "repo": "devenv", + "owner": "ipetkov", + "repo": "crane", + "rev": 
"2c653e4478476a52c6aa3ac0495e4dea7449ea0e", "type": "github" } }, @@ -170,21 +73,36 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1740724364, - "narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=", + "lastModified": 1711606966, + "narHash": "sha256-nTaO7ZDL4D02dVC5ktqnXNiNuODBUHyE4qEcFjAUCQY=", "owner": "nix-community", "repo": "fenix", - "rev": "edf7d9e431cda8782e729253835f178a356d3aab", + "rev": "aa45c3e901ea42d6633af083c0c555efaf948b17", "type": "github" }, "original": { "owner": "nix-community", - "ref": "main", "repo": "fenix", "type": "github" } }, "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { "flake": false, "locked": { "lastModified": 1696426674, @@ -200,265 +118,61 @@ "type": "github" } }, - "flake-compat_2": { - "flake": false, - "locked": { - "lastModified": 1733328505, - "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-compat_3": { - "flake": false, - "locked": { - "lastModified": 1733328505, - "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", - "type": "github" - }, - "original": { - "owner": "edolstra", - "ref": "master", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": [ - "attic", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1722555600, - "narHash": 
"sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-parts_2": { - "inputs": { - "nixpkgs-lib": [ - "cachix", - "devenv", - "nix", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1712014858, - "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, "flake-utils": { + "locked": { + "lastModified": 1667395993, + "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { "inputs": { "systems": "systems" }, "locked": { - "lastModified": 1731533236, - "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { "owner": "numtide", - "ref": "main", "repo": "flake-utils", "type": "github" } }, - "git-hooks": { - "inputs": { - "flake-compat": [ - "cachix", - "flake-compat" - ], - "gitignore": "gitignore", - "nixpkgs": [ - "cachix", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable_2" - }, - "locked": { - "lastModified": 1733318908, - "narHash": "sha256-SVQVsbafSM1dJ4fpgyBqLZ+Lft+jcQuMtEL3lQWx2Sk=", - "owner": "cachix", - "repo": "git-hooks.nix", - "rev": 
"6f4e2a2112050951a314d2733a994fbab94864c6", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "git-hooks.nix", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "cachix", - "git-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1709087332, - "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "libgit2": { - "flake": false, - "locked": { - "lastModified": 1697646580, - "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=", - "owner": "libgit2", - "repo": "libgit2", - "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5", - "type": "github" - }, - "original": { - "owner": "libgit2", - "repo": "libgit2", - "type": "github" - } - }, - "liburing": { - "flake": false, - "locked": { - "lastModified": 1740613216, - "narHash": "sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=", - "owner": "axboe", - "repo": "liburing", - "rev": "e1003e496e66f9b0ae06674869795edf772d5500", - "type": "github" - }, - "original": { - "owner": "axboe", - "ref": "master", - "repo": "liburing", - "type": "github" - } - }, - "nix": { - "inputs": { - "flake-compat": [ - "cachix", - "devenv" - ], - "flake-parts": "flake-parts_2", - "libgit2": "libgit2", - "nixpkgs": "nixpkgs_3", - "nixpkgs-23-11": [ - "cachix", - "devenv" - ], - "nixpkgs-regression": [ - "cachix", - "devenv" - ], - "pre-commit-hooks": [ - "cachix", - "devenv" - ] - }, - "locked": { - "lastModified": 1727438425, - "narHash": "sha256-X8ES7I1cfNhR9oKp06F6ir4Np70WGZU5sfCOuNBEwMg=", - "owner": "domenkozar", - "repo": "nix", - "rev": "f6c5ae4c1b2e411e6b1e6a8181cc84363d6a7546", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "devenv-2.24", - "repo": "nix", - "type": "github" - } - }, "nix-filter": { "locked": { 
- "lastModified": 1731533336, - "narHash": "sha256-oRam5PS1vcrr5UPgALW0eo1m/5/pls27Z/pabHNy2Ms=", + "lastModified": 1710156097, + "narHash": "sha256-1Wvk8UP7PXdf8bCCaEoMnOT1qe5/Duqgj+rL8sRQsSM=", "owner": "numtide", "repo": "nix-filter", - "rev": "f7653272fd234696ae94229839a99b73c9ab7de0", + "rev": "3342559a24e85fc164b295c3444e8a139924675b", "type": "github" }, "original": { "owner": "numtide", - "ref": "main", "repo": "nix-filter", "type": "github" } }, - "nix-github-actions": { - "inputs": { - "nixpkgs": [ - "attic", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1729742964, - "narHash": "sha256-B4mzTcQ0FZHdpeWcpDYPERtyjJd/NIuaQ9+BV1h+MpA=", - "owner": "nix-community", - "repo": "nix-github-actions", - "rev": "e04df33f62cdcf93d73e9a04142464753a16db67", - "type": "github" - }, - "original": { - "owner": "nix-community", - "repo": "nix-github-actions", - "type": "github" - } - }, "nixpkgs": { "locked": { - "lastModified": 1726042813, - "narHash": "sha256-LnNKCCxnwgF+575y0pxUdlGZBO/ru1CtGHIqQVfvjlA=", + "lastModified": 1702539185, + "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "159be5db480d1df880a0135ca0bfed84c2f88353", + "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447", "type": "github" }, "original": { @@ -470,43 +184,27 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1724316499, - "narHash": "sha256-Qb9MhKBUTCfWg/wqqaxt89Xfi6qTD3XpTzQ9eXi3JmE=", + "lastModified": 1702780907, + "narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "797f7dc49e0bc7fab4b57c021cdf68f595e47841", + "rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-24.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-stable_2": { - "locked": { - "lastModified": 1730741070, - "narHash": "sha256-edm8WG19kWozJ/GqyYx2VjW99EdhjKwbY3ZwdlPAAlo=", - "owner": "NixOS", - "repo": "nixpkgs", - 
"rev": "d063c1dd113c91ab27959ba540c0d9753409edf3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-24.05", + "ref": "nixos-23.11", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1730531603, - "narHash": "sha256-Dqg6si5CqIzm87sp57j5nTaeBbWhHFaVyG7V6L8k3lY=", + "lastModified": 1711523803, + "narHash": "sha256-UKcYiHWHQynzj6CN/vTcix4yd1eCu1uFdsuarupdCQQ=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "7ffd9ae656aec493492b44d0ddfb28e79a1ea25d", + "rev": "2726f127c15a4cc9810843b96cad73c7eb39e443", "type": "github" }, "original": { @@ -516,94 +214,25 @@ "type": "github" } }, - "nixpkgs_3": { - "locked": { - "lastModified": 1717432640, - "narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "88269ab3044128b7c2f4c7d68448b2fb50456870", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "release-24.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_4": { - "locked": { - "lastModified": 1733212471, - "narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "55d15ad12a74eb7d4646254e13638ad0c4128776", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_5": { - "locked": { - "lastModified": 1740547748, - "narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "3a05eebede89661660945da1f151959900903b6a", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "rocksdb": { - "flake": false, - "locked": { - "lastModified": 1741308171, - "narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=", - "owner": "girlbossceo", - "repo": "rocksdb", - "rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986", - "type": "github" - }, - 
"original": { - "owner": "girlbossceo", - "ref": "v9.11.1", - "repo": "rocksdb", - "type": "github" - } - }, "root": { "inputs": { "attic": "attic", - "cachix": "cachix", - "complement": "complement", "crane": "crane_2", "fenix": "fenix", - "flake-compat": "flake-compat_3", - "flake-utils": "flake-utils", - "liburing": "liburing", + "flake-compat": "flake-compat_2", + "flake-utils": "flake-utils_2", "nix-filter": "nix-filter", - "nixpkgs": "nixpkgs_5", - "rocksdb": "rocksdb" + "nixpkgs": "nixpkgs_2" } }, "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1740691488, - "narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=", + "lastModified": 1711562745, + "narHash": "sha256-s/YOyBM0vumhkqCFi8CnV5imFlC5JJrGia8CmEXyQkM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5", + "rev": "ad51a17c627b4ca57f83f0dc1f3bb5f3f17e6d0b", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 49e860ed..7b100a10 100644 --- a/flake.nix +++ b/flake.nix @@ -1,309 +1,257 @@ { inputs = { + nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; + flake-utils.url = "github:numtide/flake-utils"; + nix-filter.url = "github:numtide/nix-filter"; + flake-compat = { + url = "github:edolstra/flake-compat"; + flake = false; + }; + + fenix = { + url = "github:nix-community/fenix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + crane = { + # Pin latest crane that's not affected by the following bugs: + # + # * + # * + # * + url = "github:ipetkov/crane?rev=2c653e4478476a52c6aa3ac0495e4dea7449ea0e"; + inputs.nixpkgs.follows = "nixpkgs"; + }; attic.url = "github:zhaofengli/attic?ref=main"; - cachix.url = "github:cachix/cachix?ref=master"; - complement = { url = "github:girlbossceo/complement?ref=main"; flake = false; }; - crane = { url = "github:ipetkov/crane?ref=master"; }; - fenix = { url = "github:nix-community/fenix?ref=main"; inputs.nixpkgs.follows = "nixpkgs"; }; - flake-compat = { url = 
"github:edolstra/flake-compat?ref=master"; flake = false; }; - flake-utils.url = "github:numtide/flake-utils?ref=main"; - nix-filter.url = "github:numtide/nix-filter?ref=main"; - nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; }; - liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; - outputs = inputs: - inputs.flake-utils.lib.eachDefaultSystem (system: + outputs = + { self + , nixpkgs + , flake-utils + , nix-filter + + , fenix + , crane + , ... + }: flake-utils.lib.eachDefaultSystem (system: let - pkgsHost = import inputs.nixpkgs{ - inherit system; - }; - pkgsHostStatic = pkgsHost.pkgsStatic; + pkgsHost = nixpkgs.legacyPackages.${system}; + allocator = null; + + rocksdb' = pkgs: + let + version = "9.0.0"; + in + (pkgs.rocksdb.overrideAttrs (old: { + inherit version; + src = pkgs.fetchFromGitHub { + owner = "girlbossceo"; + repo = "rocksdb"; + rev = "449768a833b79c267c584b5ab1d50e73db6faf9d"; + hash = "sha256-MjmGfAlZ5WC2+hFH6nEUprqBjO8xiTQh2HJIqQ5mIg8="; + }; + })); + + # Nix-accessible `Cargo.toml` + cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); # The Rust toolchain to use - toolchain = inputs.fenix.packages.${system}.fromToolchainFile { + toolchain = fenix.packages.${system}.fromToolchainFile { file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI="; + sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8="; }; - mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { - inherit pkgs; - book = self.callPackage ./nix/pkgs/book {}; - complement = self.callPackage ./nix/pkgs/complement {}; - craneLib = ((inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain)); - inherit inputs; - main = self.callPackage ./nix/pkgs/main {}; - oci-image = self.callPackage ./nix/pkgs/oci-image {}; - tini = pkgs.tini.overrideAttrs { - # newer clang/gcc is unhappy with 
tini-static: - patches = [ (pkgs.fetchpatch { - url = "https://patch-diff.githubusercontent.com/raw/krallin/tini/pull/224.patch"; - hash = "sha256-4bTfAhRyIT71VALhHY13hUgbjLEUyvgkIJMt3w9ag3k="; - }) - ]; - }; - liburing = pkgs.liburing.overrideAttrs { - # Tests weren't building - outputs = [ "out" "dev" "man" ]; - buildFlags = [ "library" ]; - src = inputs.liburing; - }; - rocksdb = (pkgs.rocksdb.override { - liburing = self.liburing; - }).overrideAttrs (old: { - src = inputs.rocksdb; - version = pkgs.lib.removePrefix - "v" - (builtins.fromJSON (builtins.readFile ./flake.lock)) - .nodes.rocksdb.original.ref; - # we have this already at https://github.com/girlbossceo/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155 - # unsetting this so i don't have to revert it and make this nix exclusive - patches = []; - cmakeFlags = pkgs.lib.subtractLists + builder = pkgs: + ((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; + + nativeBuildInputs = pkgs: [ + # bindgen needs the build platform's libclang. Apparently due to + # "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't + # quite do the right thing here. + pkgs.pkgsBuildHost.rustPlatform.bindgenHook + ]; + + env = pkgs: { + CONDUIT_VERSION_EXTRA = self.shortRev or self.dirtyShortRev; + ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include"; + ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib"; + } + // pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic { + ROCKSDB_STATIC = ""; + } + // { + CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in + lib.concatStringsSep " " ([ ] + ++ lib.optionals + # This disables PIE for static builds, which isn't great in terms + # of security. Unfortunately, my hand is forced because nixpkgs' + # `libstdc++.a` is built without `-fPIE`, which precludes us from + # leaving PIE enabled. 
+ stdenv.hostPlatform.isStatic + [ "-C" "relocation-model=static" ] + ++ lib.optionals + (stdenv.buildPlatform.config != stdenv.hostPlatform.config) + [ "-l" "c" ] + ++ lib.optionals + # This check has to match the one [here][0]. We only need to set + # these flags when using a different linker. Don't ask me why, + # though, because I don't know. All I know is it breaks otherwise. + # + # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40 + ( + # Nixpkgs doesn't check for x86_64 here but we do, because I + # observed a failure building statically for x86_64 without + # including it here. Linkers are weird. + (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64) + && stdenv.hostPlatform.isStatic + && !stdenv.isDarwin + && !stdenv.cc.bintools.isLLVM + ) [ - # no real reason to have snappy or zlib, no one uses this - "-DWITH_SNAPPY=1" - "-DZLIB=1" - "-DWITH_ZLIB=1" - # we dont need to use ldb or sst_dump (core_tools) - "-DWITH_CORE_TOOLS=1" - # we dont need to build rocksdb tests - "-DWITH_TESTS=1" - # we use rust-rocksdb via C interface and dont need C++ RTTI - "-DUSE_RTTI=1" - # this doesn't exist in RocksDB, and USE_SSE is deprecated for - # PORTABLE=$(march) - "-DFORCE_SSE42=1" - # PORTABLE will get set in main/default.nix - "-DPORTABLE=1" + "-l" + "stdc++" + "-L" + "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" ] - old.cmakeFlags - ++ [ - # no real reason to have snappy, no one uses this - "-DWITH_SNAPPY=0" - "-DZLIB=0" - "-DWITH_ZLIB=0" - # we dont need to use ldb or sst_dump (core_tools) - "-DWITH_CORE_TOOLS=0" - # we dont need trace tools - "-DWITH_TRACE_TOOLS=0" - # we dont need to build rocksdb tests - "-DWITH_TESTS=0" - # we use rust-rocksdb via C interface and dont need C++ RTTI - "-DUSE_RTTI=0" - ]; + ); + } - # outputs has "tools" which we dont need or use - outputs = [ "out" ]; + # What follows is stolen from [here][0]. 
Its purpose is to properly + # configure compilers and linkers for various stages of the build, and + # even covers the case of build scripts that need native code compiled and + # run on the build platform (I think). + # + # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80 + // ( + let + inherit (pkgs.rust.lib) envVars; + in + pkgs.lib.optionalAttrs + (pkgs.stdenv.targetPlatform.rust.rustcTarget + != pkgs.stdenv.hostPlatform.rust.rustcTarget) + ( + let + inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = + envVars.linkerForTarget; + } + ) + // ( + let + inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForHost; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost; + CARGO_BUILD_TARGET = rustcTarget; + } + ) + // ( + let + inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild; + HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc"; + HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++"; + } + ) + ); - # preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use - preInstall = ""; - }); - }); - - scopeHost = mkScope pkgsHost; - scopeHostStatic = mkScope pkgsHostStatic; - scopeCrossLinux = mkScope pkgsHost.pkgsLinux.pkgsStatic; - mkCrossScope = crossSystem: - let pkgsCrossStatic = (import inputs.nixpkgs { - inherit system; - crossSystem = { - config = crossSystem; - }; - }).pkgsStatic; - in - mkScope pkgsCrossStatic; - - mkDevShell = scope: scope.pkgs.mkShell { - env = 
scope.main.env // { - # Rust Analyzer needs to be able to find the path to default crate - # sources, and it can read this environment variable to do so. The - # `rust-src` component is required in order for this to work. - RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; - - # Convenient way to access a pinned version of Complement's source - # code. - COMPLEMENT_SRC = inputs.complement.outPath; - - # Needed for Complement: - CGO_CFLAGS = "-Wl,--no-gc-sections"; - CGO_LDFLAGS = "-Wl,--no-gc-sections"; + mkPackage = pkgs: allocator: builder pkgs { + src = nix-filter { + root = ./.; + include = [ + "src" + "Cargo.toml" + "Cargo.lock" + ]; }; - # Development tools - packages = [ - # Always use nightly rustfmt because most of its options are unstable - # - # This needs to come before `toolchain` in this list, otherwise - # `$PATH` will have stable rustfmt instead. - inputs.fenix.packages.${system}.latest.rustfmt + buildFeatures = [ ] + ++ (if allocator == "jemalloc" then [ "jemalloc" ] else [ ]) + ++ (if allocator == "hmalloc" then [ "hardened_malloc" ] else [ ]) + ; - toolchain - ] - ++ (with pkgsHost.pkgs; [ - # Required by hardened-malloc.rs dep - binutils + rocksdb' = (if allocator == "jemalloc" then (pkgs.rocksdb.override { enableJemalloc = true; }) else (rocksdb' pkgs)); - cargo-audit - cargo-auditable + # This is redundant with CI + doCheck = false; - # Needed for producing Debian packages - cargo-deb + env = env pkgs; + nativeBuildInputs = nativeBuildInputs pkgs; - # Needed for CI to check validity of produced Debian packages (dpkg-deb) - dpkg - - engage - - # Needed for Complement - go - - # Needed for our script for Complement - jq - gotestfmt - - # Needed for finding broken markdown links - lychee - - # Needed for linting markdown files - markdownlint-cli - - # Useful for editing the book locally - mdbook - - # used for rust caching in CI to speed it up - sccache - ] - # liburing is Linux-exclusive - ++ lib.optional stdenv.hostPlatform.isLinux 
liburing - ++ lib.optional stdenv.hostPlatform.isLinux numactl) - ++ scope.main.buildInputs - ++ scope.main.propagatedBuildInputs - ++ scope.main.nativeBuildInputs; + meta.mainProgram = cargoToml.package.name; }; + + mkOciImage = pkgs: package: allocator: + pkgs.dockerTools.buildLayeredImage { + name = package.pname; + tag = "main"; + # Debian makes builds reproducible through using the HEAD commit's date + created = "@${toString self.lastModified}"; + contents = [ + pkgs.dockerTools.caCertificates + ]; + config = { + # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) + # are handled as expected + Entrypoint = [ + "${pkgs.lib.getExe' pkgs.tini "tini"}" + "--" + ]; + Cmd = [ + "${pkgs.lib.getExe package}" + ]; + }; + }; in { packages = { - default = scopeHost.main.override { - disable_features = [ - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. - "jemalloc_prof" - "jemalloc_stats" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - default-debug = scopeHost.main.override { - profile = "dev"; - # debug build users expect full logs - disable_release_max_log_level = true; - disable_features = [ - # dont include experimental features - "experimental" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - # just a test profile used for things like CI and complement - default-test = scopeHost.main.override { - profile = "test"; - disable_release_max_log_level = true; - disable_features = [ - # dont include experimental features - "experimental" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - all-features = 
scopeHost.main.override { - all_features = true; - disable_features = [ - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. - "jemalloc_prof" - "jemalloc_stats" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - all-features-debug = scopeHost.main.override { - profile = "dev"; - all_features = true; - # debug build users expect full logs - disable_release_max_log_level = true; - disable_features = [ - # dont include experimental features - "experimental" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - hmalloc = scopeHost.main.override { features = ["hardened_malloc"]; }; + default = mkPackage pkgsHost null; + jemalloc = mkPackage pkgsHost "jemalloc"; + hmalloc = mkPackage pkgsHost "hmalloc"; + oci-image = mkOciImage pkgsHost self.packages.${system}.default null; + oci-image-jemalloc = mkOciImage pkgsHost self.packages.${system}.default "jemalloc"; + oci-image-hmalloc = mkOciImage pkgsHost self.packages.${system}.default "hmalloc"; - oci-image = scopeHost.oci-image; - oci-image-all-features = scopeHost.oci-image.override { - main = scopeHost.main.override { - all_features = true; - disable_features = [ - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. 
- "jemalloc_prof" - "jemalloc_stats" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - }; - oci-image-all-features-debug = scopeHost.oci-image.override { - main = scopeHost.main.override { - profile = "dev"; - all_features = true; - # debug build users expect full logs - disable_release_max_log_level = true; - disable_features = [ - # dont include experimental features - "experimental" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - }; - oci-image-hmalloc = scopeHost.oci-image.override { - main = scopeHost.main.override { - features = ["hardened_malloc"]; - }; - }; + book = + let + package = self.packages.${system}.default; + in + pkgsHost.stdenv.mkDerivation { + pname = "${package.pname}-book"; + version = package.version; - book = scopeHost.book; + src = nix-filter { + root = ./.; + include = [ + "book.toml" + "conduwuit-example.toml" + "README.md" + "debian/README.md" + "docs" + ]; + }; - complement = scopeHost.complement; - static-complement = scopeHostStatic.complement; - # macOS containers don't exist, so the complement images must be forced to linux - linux-complement = (mkCrossScope "${pkgsHost.hostPlatform.qemuArch}-linux-musl").complement; + nativeBuildInputs = (with pkgsHost; [ + mdbook + ]); + + buildPhase = '' + mdbook build + mv public $out + ''; + }; } // builtins.listToAttrs @@ -312,267 +260,108 @@ (crossSystem: let binaryName = "static-${crossSystem}"; - scopeCrossStatic = mkCrossScope crossSystem; + pkgsCrossStatic = + (import nixpkgs { + inherit system; + crossSystem = { + config = crossSystem; + }; + }).pkgsStatic; in [ # An output for a statically-linked binary { name = binaryName; - value = scopeCrossStatic.main; + value = mkPackage pkgsCrossStatic null; } - # An output for a statically-linked binary with x86_64 haswell - # 
target optimisations + # An output for a statically-linked binary with jemalloc { - name = "${binaryName}-x86_64-haswell-optimised"; - value = scopeCrossStatic.main.override { - x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); - }; - } - - # An output for a statically-linked unstripped debug ("dev") binary - { - name = "${binaryName}-debug"; - value = scopeCrossStatic.main.override { - profile = "dev"; - # debug build users expect full logs - disable_release_max_log_level = true; - }; - } - - # An output for a statically-linked unstripped debug binary with the - # "test" profile (for CI usage only) - { - name = "${binaryName}-test"; - value = scopeCrossStatic.main.override { - profile = "test"; - disable_release_max_log_level = true; - disable_features = [ - # dont include experimental features - "experimental" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - } - - # An output for a statically-linked binary with `--all-features` - { - name = "${binaryName}-all-features"; - value = scopeCrossStatic.main.override { - all_features = true; - disable_features = [ - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. 
- "jemalloc_prof" - "jemalloc_stats" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - } - - # An output for a statically-linked binary with `--all-features` and with x86_64 haswell - # target optimisations - { - name = "${binaryName}-all-features-x86_64-haswell-optimised"; - value = scopeCrossStatic.main.override { - all_features = true; - disable_features = [ - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. - "jemalloc_prof" - "jemalloc_stats" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); - }; - } - - # An output for a statically-linked unstripped debug ("dev") binary with `--all-features` - { - name = "${binaryName}-all-features-debug"; - value = scopeCrossStatic.main.override { - profile = "dev"; - all_features = true; - # debug build users expect full logs - disable_release_max_log_level = true; - disable_features = [ - # dont include experimental features - "experimental" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; + name = "${binaryName}-jemalloc"; + value = mkPackage pkgsCrossStatic "jemalloc"; } # An output for a statically-linked binary with hardened_malloc { name = "${binaryName}-hmalloc"; - value = scopeCrossStatic.main.override { - features = ["hardened_malloc"]; - }; + value = mkPackage pkgsCrossStatic "hmalloc"; } # An output for an OCI image based on that binary { name = "oci-image-${crossSystem}"; - value = scopeCrossStatic.oci-image; + value = mkOciImage + pkgsCrossStatic + 
self.packages.${system}.${binaryName} + null; } - # An output for an OCI image based on that binary with x86_64 haswell - # target optimisations + # An output for an OCI image based on that binary with jemalloc { - name = "oci-image-${crossSystem}-x86_64-haswell-optimised"; - value = scopeCrossStatic.oci-image.override { - main = scopeCrossStatic.main.override { - x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); - }; - }; - } - - # An output for an OCI image based on that unstripped debug ("dev") binary - { - name = "oci-image-${crossSystem}-debug"; - value = scopeCrossStatic.oci-image.override { - main = scopeCrossStatic.main.override { - profile = "dev"; - # debug build users expect full logs - disable_release_max_log_level = true; - }; - }; - } - - # An output for an OCI image based on that binary with `--all-features` - { - name = "oci-image-${crossSystem}-all-features"; - value = scopeCrossStatic.oci-image.override { - main = scopeCrossStatic.main.override { - all_features = true; - disable_features = [ - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. - "jemalloc_prof" - "jemalloc_stats" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - }; - } - - # An output for an OCI image based on that binary with `--all-features` and with x86_64 haswell - # target optimisations - { - name = "oci-image-${crossSystem}-all-features-x86_64-haswell-optimised"; - value = scopeCrossStatic.oci-image.override { - main = scopeCrossStatic.main.override { - all_features = true; - disable_features = [ - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. 
- "jemalloc_prof" - "jemalloc_stats" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - x86_64_haswell_target_optimised = (if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false); - }; - }; - } - - # An output for an OCI image based on that unstripped debug ("dev") binary with `--all-features` - { - name = "oci-image-${crossSystem}-all-features-debug"; - value = scopeCrossStatic.oci-image.override { - main = scopeCrossStatic.main.override { - profile = "dev"; - all_features = true; - # debug build users expect full logs - disable_release_max_log_level = true; - disable_features = [ - # dont include experimental features - "experimental" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; - }; - }; + name = "oci-image-${crossSystem}-jemalloc"; + value = mkOciImage + pkgsCrossStatic + self.packages.${system}.${binaryName} + "jemalloc"; } # An output for an OCI image based on that binary with hardened_malloc { name = "oci-image-${crossSystem}-hmalloc"; - value = scopeCrossStatic.oci-image.override { - main = scopeCrossStatic.main.override { - features = ["hardened_malloc"]; - }; - }; - } - - # An output for a complement OCI image for the specified platform - { - name = "complement-${crossSystem}"; - value = scopeCrossStatic.complement; + value = mkOciImage + pkgsCrossStatic + self.packages.${system}.${binaryName} + "hmalloc"; } ] ) [ - #"x86_64-apple-darwin" - #"aarch64-apple-darwin" - "x86_64-linux-gnu" - "x86_64-linux-musl" - "aarch64-linux-musl" + "x86_64-unknown-linux-musl" + "x86_64-unknown-linux-musl-jemalloc" + "x86_64-unknown-linux-musl-hmalloc" + "aarch64-unknown-linux-musl" + "aarch64-unknown-linux-musl-jemalloc" + "aarch64-unknown-linux-musl-hmalloc" ] ) ); - devShells.default = mkDevShell scopeHostStatic; - 
devShells.all-features = mkDevShell - (scopeHostStatic.overrideScope (final: prev: { - main = prev.main.override { - all_features = true; - disable_features = [ - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. - "jemalloc_prof" - "jemalloc_stats" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" - ]; + devShells.default = pkgsHost.mkShell { + env = env pkgsHost // { + # Rust Analyzer needs to be able to find the path to default crate + # sources, and it can read this environment variable to do so. The + # `rust-src` component is required in order for this to work. + RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; }; - })); - devShells.no-features = mkDevShell - (scopeHostStatic.overrideScope (final: prev: { - main = prev.main.override { default_features = false; }; - })); - devShells.dynamic = mkDevShell scopeHost; + + # Development tools + nativeBuildInputs = nativeBuildInputs pkgsHost ++ [ + # Always use nightly rustfmt because most of its options are unstable + # + # This needs to come before `toolchain` in this list, otherwise + # `$PATH` will have stable rustfmt instead. 
+ fenix.packages.${system}.latest.rustfmt + + toolchain + ] ++ (with pkgsHost; [ + engage + + # Needed for producing Debian packages + cargo-deb + + # Needed for Complement + go + olm + + # Needed for our script for Complement + jq + + # Needed for finding broken markdown links + lychee + + # Useful for editing the book locally + mdbook + ]); + }; }); } diff --git a/nix/pkgs/book/default.nix b/nix/pkgs/book/default.nix deleted file mode 100644 index 3995ab79..00000000 --- a/nix/pkgs/book/default.nix +++ /dev/null @@ -1,36 +0,0 @@ -{ inputs - -# Dependencies -, main -, mdbook -, stdenv -}: - -stdenv.mkDerivation { - inherit (main) pname version; - - src = inputs.nix-filter { - root = inputs.self; - include = [ - "book.toml" - "conduwuit-example.toml" - "CODE_OF_CONDUCT.md" - "CONTRIBUTING.md" - "README.md" - "development.md" - "debian/conduwuit.service" - "debian/README.md" - "arch/conduwuit.service" - "docs" - "theme" - ]; - }; - - nativeBuildInputs = [ - mdbook - ]; - - buildPhase = '' - mdbook build -d $out - ''; -} diff --git a/nix/pkgs/complement/certificate.crt b/nix/pkgs/complement/certificate.crt deleted file mode 100644 index 5dd4fdea..00000000 --- a/nix/pkgs/complement/certificate.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL -BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz -IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy -NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m -ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt -/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88 -awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp -L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK -K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl -8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV 
-HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy -ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw -DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ -irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+ -HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e -VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3 -y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d -jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4= ------END CERTIFICATE----- diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml deleted file mode 100644 index 7f4ecef7..00000000 --- a/nix/pkgs/complement/config.toml +++ /dev/null @@ -1,50 +0,0 @@ -[global] -address = "0.0.0.0" -allow_device_name_federation = true -allow_guest_registration = true -allow_public_room_directory_over_federation = true -allow_public_room_directory_without_auth = true -allow_registration = true -database_path = "/database" -log = "trace,h2=debug,hyper=debug" -port = [8008, 8448] -trusted_servers = [] -only_query_trusted_key_servers = false -query_trusted_key_servers_first = false -query_trusted_key_servers_first_on_join = false -yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true -ip_range_denylist = [] -url_preview_domain_contains_allowlist = ["*"] -url_preview_domain_explicit_denylist = ["*"] -media_compat_file_link = false -media_startup_check = true -prune_missing_media = true -log_colors = true -admin_room_notices = false -allow_check_for_updates = false -intentionally_unknown_config_option_for_testing = true -rocksdb_log_level = "info" -rocksdb_max_log_files = 1 -rocksdb_recovery_mode = 0 -rocksdb_paranoid_file_checks = true -log_guest_registrations = false -allow_legacy_media = true -startup_netburst = true -startup_netburst_keep = -1 - -allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true - -# valgrind makes 
things so slow -dns_timeout = 60 -dns_attempts = 20 -request_conn_timeout = 60 -request_timeout = 120 -well_known_conn_timeout = 60 -well_known_timeout = 60 -federation_idle_timeout = 300 -sender_timeout = 300 -sender_idle_timeout = 300 -sender_retry_backoff_limit = 300 - -[global.tls] -dual_protocol = true diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix deleted file mode 100644 index 9b010e14..00000000 --- a/nix/pkgs/complement/default.nix +++ /dev/null @@ -1,89 +0,0 @@ -# Dependencies -{ bashInteractive -, buildEnv -, coreutils -, dockerTools -, lib -, main -, stdenv -, tini -, writeShellScriptBin -}: - -let - main' = main.override { - profile = "test"; - all_features = true; - disable_release_max_log_level = true; - disable_features = [ - # console/CLI stuff isn't used or relevant for complement - "console" - "tokio_console" - # sentry telemetry isn't useful for complement, disabled by default anyways - "sentry_telemetry" - "perf_measurements" - # this is non-functional on nix for some reason - "hardened_malloc" - # dont include experimental features - "experimental" - # compression isn't needed for complement - "brotli_compression" - "gzip_compression" - "zstd_compression" - # complement doesn't need hot reloading - "conduwuit_mods" - # complement doesn't have URL preview media tests - "url_preview" - ]; - }; - - start = writeShellScriptBin "start" '' - set -euxo pipefail - - ${lib.getExe' coreutils "env"} \ - CONDUWUIT_SERVER_NAME="$SERVER_NAME" \ - ${lib.getExe main'} - ''; -in - -dockerTools.buildImage { - name = "complement-conduwuit"; - tag = "main"; - - copyToRoot = buildEnv { - name = "root"; - pathsToLink = [ - "/bin" - ]; - paths = [ - bashInteractive - coreutils - main' - start - ]; - }; - - config = { - Cmd = [ - "${lib.getExe start}" - ]; - - Entrypoint = if !stdenv.hostPlatform.isDarwin - # Use the `tini` init system so that signals (e.g. 
ctrl+c/SIGINT) - # are handled as expected - then [ "${lib.getExe' tini "tini"}" "--" ] - else []; - - Env = [ - "CONDUWUIT_TLS__KEY=${./private_key.key}" - "CONDUWUIT_TLS__CERTS=${./certificate.crt}" - "CONDUWUIT_CONFIG=${./config.toml}" - "RUST_BACKTRACE=full" - ]; - - ExposedPorts = { - "8008/tcp" = {}; - "8448/tcp" = {}; - }; - }; -} diff --git a/nix/pkgs/complement/private_key.key b/nix/pkgs/complement/private_key.key deleted file mode 100644 index 5b9d4d4f..00000000 --- a/nix/pkgs/complement/private_key.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb -iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT -LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a -09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc -ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga -Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO -/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu -WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB -DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb -piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN -D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ -8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+ -3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq -/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90 -FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q -td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M -Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A -91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV -8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh -VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW -UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K 
-kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz -KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7 -IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh -tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM -9MVtdgSkuh2gwkD/mMoAJXM= ------END PRIVATE KEY----- diff --git a/nix/pkgs/complement/signing_request.csr b/nix/pkgs/complement/signing_request.csr deleted file mode 100644 index e2aa658e..00000000 --- a/nix/pkgs/complement/signing_request.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK -DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH -uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR -xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb -o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B -hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe -vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB -CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79 -ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3 -r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb -XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK -MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76 -U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/ ------END CERTIFICATE REQUEST----- diff --git a/nix/pkgs/complement/v3.ext b/nix/pkgs/complement/v3.ext deleted file mode 100644 index 0deaa48a..00000000 --- a/nix/pkgs/complement/v3.ext +++ /dev/null @@ -1,12 +0,0 @@ -authorityKeyIdentifier=keyid,issuer -basicConstraints=CA:FALSE -keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment -subjectAltName = @alt_names - -[alt_names] -DNS.1 = *.docker.internal -DNS.2 = hs1 -DNS.3 = hs2 -DNS.4 = hs3 -DNS.5 = hs4 -IP.1 = 127.0.0.1 diff --git 
a/nix/pkgs/main/cross-compilation-env.nix b/nix/pkgs/main/cross-compilation-env.nix deleted file mode 100644 index 0f326c92..00000000 --- a/nix/pkgs/main/cross-compilation-env.nix +++ /dev/null @@ -1,87 +0,0 @@ -{ lib -, pkgsBuildHost -, rust -, stdenv -}: - -lib.optionalAttrs stdenv.hostPlatform.isStatic { - ROCKSDB_STATIC = ""; -} -// -{ - CARGO_BUILD_RUSTFLAGS = - lib.concatStringsSep - " " - ([] - # This disables PIE for static builds, which isn't great in terms - # of security. Unfortunately, my hand is forced because nixpkgs' - # `libstdc++.a` is built without `-fPIE`, which precludes us from - # leaving PIE enabled. - ++ lib.optionals - stdenv.hostPlatform.isStatic - [ "-C" "relocation-model=static" ] - ++ lib.optionals - (stdenv.buildPlatform.config != stdenv.hostPlatform.config) - [ - "-l" - "c" - - "-l" - "stdc++" - - "-L" - "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" - ] - ); -} - -# What follows is stolen from [here][0]. Its purpose is to properly -# configure compilers and linkers for various stages of the build, and -# even covers the case of build scripts that need native code compiled and -# run on the build platform (I think). 
-# -# [0]: https://github.com/NixOS/nixpkgs/blob/nixpkgs-unstable/pkgs/build-support/rust/lib/default.nix#L48-L68 -// -( - let - inherit (rust.lib) envVars; - in - lib.optionalAttrs - (stdenv.targetPlatform.rust.rustcTarget - != stdenv.hostPlatform.rust.rustcTarget) - ( - let - inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForTarget; - } - ) - // - ( - let - inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForHost; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForHost; - CARGO_BUILD_TARGET = rustcTarget; - } - ) - // - ( - let - inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForBuild; - HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc"; - HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++"; - } - ) -) diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix deleted file mode 100644 index 9c8038a7..00000000 --- a/nix/pkgs/main/default.nix +++ /dev/null @@ -1,220 +0,0 @@ -# Dependencies (keep sorted) -{ craneLib -, inputs -, jq -, lib -, libiconv -, liburing -, pkgsBuildHost -, rocksdb -, removeReferencesTo -, rust -, rust-jemalloc-sys -, stdenv - -# Options (keep sorted) -, all_features ? false -, default_features ? true -# default list of disabled features -, disable_features ? [ - # dont include experimental features - "experimental" - # jemalloc profiling/stats features are expensive and shouldn't - # be expected on non-debug builds. 
- "jemalloc_prof" - "jemalloc_stats" - # this is non-functional on nix for some reason - "hardened_malloc" - # conduwuit_mods is a development-only hot reload feature - "conduwuit_mods" -] -, disable_release_max_log_level ? false -, features ? [] -, profile ? "release" -# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag -# haswell is pretty much any x86 cpu made in the last 12 years, and -# supports modern CPU extensions that rocksdb can make use of. -# disable if trying to make a portable x86_64 build for very old hardware -, x86_64_haswell_target_optimised ? false -}: - -let -# We perform default-feature unification in nix, because some of the dependencies -# on the nix side depend on feature values. -crateFeatures = path: - let manifest = lib.importTOML "${path}/Cargo.toml"; in - lib.remove "default" (lib.attrNames manifest.features); -crateDefaultFeatures = path: - (lib.importTOML "${path}/Cargo.toml").features.default; -allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main"; -allFeatures = crateFeatures "${inputs.self}/src/main"; -features' = lib.unique - (features ++ - lib.optionals default_features allDefaultFeatures ++ - lib.optionals all_features allFeatures); -disable_features' = disable_features ++ lib.optionals disable_release_max_log_level ["release_max_log_level"]; -features'' = lib.subtractLists disable_features' features'; - -featureEnabled = feature : builtins.elem feature features''; - -enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin; - -# This derivation will set the JEMALLOC_OVERRIDE variable, causing the -# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building it's -# own. In order for this to work, we need to set flags on the build that match -# whatever flags tikv-jemalloc-sys was going to use. These are dependent on -# which features we enable in tikv-jemalloc-sys. 
-rust-jemalloc-sys' = (rust-jemalloc-sys.override { - # tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature - unprefixed = true; -}).overrideAttrs (old: { - configureFlags = old.configureFlags ++ - # we dont need docs - [ "--disable-doc" ] ++ - # we dont need cxx/C++ integration - [ "--disable-cxx" ] ++ - # tikv-jemalloc-sys/profiling feature - lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++ - # tikv-jemalloc-sys/stats feature - (if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]); -}); - -buildDepsOnlyEnv = - let - rocksdb' = (rocksdb.override { - jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'; - # rocksdb fails to build with prefixed jemalloc, which is required on - # darwin due to [1]. In this case, fall back to building rocksdb with - # libc malloc. This should not cause conflicts, because all of the - # jemalloc symbols are prefixed. - # - # [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17 - enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin; - - # for some reason enableLiburing in nixpkgs rocksdb is default true - # which breaks Darwin entirely - enableLiburing = enableLiburing; - }).overrideAttrs (old: { - enableLiburing = enableLiburing; - cmakeFlags = (if x86_64_haswell_target_optimised then (lib.subtractLists [ - # dont make a portable build if x86_64_haswell_target_optimised is enabled - "-DPORTABLE=1" - ] old.cmakeFlags - ++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ]) - ) - ++ old.cmakeFlags; - - # outputs has "tools" which we dont need or use - outputs = [ "out" ]; - - # preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use - preInstall = ""; - }); - in - { - # https://crane.dev/faq/rebuilds-bindgen.html - NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa"; - - CARGO_PROFILE = profile; - ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include"; - 
ROCKSDB_LIB_DIR = "${rocksdb'}/lib"; - } - // - (import ./cross-compilation-env.nix { - # Keep sorted - inherit - lib - pkgsBuildHost - rust - stdenv; - }); - -buildPackageEnv = { - CONDUWUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev or ""; -} // buildDepsOnlyEnv // { - # Only needed in static stdenv because these are transitive dependencies of rocksdb - CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS - + lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic) - " -L${lib.getLib liburing}/lib -luring" - + lib.optionalString x86_64_haswell_target_optimised - " -Ctarget-cpu=haswell"; -}; - - - -commonAttrs = { - inherit - (craneLib.crateNameFromCargoToml { - cargoToml = "${inputs.self}/Cargo.toml"; - }) - pname - version; - - src = let filter = inputs.nix-filter.lib; in filter { - root = inputs.self; - - # Keep sorted - include = [ - ".cargo" - "Cargo.lock" - "Cargo.toml" - "src" - ]; - }; - - doCheck = true; - - cargoExtraArgs = "--no-default-features --locked " - + lib.optionalString - (features'' != []) - "--features " + (builtins.concatStringsSep "," features''); - - dontStrip = profile == "dev" || profile == "test"; - dontPatchELF = profile == "dev" || profile == "test"; - - buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys' - # needed to build Rust applications on macOS - ++ lib.optionals stdenv.hostPlatform.isDarwin [ - # https://github.com/NixOS/nixpkgs/issues/206242 - # ld: library not found for -liconv - libiconv - # https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell - # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 - pkgsBuildHost.darwin.apple_sdk.frameworks.Security - ]; - - nativeBuildInputs = [ - # bindgen needs the build platform's libclang. Apparently due to "splicing - # weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the - # right thing here. 
- pkgsBuildHost.rustPlatform.bindgenHook - - # We don't actually depend on `jq`, but crane's `buildPackage` does, but - # its `buildDepsOnly` doesn't. This causes those two derivations to have - # differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious - # rebuilds of bindgen and its depedents. - jq - ]; - }; -in - -craneLib.buildPackage ( commonAttrs // { - cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // { - env = buildDepsOnlyEnv; - }); - - doCheck = true; - - cargoExtraArgs = "--no-default-features --locked " - + lib.optionalString - (features'' != []) - "--features " + (builtins.concatStringsSep "," features''); - - env = buildPackageEnv; - - passthru = { - env = buildPackageEnv; - }; - - meta.mainProgram = commonAttrs.pname; -}) diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix deleted file mode 100644 index 1650053d..00000000 --- a/nix/pkgs/oci-image/default.nix +++ /dev/null @@ -1,46 +0,0 @@ -{ inputs - -# Dependencies -, dockerTools -, lib -, main -, stdenv -, tini -}: - -dockerTools.buildLayeredImage { - name = main.pname; - tag = "main"; - created = "@${toString inputs.self.lastModified}"; - contents = [ - dockerTools.caCertificates - main - ]; - config = { - Entrypoint = if !stdenv.hostPlatform.isDarwin - # Use the `tini` init system so that signals (e.g. 
ctrl+c/SIGINT) - # are handled as expected - then [ "${lib.getExe' tini "tini"}" "--" ] - else []; - Cmd = [ - "${lib.getExe main}" - ]; - Env = [ - "RUST_BACKTRACE=full" - ]; - Labels = { - "org.opencontainers.image.authors" = "June Clementine Strawberry and Jason Volk - "; - "org.opencontainers.image.created" ="@${toString inputs.self.lastModified}"; - "org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust"; - "org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/"; - "org.opencontainers.image.licenses" = "Apache-2.0"; - "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; - "org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit"; - "org.opencontainers.image.title" = main.pname; - "org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/"; - "org.opencontainers.image.vendor" = "girlbossceo"; - "org.opencontainers.image.version" = main.version; - }; - }; -} diff --git a/renovate.json b/renovate.json index eecf8532..b63c7b3f 100644 --- a/renovate.json +++ b/renovate.json @@ -12,15 +12,5 @@ "nix": { "enabled": true }, - "labels": [ - "dependencies", - "github_actions" - ], - "ignoreDeps": [ - "tikv-jemllocator", - "tikv-jemalloc-sys", - "tikv-jemalloc-ctl", - "opentelemetry-rust", - "tracing-opentelemetry" - ] + "ignoreDeps": ["tower-http", "axum-server", "hyper", "axum", "http"] } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index aadc8f99..c570550f 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -2,6 +2,8 @@ # # Other files that need upkeep when this changes: # +# * `.gitlab-ci.yml` +# * `.github/workflows/ci.yml` # * `Cargo.toml` # * `flake.nix` # @@ -9,21 +11,13 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.86.0" -profile = "minimal" +channel = "1.75.0" components = [ # For rust-analyzer "rust-src", - "rust-analyzer", - # For CI and editors - "rustfmt", - "clippy", ] targets = [ - #"x86_64-apple-darwin", "x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl", "aarch64-unknown-linux-musl", - "aarch64-unknown-linux-gnu", - #"aarch64-apple-darwin", -] +] \ No newline at end of file diff --git a/rustfmt.toml b/rustfmt.toml index 89041b04..114677d4 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,33 +1,28 @@ -array_width = 80 -chain_width = 60 -comment_width = 80 +edition = "2021" + condense_wildcard_suffixes = true -style_edition = "2024" -fn_call_width = 80 -fn_single_line = true format_code_in_doc_comments = true format_macro_bodies = true format_macro_matchers = true format_strings = true -group_imports = "StdExternalCrate" -hard_tabs = true hex_literal_case = "Upper" -imports_granularity = "Crate" -match_arm_blocks = false -match_arm_leading_pipes = "Always" +max_width = 120 +tab_spaces = 4 +array_width = 80 +comment_width = 80 +wrap_comments = true +fn_params_layout = "Compressed" +fn_call_width = 80 +fn_single_line = true +hard_tabs = true match_block_trailing_comma = true -max_width = 98 -newline_style = "Unix" +imports_granularity = "Crate" normalize_comments = false -overflow_delimited_expr = true reorder_impl_items = true reorder_imports = true -single_line_if_else_max_width = 60 -single_line_let_else_max_width = 80 -struct_lit_width = 40 -tab_spaces = 4 -unstable_features = true +group_imports = "StdExternalCrate" +newline_style = "Unix" use_field_init_shorthand = true use_small_heuristics = "Off" use_try_shorthand = true -wrap_comments = true +chain_width = 60 diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml deleted file mode 100644 index 7896ef97..00000000 --- a/src/admin/Cargo.toml +++ /dev/null @@ -1,94 +0,0 @@ -[package] -name = "conduwuit_admin" -categories.workspace = true -description.workspace = true 
-edition.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version.workspace = true - -[lib] -path = "mod.rs" -crate-type = [ - "rlib", -# "dylib", -] - -[features] -brotli_compression = [ - "conduwuit-api/brotli_compression", - "conduwuit-core/brotli_compression", - "conduwuit-service/brotli_compression", -] -gzip_compression = [ - "conduwuit-api/gzip_compression", - "conduwuit-core/gzip_compression", - "conduwuit-service/gzip_compression", -] -io_uring = [ - "conduwuit-api/io_uring", - "conduwuit-database/io_uring", - "conduwuit-service/io_uring", -] -jemalloc = [ - "conduwuit-api/jemalloc", - "conduwuit-core/jemalloc", - "conduwuit-database/jemalloc", - "conduwuit-service/jemalloc", -] -jemalloc_conf = [ - "conduwuit-api/jemalloc_conf", - "conduwuit-core/jemalloc_conf", - "conduwuit-database/jemalloc_conf", - "conduwuit-service/jemalloc_conf", -] -jemalloc_prof = [ - "conduwuit-api/jemalloc_prof", - "conduwuit-core/jemalloc_prof", - "conduwuit-database/jemalloc_prof", - "conduwuit-service/jemalloc_prof", -] -jemalloc_stats = [ - "conduwuit-api/jemalloc_stats", - "conduwuit-core/jemalloc_stats", - "conduwuit-database/jemalloc_stats", - "conduwuit-service/jemalloc_stats", -] -release_max_log_level = [ - "conduwuit-api/release_max_log_level", - "conduwuit-core/release_max_log_level", - "conduwuit-database/release_max_log_level", - "conduwuit-service/release_max_log_level", - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", -] -zstd_compression = [ - "conduwuit-api/zstd_compression", - "conduwuit-core/zstd_compression", - "conduwuit-database/zstd_compression", - "conduwuit-service/zstd_compression", -] - -[dependencies] -clap.workspace = true -conduwuit-api.workspace = true -conduwuit-core.workspace = true -conduwuit-database.workspace = true -conduwuit-macros.workspace = true -conduwuit-service.workspace = true 
-const-str.workspace = true -futures.workspace = true -log.workspace = true -ruma.workspace = true -serde_json.workspace = true -serde_yaml.workspace = true -tokio.workspace = true -tracing-subscriber.workspace = true -tracing.workspace = true - -[lints] -workspace = true diff --git a/src/admin/admin.rs b/src/admin/admin.rs deleted file mode 100644 index 0d636c72..00000000 --- a/src/admin/admin.rs +++ /dev/null @@ -1,66 +0,0 @@ -use clap::Parser; -use conduwuit::Result; - -use crate::{ - appservice, appservice::AppserviceCommand, check, check::CheckCommand, context::Context, - debug, debug::DebugCommand, federation, federation::FederationCommand, media, - media::MediaCommand, query, query::QueryCommand, room, room::RoomCommand, server, - server::ServerCommand, user, user::UserCommand, -}; - -#[derive(Debug, Parser)] -#[command(name = "conduwuit", version = conduwuit::version())] -pub(super) enum AdminCommand { - #[command(subcommand)] - /// - Commands for managing appservices - Appservices(AppserviceCommand), - - #[command(subcommand)] - /// - Commands for managing local users - Users(UserCommand), - - #[command(subcommand)] - /// - Commands for managing rooms - Rooms(RoomCommand), - - #[command(subcommand)] - /// - Commands for managing federation - Federation(FederationCommand), - - #[command(subcommand)] - /// - Commands for managing the server - Server(ServerCommand), - - #[command(subcommand)] - /// - Commands for managing media - Media(MediaCommand), - - #[command(subcommand)] - /// - Commands for checking integrity - Check(CheckCommand), - - #[command(subcommand)] - /// - Commands for debugging things - Debug(DebugCommand), - - #[command(subcommand)] - /// - Low-level queries for database getters and iterators - Query(QueryCommand), -} - -#[tracing::instrument(skip_all, name = "command")] -pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Result { - use AdminCommand::*; - - match command { - | Appservices(command) => 
appservice::process(command, context).await, - | Media(command) => media::process(command, context).await, - | Users(command) => user::process(command, context).await, - | Rooms(command) => room::process(command, context).await, - | Federation(command) => federation::process(command, context).await, - | Server(command) => server::process(command, context).await, - | Debug(command) => debug::process(command, context).await, - | Query(command) => query::process(command, context).await, - | Check(command) => check::process(command, context).await, - } -} diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs deleted file mode 100644 index 3575e067..00000000 --- a/src/admin/appservice/commands.rs +++ /dev/null @@ -1,80 +0,0 @@ -use conduwuit::{Err, Result, checked}; -use futures::{FutureExt, StreamExt, TryFutureExt}; - -use crate::admin_command; - -#[admin_command] -pub(super) async fn register(&self) -> Result { - let body = &self.body; - let body_len = self.body.len(); - if body_len < 2 - || !body[0].trim().starts_with("```") - || body.last().unwrap_or(&"").trim() != "```" - { - return Err!("Expected code block in command body. 
Add --help for details."); - } - - let range = 1..checked!(body_len - 1)?; - let appservice_config_body = body[range].join("\n"); - let parsed_config = serde_yaml::from_str(&appservice_config_body); - match parsed_config { - | Err(e) => return Err!("Could not parse appservice config as YAML: {e}"), - | Ok(registration) => match self - .services - .appservice - .register_appservice(®istration, &appservice_config_body) - .await - .map(|()| registration.id) - { - | Err(e) => return Err!("Failed to register appservice: {e}"), - | Ok(id) => write!(self, "Appservice registered with ID: {id}"), - }, - } - .await -} - -#[admin_command] -pub(super) async fn unregister(&self, appservice_identifier: String) -> Result { - match self - .services - .appservice - .unregister_appservice(&appservice_identifier) - .await - { - | Err(e) => return Err!("Failed to unregister appservice: {e}"), - | Ok(()) => write!(self, "Appservice unregistered."), - } - .await -} - -#[admin_command] -pub(super) async fn show_appservice_config(&self, appservice_identifier: String) -> Result { - match self - .services - .appservice - .get_registration(&appservice_identifier) - .await - { - | None => return Err!("Appservice does not exist."), - | Some(config) => { - let config_str = serde_yaml::to_string(&config)?; - write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```") - }, - } - .await -} - -#[admin_command] -pub(super) async fn list_registered(&self) -> Result { - self.services - .appservice - .iter_ids() - .collect() - .map(Ok) - .and_then(|appservices: Vec<_>| { - let len = appservices.len(); - let list = appservices.join(", "); - write!(self, "Appservices ({len}): {list}") - }) - .await -} diff --git a/src/admin/appservice/mod.rs b/src/admin/appservice/mod.rs deleted file mode 100644 index 2e0694aa..00000000 --- a/src/admin/appservice/mod.rs +++ /dev/null @@ -1,40 +0,0 @@ -mod commands; - -use clap::Subcommand; -use conduwuit::Result; - -use 
crate::admin_command_dispatch; - -#[derive(Debug, Subcommand)] -#[admin_command_dispatch] -pub(super) enum AppserviceCommand { - /// - Register an appservice using its registration YAML - /// - /// This command needs a YAML generated by an appservice (such as a bridge), - /// which must be provided in a Markdown code block below the command. - /// - /// Registering a new bridge using the ID of an existing bridge will replace - /// the old one. - Register, - - /// - Unregister an appservice using its ID - /// - /// You can find the ID using the `list-appservices` command. - Unregister { - /// The appservice to unregister - appservice_identifier: String, - }, - - /// - Show an appservice's config using its ID - /// - /// You can find the ID using the `list-appservices` command. - #[clap(alias("show"))] - ShowAppserviceConfig { - /// The appservice to show - appservice_identifier: String, - }, - - /// - List all the currently registered appservices - #[clap(alias("list"))] - ListRegistered, -} diff --git a/src/admin/check/commands.rs b/src/admin/check/commands.rs deleted file mode 100644 index 1ffc3ae5..00000000 --- a/src/admin/check/commands.rs +++ /dev/null @@ -1,26 +0,0 @@ -use conduwuit::Result; -use conduwuit_macros::implement; -use futures::StreamExt; - -use crate::Context; - -/// Uses the iterator in `src/database/key_value/users.rs` to iterator over -/// every user in our database (remote and local). 
Reports total count, any -/// errors if there were any, etc -#[implement(Context, params = "<'_>")] -pub(super) async fn check_all_users(&self) -> Result { - let timer = tokio::time::Instant::now(); - let users = self.services.users.iter().collect::>().await; - let query_time = timer.elapsed(); - - let total = users.len(); - let err_count = users.iter().filter(|_user| false).count(); - let ok_count = users.iter().filter(|_user| true).count(); - - self.write_str(&format!( - "Database query completed in {query_time:?}:\n\n```\nTotal entries: \ - {total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \ - {ok_count:?}\n```" - )) - .await -} diff --git a/src/admin/check/mod.rs b/src/admin/check/mod.rs deleted file mode 100644 index 30b335c4..00000000 --- a/src/admin/check/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -mod commands; - -use clap::Subcommand; -use conduwuit::Result; - -use crate::admin_command_dispatch; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -pub(super) enum CheckCommand { - CheckAllUsers, -} diff --git a/src/admin/context.rs b/src/admin/context.rs deleted file mode 100644 index 270537be..00000000 --- a/src/admin/context.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::{fmt, time::SystemTime}; - -use conduwuit::Result; -use conduwuit_service::Services; -use futures::{ - Future, FutureExt, TryFutureExt, - io::{AsyncWriteExt, BufWriter}, - lock::Mutex, -}; -use ruma::EventId; - -pub(crate) struct Context<'a> { - pub(crate) services: &'a Services, - pub(crate) body: &'a [&'a str], - pub(crate) timer: SystemTime, - pub(crate) reply_id: Option<&'a EventId>, - pub(crate) output: Mutex>>, -} - -impl Context<'_> { - pub(crate) fn write_fmt( - &self, - arguments: fmt::Arguments<'_>, - ) -> impl Future + Send + '_ + use<'_> { - let buf = format!("{arguments}"); - self.output.lock().then(async move |mut output| { - output.write_all(buf.as_bytes()).map_err(Into::into).await - }) - } - - pub(crate) fn write_str<'a>( - &'a self, - s: &'a 
str, - ) -> impl Future + Send + 'a { - self.output.lock().then(async move |mut output| { - output.write_all(s.as_bytes()).map_err(Into::into).await - }) - } -} diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs deleted file mode 100644 index d0debc2a..00000000 --- a/src/admin/debug/commands.rs +++ /dev/null @@ -1,908 +0,0 @@ -use std::{ - collections::HashMap, - fmt::Write, - iter::once, - time::{Instant, SystemTime}, -}; - -use conduwuit::{ - Err, Result, debug_error, err, info, - matrix::pdu::{PduEvent, PduId, RawPduId}, - trace, utils, - utils::{ - stream::{IterStream, ReadyExt}, - string::EMPTY, - }, - warn, -}; -use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, - api::federation::event::get_room_state, -}; -use service::rooms::{ - short::{ShortEventId, ShortRoomId}, - state_compressor::HashSetCompressStateEvent, -}; -use tracing_subscriber::EnvFilter; - -use crate::admin_command; - -#[admin_command] -pub(super) async fn echo(&self, message: Vec) -> Result { - let message = message.join(" "); - self.write_str(&message).await -} - -#[admin_command] -pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result { - let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else { - return Err!("Event not found."); - }; - - let room_id_str = event - .get("room_id") - .and_then(CanonicalJsonValue::as_str) - .ok_or_else(|| err!(Database("Invalid event in database")))?; - - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| err!(Database("Invalid room id field in event in database")))?; - - let start = Instant::now(); - let count = self - .services - .rooms - .auth_chain - .event_ids_iter(room_id, once(event_id.as_ref())) - .ready_filter_map(Result::ok) - .count() - .await; - - let elapsed = start.elapsed(); - let out = format!("Loaded auth chain with 
length {count} in {elapsed:?}"); - - self.write_str(&out).await -} - -#[admin_command] -pub(super) async fn parse_pdu(&self) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&EMPTY).trim() != "```" - { - return Err!("Expected code block in command body. Add --help for details."); - } - - let string = self.body[1..self.body.len().saturating_sub(1)].join("\n"); - match serde_json::from_str(&string) { - | Err(e) => return Err!("Invalid json in command body: {e}"), - | Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { - | Err(e) => return Err!("Could not parse PDU JSON: {e:?}"), - | Ok(hash) => { - let event_id = OwnedEventId::parse(format!("${hash}")); - match serde_json::from_value::(serde_json::to_value(value)?) { - | Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"), - | Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"), - } - }, - }, - } - .await -} - -#[admin_command] -pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result { - let mut outlier = false; - let mut pdu_json = self - .services - .rooms - .timeline - .get_non_outlier_pdu_json(&event_id) - .await; - - if pdu_json.is_err() { - outlier = true; - pdu_json = self.services.rooms.timeline.get_pdu_json(&event_id).await; - } - - match pdu_json { - | Err(_) => return Err!("PDU not found locally."), - | Ok(json) => { - let text = serde_json::to_string_pretty(&json)?; - let msg = if outlier { - "Outlier (Rejected / Soft Failed) PDU found in our database" - } else { - "PDU found in our database" - }; - write!(self, "{msg}\n```json\n{text}\n```",) - }, - } - .await -} - -#[admin_command] -pub(super) async fn get_short_pdu( - &self, - shortroomid: ShortRoomId, - shorteventid: ShortEventId, -) -> Result { - let pdu_id: RawPduId = PduId { - shortroomid, - shorteventid: shorteventid.into(), - } - .into(); - - let pdu_json = self - .services - .rooms - .timeline - 
.get_pdu_json_from_id(&pdu_id) - .await; - - match pdu_json { - | Err(_) => return Err!("PDU not found locally."), - | Ok(json) => { - let json_text = serde_json::to_string_pretty(&json)?; - write!(self, "```json\n{json_text}\n```") - }, - } - .await -} - -#[admin_command] -pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: bool) -> Result { - if !self.services.server.config.allow_federation { - return Err!("Federation is disabled on this homeserver.",); - } - - if server == self.services.globals.server_name() { - return Err!( - "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ - fetching local PDUs from the database.", - ); - } - - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&EMPTY).trim() != "```" - { - return Err!("Expected code block in command body. Add --help for details.",); - } - - let list = self - .body - .iter() - .collect::>() - .drain(1..self.body.len().saturating_sub(1)) - .filter_map(|pdu| EventId::parse(pdu).ok()) - .collect::>(); - - let mut failed_count: usize = 0; - let mut success_count: usize = 0; - - for event_id in list { - if force { - match self - .get_remote_pdu(event_id.to_owned(), server.clone()) - .await - { - | Err(e) => { - failed_count = failed_count.saturating_add(1); - self.services - .admin - .send_text(&format!("Failed to get remote PDU, ignoring error: {e}")) - .await; - - warn!("Failed to get remote PDU, ignoring error: {e}"); - }, - | _ => { - success_count = success_count.saturating_add(1); - }, - } - } else { - self.get_remote_pdu(event_id.to_owned(), server.clone()) - .await?; - success_count = success_count.saturating_add(1); - } - } - - let out = - format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures"); - - self.write_str(&out).await -} - -#[admin_command] -pub(super) async fn get_remote_pdu( - &self, - event_id: OwnedEventId, - server: OwnedServerName, -) -> Result { - if 
!self.services.server.config.allow_federation { - return Err!("Federation is disabled on this homeserver."); - } - - if server == self.services.globals.server_name() { - return Err!( - "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ - fetching local PDUs.", - ); - } - - match self - .services - .sending - .send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request { - event_id: event_id.clone(), - include_unredacted_content: None, - }) - .await - { - | Err(e) => - return Err!( - "Remote server did not have PDU or failed sending request to remote server: {e}" - ), - | Ok(response) => { - let json: CanonicalJsonObject = - serde_json::from_str(response.pdu.get()).map_err(|e| { - warn!( - "Requested event ID {event_id} from server but failed to convert from \ - RawValue to CanonicalJsonObject (malformed event/response?): {e}" - ); - err!(Request(Unknown( - "Received response from server but failed to parse PDU" - ))) - })?; - - trace!("Attempting to parse PDU: {:?}", &response.pdu); - let _parsed_pdu = { - let parsed_result = self - .services - .rooms - .event_handler - .parse_incoming_pdu(&response.pdu) - .boxed() - .await; - - let (event_id, value, room_id) = match parsed_result { - | Ok(t) => t, - | Err(e) => { - warn!("Failed to parse PDU: {e}"); - info!("Full PDU: {:?}", &response.pdu); - return Err!("Failed to parse PDU remote server {server} sent us: {e}"); - }, - }; - - vec![(event_id, value, room_id)] - }; - - info!("Attempting to handle event ID {event_id} as backfilled PDU"); - self.services - .rooms - .timeline - .backfill_pdu(&server, response.pdu) - .await?; - - let text = serde_json::to_string_pretty(&json)?; - let msg = "Got PDU from specified server and handled as backfilled"; - write!(self, "{msg}. 
Event body:\n```json\n{text}\n```") - }, - } - .await -} - -#[admin_command] -pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result { - let room_id = self.services.rooms.alias.resolve(&room).await?; - let room_state: Vec<_> = self - .services - .rooms - .state_accessor - .room_state_full_pdus(&room_id) - .map_ok(PduEvent::into_state_event) - .try_collect() - .await?; - - if room_state.is_empty() { - return Err!("Unable to find room state in our database (vector is empty)",); - } - - let json = serde_json::to_string_pretty(&room_state).map_err(|e| { - err!(Database( - "Failed to convert room state events to pretty JSON, possible invalid room state \ - events in our database {e}", - )) - })?; - - let out = format!("```json\n{json}\n```"); - self.write_str(&out).await -} - -#[admin_command] -pub(super) async fn ping(&self, server: OwnedServerName) -> Result { - if server == self.services.globals.server_name() { - return Err!("Not allowed to send federation requests to ourselves."); - } - - let timer = tokio::time::Instant::now(); - - match self - .services - .sending - .send_federation_request( - &server, - ruma::api::federation::discovery::get_server_version::v1::Request {}, - ) - .await - { - | Err(e) => { - return Err!("Failed sending federation request to specified server:\n\n{e}"); - }, - | Ok(response) => { - let ping_time = timer.elapsed(); - let json_text_res = serde_json::to_string_pretty(&response.server); - - let out = if let Ok(json) = json_text_res { - format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```") - } else { - format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}") - }; - - write!(self, "{out}") - }, - } - .await -} - -#[admin_command] -pub(super) async fn force_device_list_updates(&self) -> Result { - // Force E2EE device list updates for all users - self.services - .users - .stream() - .for_each(|user_id| self.services.users.mark_device_key_update(user_id)) - .await; - - 
write!(self, "Marked all devices for all users as having new keys to update").await -} - -#[admin_command] -pub(super) async fn change_log_level(&self, filter: Option, reset: bool) -> Result { - let handles = &["console"]; - - if reset { - let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) { - | Ok(s) => s, - | Err(e) => return Err!("Log level from config appears to be invalid now: {e}"), - }; - - match self - .services - .server - .log - .reload - .reload(&old_filter_layer, Some(handles)) - { - | Err(e) => - return Err!("Failed to modify and reload the global tracing log level: {e}"), - | Ok(()) => { - let value = &self.services.server.config.log; - let out = format!("Successfully changed log level back to config value {value}"); - return self.write_str(&out).await; - }, - } - } - - if let Some(filter) = filter { - let new_filter_layer = match EnvFilter::try_new(filter) { - | Ok(s) => s, - | Err(e) => return Err!("Invalid log level filter specified: {e}"), - }; - - match self - .services - .server - .log - .reload - .reload(&new_filter_layer, Some(handles)) - { - | Ok(()) => return self.write_str("Successfully changed log level").await, - | Err(e) => - return Err!("Failed to modify and reload the global tracing log level: {e}"), - } - } - - Err!("No log level was specified.") -} - -#[admin_command] -pub(super) async fn sign_json(&self) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" - { - return Err!("Expected code block in command body. 
Add --help for details."); - } - - let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); - match serde_json::from_str(&string) { - | Err(e) => return Err!("Invalid json: {e}"), - | Ok(mut value) => { - self.services.server_keys.sign_json(&mut value)?; - let json_text = serde_json::to_string_pretty(&value)?; - write!(self, "{json_text}") - }, - } - .await -} - -#[admin_command] -pub(super) async fn verify_json(&self) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" - { - return Err!("Expected code block in command body. Add --help for details."); - } - - let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); - match serde_json::from_str::(&string) { - | Err(e) => return Err!("Invalid json: {e}"), - | Ok(value) => match self.services.server_keys.verify_json(&value, None).await { - | Err(e) => return Err!("Signature verification failed: {e}"), - | Ok(()) => write!(self, "Signature correct"), - }, - } - .await -} - -#[admin_command] -pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result { - use ruma::signatures::Verified; - - let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?; - - event.remove("event_id"); - let msg = match self.services.server_keys.verify_event(&event, None).await { - | Err(e) => return Err(e), - | Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).", - | Ok(Verified::All) => "signatures and hashes OK.", - }; - - self.write_str(msg).await -} - -#[admin_command] -#[tracing::instrument(skip(self))] -pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { - if !self - .services - .rooms - .state_cache - .server_in_room(&self.services.server.name, &room_id) - .await - { - return Err!("We are not participating in the room / we don't know about the room ID.",); - } - - let first_pdu = self - .services - .rooms - .timeline - 
.first_pdu_in_room(&room_id) - .await - .map_err(|_| err!(Database("Failed to find the first PDU in database")))?; - - let out = format!("{first_pdu:?}"); - self.write_str(&out).await -} - -#[admin_command] -#[tracing::instrument(skip(self))] -pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { - if !self - .services - .rooms - .state_cache - .server_in_room(&self.services.server.name, &room_id) - .await - { - return Err!("We are not participating in the room / we don't know about the room ID."); - } - - let latest_pdu = self - .services - .rooms - .timeline - .latest_pdu_in_room(&room_id) - .await - .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; - - let out = format!("{latest_pdu:?}"); - self.write_str(&out).await -} - -#[admin_command] -#[tracing::instrument(skip(self))] -pub(super) async fn force_set_room_state_from_server( - &self, - room_id: OwnedRoomId, - server_name: OwnedServerName, -) -> Result { - if !self - .services - .rooms - .state_cache - .server_in_room(&self.services.server.name, &room_id) - .await - { - return Err!("We are not participating in the room / we don't know about the room ID."); - } - - let first_pdu = self - .services - .rooms - .timeline - .latest_pdu_in_room(&room_id) - .await - .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; - - let room_version = self.services.rooms.state.get_room_version(&room_id).await?; - - let mut state: HashMap = HashMap::new(); - - let remote_state_response = self - .services - .sending - .send_federation_request(&server_name, get_room_state::v1::Request { - room_id: room_id.clone(), - event_id: first_pdu.event_id.clone(), - }) - .await?; - - for pdu in remote_state_response.pdus.clone() { - match self - .services - .rooms - .event_handler - .parse_incoming_pdu(&pdu) - .await - { - | Ok(t) => t, - | Err(e) => { - warn!("Could not parse PDU, ignoring: {e}"); - continue; - }, - }; - } - - info!("Going through room_state 
response PDUs"); - for result in remote_state_response.pdus.iter().map(|pdu| { - self.services - .server_keys - .validate_and_add_event_id(pdu, &room_version) - }) { - let Ok((event_id, value)) = result.await else { - continue; - }; - - let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { - debug_error!("Invalid PDU in fetching remote room state PDUs response: {value:#?}"); - err!(BadServerResponse(debug_error!("Invalid PDU in send_join response: {e:?}"))) - })?; - - self.services - .rooms - .outlier - .add_pdu_outlier(&event_id, &value); - - if let Some(state_key) = &pdu.state_key { - let shortstatekey = self - .services - .rooms - .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key) - .await; - - state.insert(shortstatekey, pdu.event_id.clone()); - } - } - - info!("Going through auth_chain response"); - for result in remote_state_response.auth_chain.iter().map(|pdu| { - self.services - .server_keys - .validate_and_add_event_id(pdu, &room_version) - }) { - let Ok((event_id, value)) = result.await else { - continue; - }; - - self.services - .rooms - .outlier - .add_pdu_outlier(&event_id, &value); - } - - let new_room_state = self - .services - .rooms - .event_handler - .resolve_state(&room_id, &room_version, state) - .await?; - - info!("Forcing new room state"); - let HashSetCompressStateEvent { - shortstatehash: short_state_hash, - added, - removed, - } = self - .services - .rooms - .state_compressor - .save_state(room_id.clone().as_ref(), new_room_state) - .await?; - - let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await; - - self.services - .rooms - .state - .force_state(room_id.clone().as_ref(), short_state_hash, added, removed, &state_lock) - .await?; - - info!( - "Updating joined counts for room just in case (e.g. 
we may have found a difference in \ - the room's m.room.member state" - ); - self.services - .rooms - .state_cache - .update_joined_count(&room_id) - .await; - - self.write_str("Successfully forced the room state from the requested remote server.") - .await -} - -#[admin_command] -pub(super) async fn get_signing_keys( - &self, - server_name: Option, - notary: Option, - query: bool, -) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone()); - - if let Some(notary) = notary { - let signing_keys = self - .services - .server_keys - .notary_request(¬ary, &server_name) - .await?; - - let out = format!("```rs\n{signing_keys:#?}\n```"); - return self.write_str(&out).await; - } - - let signing_keys = if query { - self.services - .server_keys - .server_request(&server_name) - .await? - } else { - self.services - .server_keys - .signing_keys_for(&server_name) - .await? - }; - - let out = format!("```rs\n{signing_keys:#?}\n```"); - self.write_str(&out).await -} - -#[admin_command] -pub(super) async fn get_verify_keys(&self, server_name: Option) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone()); - - let keys = self - .services - .server_keys - .verify_keys_for(&server_name) - .await; - - let mut out = String::new(); - writeln!(out, "| Key ID | Public Key |")?; - writeln!(out, "| --- | --- |")?; - for (key_id, key) in keys { - writeln!(out, "| {key_id} | {key:?} |")?; - } - - self.write_str(&out).await -} - -#[admin_command] -pub(super) async fn resolve_true_destination( - &self, - server_name: OwnedServerName, - no_cache: bool, -) -> Result { - if !self.services.server.config.allow_federation { - return Err!("Federation is disabled on this homeserver.",); - } - - if server_name == self.services.server.name { - return Err!( - "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for \ - fetching local PDUs.", - ); - } - - let actual = self - .services - .resolver - .resolve_actual_dest(&server_name, !no_cache) - .await?; - - let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host); - self.write_str(&msg).await -} - -#[admin_command] -pub(super) async fn memory_stats(&self, opts: Option) -> Result { - const OPTS: &str = "abcdefghijklmnopqrstuvwxyz"; - - let opts: String = OPTS - .chars() - .filter(|&c| { - let allow_any = opts.as_ref().is_some_and(|opts| opts == "*"); - - let allow = allow_any || opts.as_ref().is_some_and(|opts| opts.contains(c)); - - !allow - }) - .collect(); - - let stats = conduwuit::alloc::memory_stats(&opts).unwrap_or_default(); - - self.write_str("```\n").await?; - self.write_str(&stats).await?; - self.write_str("\n```").await?; - Ok(()) -} - -#[cfg(tokio_unstable)] -#[admin_command] -pub(super) async fn runtime_metrics(&self) -> Result { - let out = self.services.server.metrics.runtime_metrics().map_or_else( - || "Runtime metrics are not available.".to_owned(), - |metrics| { - format!( - "```rs\nnum_workers: {}\nnum_alive_tasks: {}\nglobal_queue_depth: {}\n```", - metrics.num_workers(), - metrics.num_alive_tasks(), - metrics.global_queue_depth() - ) - }, - ); - - self.write_str(&out).await -} - -#[cfg(not(tokio_unstable))] -#[admin_command] -pub(super) async fn runtime_metrics(&self) -> Result { - self.write_str("Runtime metrics require building with `tokio_unstable`.") - .await -} - -#[cfg(tokio_unstable)] -#[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { - let out = self.services.server.metrics.runtime_interval().map_or_else( - || "Runtime metrics are not available.".to_owned(), - |metrics| format!("```rs\n{metrics:#?}\n```"), - ); - - self.write_str(&out).await -} - -#[cfg(not(tokio_unstable))] -#[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { - self.write_str("Runtime metrics require building with `tokio_unstable`.") - 
.await -} - -#[admin_command] -pub(super) async fn time(&self) -> Result { - let now = SystemTime::now(); - let now = utils::time::format(now, "%+"); - - self.write_str(&now).await -} - -#[admin_command] -pub(super) async fn list_dependencies(&self, names: bool) -> Result { - if names { - let out = info::cargo::dependencies_names().join(" "); - return self.write_str(&out).await; - } - - let mut out = String::new(); - let deps = info::cargo::dependencies(); - writeln!(out, "| name | version | features |")?; - writeln!(out, "| ---- | ------- | -------- |")?; - for (name, dep) in deps { - let version = dep.try_req().unwrap_or("*"); - let feats = dep.req_features(); - let feats = if !feats.is_empty() { - feats.join(" ") - } else { - String::new() - }; - - writeln!(out, "| {name} | {version} | {feats} |")?; - } - - self.write_str(&out).await -} - -#[admin_command] -pub(super) async fn database_stats( - &self, - property: Option, - map: Option, -) -> Result { - let map_name = map.as_ref().map_or(EMPTY, String::as_str); - let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); - self.services - .db - .iter() - .filter(|&(&name, _)| map_name.is_empty() || map_name == name) - .try_stream() - .try_for_each(|(&name, map)| { - let res = map.property(&property).expect("invalid property"); - writeln!(self, "##### {name}:\n```\n{}\n```", res.trim()) - }) - .await -} - -#[admin_command] -pub(super) async fn database_files(&self, map: Option, level: Option) -> Result { - let mut files: Vec<_> = self.services.db.db.file_list().collect::>()?; - - files.sort_by_key(|f| f.name.clone()); - - writeln!(self, "| lev | sst | keys | dels | size | column |").await?; - writeln!(self, "| ---: | :--- | ---: | ---: | ---: | :--- |").await?; - files - .into_iter() - .filter(|file| { - map.as_deref() - .is_none_or(|map| map == file.column_family_name) - }) - .filter(|file| level.as_ref().is_none_or(|&level| level == file.level)) - .try_stream() - .try_for_each(|file| { - writeln!( - 
self, - "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", - file.level, - file.name, - file.num_entries, - file.num_deletions, - file.size, - file.column_family_name, - ) - }) - .await -} - -#[admin_command] -pub(super) async fn trim_memory(&self) -> Result { - conduwuit::alloc::trim(None)?; - - writeln!(self, "done").await -} diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs deleted file mode 100644 index 9b86f18c..00000000 --- a/src/admin/debug/mod.rs +++ /dev/null @@ -1,242 +0,0 @@ -mod commands; -pub(crate) mod tester; - -use clap::Subcommand; -use conduwuit::Result; -use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName}; -use service::rooms::short::{ShortEventId, ShortRoomId}; - -use self::tester::TesterCommand; -use crate::admin_command_dispatch; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -pub(super) enum DebugCommand { - /// - Echo input of admin command - Echo { - message: Vec, - }, - - /// - Get the auth_chain of a PDU - GetAuthChain { - /// An event ID (the $ character followed by the base64 reference hash) - event_id: OwnedEventId, - }, - - /// - Parse and print a PDU from a JSON - /// - /// The PDU event is only checked for validity and is not added to the - /// database. - /// - /// This command needs a JSON blob provided in a Markdown code block below - /// the command. - ParsePdu, - - /// - Retrieve and print a PDU by EventID from the conduwuit database - GetPdu { - /// An event ID (a $ followed by the base64 reference hash) - event_id: OwnedEventId, - }, - - /// - Retrieve and print a PDU by PduId from the conduwuit database - GetShortPdu { - /// Shortroomid integer - shortroomid: ShortRoomId, - - /// Shorteventid integer - shorteventid: ShortEventId, - }, - - /// - Attempts to retrieve a PDU from a remote server. Inserts it into our - /// database/timeline if found and we do not have this PDU already - /// (following normal event auth rules, handles it as an incoming PDU). 
- GetRemotePdu { - /// An event ID (a $ followed by the base64 reference hash) - event_id: OwnedEventId, - - /// Argument for us to attempt to fetch the event from the - /// specified remote server. - server: OwnedServerName, - }, - - /// - Same as `get-remote-pdu` but accepts a codeblock newline delimited - /// list of PDUs and a single server to fetch from - GetRemotePduList { - /// Argument for us to attempt to fetch all the events from the - /// specified remote server. - server: OwnedServerName, - - /// If set, ignores errors, else stops at the first error/failure. - #[arg(short, long)] - force: bool, - }, - - /// - Gets all the room state events for the specified room. - /// - /// This is functionally equivalent to `GET - /// /_matrix/client/v3/rooms/{roomid}/state`, except the admin command does - /// *not* check if the sender user is allowed to see state events. This is - /// done because it's implied that server admins here have database access - /// and can see/get room info themselves anyways if they were malicious - /// admins. - /// - /// Of course the check is still done on the actual client API. - GetRoomState { - /// Room ID - room_id: OwnedRoomOrAliasId, - }, - - /// - Get and display signing keys from local cache or remote server. - GetSigningKeys { - server_name: Option, - - #[arg(long)] - notary: Option, - - #[arg(short, long)] - query: bool, - }, - - /// - Get and display signing keys from local cache or remote server. - GetVerifyKeys { - server_name: Option, - }, - - /// - Sends a federation request to the remote server's - /// `/_matrix/federation/v1/version` endpoint and measures the latency it - /// took for the server to respond - Ping { - server: OwnedServerName, - }, - - /// - Forces device lists for all local and remote users to be updated (as - /// having new keys available) - ForceDeviceListUpdates, - - /// - Change tracing log level/filter on the fly - /// - /// This accepts the same format as the `log` config option. 
- ChangeLogLevel { - /// Log level/filter - filter: Option, - - /// Resets the log level/filter to the one in your config - #[arg(short, long)] - reset: bool, - }, - - /// - Verify json signatures - /// - /// This command needs a JSON blob provided in a Markdown code block below - /// the command. - SignJson, - - /// - Verify json signatures - /// - /// This command needs a JSON blob provided in a Markdown code block below - /// the command. - VerifyJson, - - /// - Verify PDU - /// - /// This re-verifies a PDU existing in the database found by ID. - VerifyPdu { - event_id: OwnedEventId, - }, - - /// - Prints the very first PDU in the specified room (typically - /// m.room.create) - FirstPduInRoom { - /// The room ID - room_id: OwnedRoomId, - }, - - /// - Prints the latest ("last") PDU in the specified room (typically a - /// message) - LatestPduInRoom { - /// The room ID - room_id: OwnedRoomId, - }, - - /// - Forcefully replaces the room state of our local copy of the specified - /// room, with the copy (auth chain and room state events) the specified - /// remote server says. - /// - /// A common desire for room deletion is to simply "reset" our copy of the - /// room. While this admin command is not a replacement for that, if you - /// know you have split/broken room state and you know another server in the - /// room that has the best/working room state, this command can let you use - /// their room state. Such example is your server saying users are in a - /// room, but other servers are saying they're not in the room in question. - /// - /// This command will get the latest PDU in the room we know about, and - /// request the room state at that point in time via - /// `/_matrix/federation/v1/state/{roomId}`. 
- ForceSetRoomStateFromServer { - /// The impacted room ID - room_id: OwnedRoomId, - /// The server we will use to query the room state for - server_name: OwnedServerName, - }, - - /// - Runs a server name through conduwuit's true destination resolution - /// process - /// - /// Useful for debugging well-known issues - ResolveTrueDestination { - server_name: OwnedServerName, - - #[arg(short, long)] - no_cache: bool, - }, - - /// - Print extended memory usage - /// - /// Optional argument is a character mask (a sequence of characters in any - /// order) which enable additional extended statistics. Known characters are - /// "abdeglmx". For convenience, a '*' will enable everything. - MemoryStats { - opts: Option, - }, - - /// - Print general tokio runtime metric totals. - RuntimeMetrics, - - /// - Print detailed tokio runtime metrics accumulated since last command - /// invocation. - RuntimeInterval, - - /// - Print the current time - Time, - - /// - List dependencies - ListDependencies { - #[arg(short, long)] - names: bool, - }, - - /// - Get database statistics - DatabaseStats { - property: Option, - - #[arg(short, long, alias("column"))] - map: Option, - }, - - /// - Trim memory usage - TrimMemory, - - /// - List database files - DatabaseFiles { - map: Option, - - #[arg(long)] - level: Option, - }, - - /// - Developer test stubs - #[command(subcommand)] - #[allow(non_snake_case)] - #[clap(hide(true))] - Tester(TesterCommand), -} diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs deleted file mode 100644 index 0a2b1516..00000000 --- a/src/admin/debug/tester.rs +++ /dev/null @@ -1,52 +0,0 @@ -use conduwuit::{Err, Result}; - -use crate::{admin_command, admin_command_dispatch}; - -#[admin_command_dispatch] -#[derive(Debug, clap::Subcommand)] -pub(crate) enum TesterCommand { - Panic, - Failure, - Tester, - Timer, -} - -#[rustfmt::skip] -#[admin_command] -async fn panic(&self) -> Result { - - panic!("panicked") -} - -#[rustfmt::skip] -#[admin_command] 
-async fn failure(&self) -> Result { - - Err!("failed") -} - -#[inline(never)] -#[rustfmt::skip] -#[admin_command] -async fn tester(&self) -> Result { - - self.write_str("Ok").await -} - -#[inline(never)] -#[rustfmt::skip] -#[admin_command] -async fn timer(&self) -> Result { - let started = std::time::Instant::now(); - timed(self.body); - - let elapsed = started.elapsed(); - self.write_str(&format!("completed in {elapsed:#?}")).await -} - -#[inline(never)] -#[rustfmt::skip] -#[allow(unused_variables)] -fn timed(body: &[&str]) { - -} diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs deleted file mode 100644 index 545dcbca..00000000 --- a/src/admin/federation/commands.rs +++ /dev/null @@ -1,122 +0,0 @@ -use std::fmt::Write; - -use conduwuit::{Err, Result}; -use futures::StreamExt; -use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; - -use crate::{admin_command, get_room_info}; - -#[admin_command] -pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result { - self.services.rooms.metadata.disable_room(&room_id, true); - self.write_str("Room disabled.").await -} - -#[admin_command] -pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result { - self.services.rooms.metadata.disable_room(&room_id, false); - self.write_str("Room enabled.").await -} - -#[admin_command] -pub(super) async fn incoming_federation(&self) -> Result { - let msg = { - let map = self - .services - .rooms - .event_handler - .federation_handletime - .read() - .expect("locked"); - - let mut msg = format!("Handling {} incoming pdus:\n", map.len()); - for (r, (e, i)) in map.iter() { - let elapsed = i.elapsed(); - writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?; - } - - msg - }; - - self.write_str(&msg).await -} - -#[admin_command] -pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName) -> Result { - let response = self - .services - .client - .default - 
.get(format!("https://{server_name}/.well-known/matrix/support")) - .send() - .await?; - - let text = response.text().await?; - - if text.is_empty() { - return Err!("Response text/body is empty."); - } - - if text.len() > 1500 { - return Err!( - "Response text/body is over 1500 characters, assuming no support well-known.", - ); - } - - let json: serde_json::Value = match serde_json::from_str(&text) { - | Ok(json) => json, - | Err(_) => { - return Err!("Response text/body is not valid JSON.",); - }, - }; - - let pretty_json: String = match serde_json::to_string_pretty(&json) { - | Ok(json) => json, - | Err(_) => { - return Err!("Response text/body is not valid JSON.",); - }, - }; - - self.write_str(&format!("Got JSON response:\n\n```json\n{pretty_json}\n```")) - .await -} - -#[admin_command] -pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result { - if user_id.server_name() == self.services.server.name { - return Err!( - "User belongs to our server, please use `list-joined-rooms` user admin command \ - instead.", - ); - } - - if !self.services.users.exists(&user_id).await { - return Err!("Remote user does not exist in our database.",); - } - - let mut rooms: Vec<(OwnedRoomId, u64, String)> = self - .services - .rooms - .state_cache - .rooms_joined(&user_id) - .then(|room_id| get_room_info(self.services, room_id)) - .collect() - .await; - - if rooms.is_empty() { - return Err!("User is not in any rooms."); - } - - rooms.sort_by_key(|r| r.1); - rooms.reverse(); - - let num = rooms.len(); - let body = rooms - .iter() - .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) - .collect::>() - .join("\n"); - - self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",)) - .await -} diff --git a/src/admin/federation/mod.rs b/src/admin/federation/mod.rs deleted file mode 100644 index 2c539adc..00000000 --- a/src/admin/federation/mod.rs +++ /dev/null @@ -1,42 +0,0 @@ -mod commands; - -use 
clap::Subcommand; -use conduwuit::Result; -use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; - -use crate::admin_command_dispatch; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -pub(super) enum FederationCommand { - /// - List all rooms we are currently handling an incoming pdu from - IncomingFederation, - - /// - Disables incoming federation handling for a room. - DisableRoom { - room_id: OwnedRoomId, - }, - - /// - Enables incoming federation handling for a room again. - EnableRoom { - room_id: OwnedRoomId, - }, - - /// - Fetch `/.well-known/matrix/support` from the specified server - /// - /// Despite the name, this is not a federation endpoint and does not go - /// through the federation / server resolution process as per-spec this is - /// supposed to be served at the server_name. - /// - /// Respecting homeservers put this file here for listing administration, - /// moderation, and security inquiries. This command provides a way to - /// easily fetch that information. 
- FetchSupportWellKnown { - server_name: OwnedServerName, - }, - - /// - Lists all the rooms we share/track with the specified *remote* user - RemoteUserInRooms { - user_id: OwnedUserId, - }, -} diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs deleted file mode 100644 index 7aed28db..00000000 --- a/src/admin/media/commands.rs +++ /dev/null @@ -1,376 +0,0 @@ -use std::time::Duration; - -use conduwuit::{ - Err, Result, debug, debug_info, debug_warn, error, info, trace, - utils::time::parse_timepoint_ago, warn, -}; -use conduwuit_service::media::Dim; -use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName}; - -use crate::{admin_command, utils::parse_local_user_id}; - -#[admin_command] -pub(super) async fn delete( - &self, - mxc: Option, - event_id: Option, -) -> Result { - if event_id.is_some() && mxc.is_some() { - return Err!("Please specify either an MXC or an event ID, not both.",); - } - - if let Some(mxc) = mxc { - trace!("Got MXC URL: {mxc}"); - self.services - .media - .delete(&mxc.as_str().try_into()?) - .await?; - - return Err!("Deleted the MXC from our database and on our filesystem.",); - } - - if let Some(event_id) = event_id { - trace!("Got event ID to delete media from: {event_id}"); - - let mut mxc_urls = Vec::with_capacity(4); - - // parsing the PDU for any MXC URLs begins here - match self.services.rooms.timeline.get_pdu_json(&event_id).await { - | Ok(event_json) => { - if let Some(content_key) = event_json.get("content") { - debug!("Event ID has \"content\"."); - let content_obj = content_key.as_object(); - - if let Some(content) = content_obj { - // 1. 
attempts to parse the "url" key - debug!("Attempting to go into \"url\" key for main media file"); - if let Some(url) = content.get("url") { - debug!("Got a URL in the event ID {event_id}: {url}"); - - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); - } else { - info!( - "Found a URL in the event ID {event_id} but did not start \ - with mxc://, ignoring" - ); - } - } - - // 2. attempts to parse the "info" key - debug!("Attempting to go into \"info\" key for thumbnails"); - if let Some(info_key) = content.get("info") { - debug!("Event ID has \"info\"."); - let info_obj = info_key.as_object(); - - if let Some(info) = info_obj { - if let Some(thumbnail_url) = info.get("thumbnail_url") { - debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - - if thumbnail_url.to_string().starts_with("\"mxc://") { - debug!( - "Pushing thumbnail URL {thumbnail_url} to list of \ - MXCs to delete" - ); - let final_thumbnail_url = - thumbnail_url.to_string().replace('"', ""); - mxc_urls.push(final_thumbnail_url); - } else { - info!( - "Found a thumbnail URL in the event ID {event_id} \ - but did not start with mxc://, ignoring" - ); - } - } else { - info!( - "No \"thumbnail_url\" key in \"info\" key, assuming no \ - thumbnails." - ); - } - } - } - - // 3. 
attempts to parse the "file" key - debug!("Attempting to go into \"file\" key"); - if let Some(file_key) = content.get("file") { - debug!("Event ID has \"file\"."); - let file_obj = file_key.as_object(); - - if let Some(file) = file_obj { - if let Some(url) = file.get("url") { - debug!("Found url in file key: {url}"); - - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); - } else { - warn!( - "Found a URL in the event ID {event_id} but did not \ - start with mxc://, ignoring" - ); - } - } else { - error!("No \"url\" key in \"file\" key."); - } - } - } - } else { - return Err!( - "Event ID does not have a \"content\" key or failed parsing the \ - event ID JSON.", - ); - } - } else { - return Err!( - "Event ID does not have a \"content\" key, this is not a message or an \ - event type that contains media.", - ); - } - }, - | _ => { - return Err!("Event ID does not exist or is not known to us.",); - }, - } - - if mxc_urls.is_empty() { - return Err!("Parsed event ID but found no MXC URLs.",); - } - - let mut mxc_deletion_count: usize = 0; - - for mxc_url in mxc_urls { - match self - .services - .media - .delete(&mxc_url.as_str().try_into()?) - .await - { - | Ok(()) => { - debug_info!("Successfully deleted {mxc_url} from filesystem and database"); - mxc_deletion_count = mxc_deletion_count.saturating_add(1); - }, - | Err(e) => { - debug_warn!("Failed to delete {mxc_url}, ignoring error and skipping: {e}"); - continue; - }, - } - } - - return self - .write_str(&format!( - "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem \ - from event ID {event_id}." - )) - .await; - } - - Err!( - "Please specify either an MXC using --mxc or an event ID using --event-id of the \ - message containing an image. See --help for details." 
- ) -} - -#[admin_command] -pub(super) async fn delete_list(&self) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" - { - return Err!("Expected code block in command body. Add --help for details.",); - } - - let mut failed_parsed_mxcs: usize = 0; - - let mxc_list = self - .body - .to_vec() - .drain(1..self.body.len().checked_sub(1).unwrap()) - .filter_map(|mxc_s| { - mxc_s - .try_into() - .inspect_err(|e| { - debug_warn!("Failed to parse user-provided MXC URI: {e}"); - failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1); - }) - .ok() - }) - .collect::>>(); - - let mut mxc_deletion_count: usize = 0; - - for mxc in &mxc_list { - trace!(%failed_parsed_mxcs, %mxc_deletion_count, "Deleting MXC {mxc} in bulk"); - match self.services.media.delete(mxc).await { - | Ok(()) => { - debug_info!("Successfully deleted {mxc} from filesystem and database"); - mxc_deletion_count = mxc_deletion_count.saturating_add(1); - }, - | Err(e) => { - debug_warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); - continue; - }, - } - } - - self.write_str(&format!( - "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \ - and the filesystem. 
{failed_parsed_mxcs} MXCs failed to be parsed from the database.", - )) - .await -} - -#[admin_command] -pub(super) async fn delete_past_remote_media( - &self, - duration: String, - before: bool, - after: bool, - yes_i_want_to_delete_local_media: bool, -) -> Result { - if before && after { - return Err!("Please only pick one argument, --before or --after.",); - } - assert!(!(before && after), "--before and --after should not be specified together"); - - let duration = parse_timepoint_ago(&duration)?; - let deleted_count = self - .services - .media - .delete_all_remote_media_at_after_time( - duration, - before, - after, - yes_i_want_to_delete_local_media, - ) - .await?; - - self.write_str(&format!("Deleted {deleted_count} total files.",)) - .await -} - -#[admin_command] -pub(super) async fn delete_all_from_user(&self, username: String) -> Result { - let user_id = parse_local_user_id(self.services, &username)?; - - let deleted_count = self.services.media.delete_from_user(&user_id).await?; - - self.write_str(&format!("Deleted {deleted_count} total files.",)) - .await -} - -#[admin_command] -pub(super) async fn delete_all_from_server( - &self, - server_name: OwnedServerName, - yes_i_want_to_delete_local_media: bool, -) -> Result { - if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media { - return Err!("This command only works for remote media by default.",); - } - - let Ok(all_mxcs) = self - .services - .media - .get_all_mxcs() - .await - .inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}")) - else { - return Err!("Failed to get MXC URIs from our database",); - }; - - let mut deleted_count: usize = 0; - - for mxc in all_mxcs { - let Ok(mxc_server_name) = mxc.server_name().inspect_err(|e| { - debug_warn!( - "Failed to parse MXC {mxc} server name from database, ignoring error and \ - skipping: {e}" - ); - }) else { - continue; - }; - - if mxc_server_name != server_name - || 
(self.services.globals.server_is_ours(mxc_server_name) - && !yes_i_want_to_delete_local_media) - { - trace!("skipping MXC URI {mxc}"); - continue; - } - - let mxc: Mxc<'_> = mxc.as_str().try_into()?; - - match self.services.media.delete(&mxc).await { - | Ok(()) => { - deleted_count = deleted_count.saturating_add(1); - }, - | Err(e) => { - debug_warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); - continue; - }, - } - } - - self.write_str(&format!("Deleted {deleted_count} total files.",)) - .await -} - -#[admin_command] -pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { - let mxc: Mxc<'_> = mxc.as_str().try_into()?; - let metadata = self.services.media.get_metadata(&mxc).await; - - self.write_str(&format!("```\n{metadata:#?}\n```")).await -} - -#[admin_command] -pub(super) async fn get_remote_file( - &self, - mxc: OwnedMxcUri, - server: Option, - timeout: u32, -) -> Result { - let mxc: Mxc<'_> = mxc.as_str().try_into()?; - let timeout = Duration::from_millis(timeout.into()); - let mut result = self - .services - .media - .fetch_remote_content(&mxc, None, server.as_deref(), timeout) - .await?; - - // Grab the length of the content before clearing it to not flood the output - let len = result.content.as_ref().expect("content").len(); - result.content.as_mut().expect("content").clear(); - - self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) - .await -} - -#[admin_command] -pub(super) async fn get_remote_thumbnail( - &self, - mxc: OwnedMxcUri, - server: Option, - timeout: u32, - width: u32, - height: u32, -) -> Result { - let mxc: Mxc<'_> = mxc.as_str().try_into()?; - let timeout = Duration::from_millis(timeout.into()); - let dim = Dim::new(width, height, None); - let mut result = self - .services - .media - .fetch_remote_thumbnail(&mxc, None, server.as_deref(), timeout, &dim) - .await?; - - // Grab the length of the content before clearing it to not flood the output - let len = 
result.content.as_ref().expect("content").len(); - result.content.as_mut().expect("content").clear(); - - self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) - .await -} diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs deleted file mode 100644 index d1e6cd3a..00000000 --- a/src/admin/media/mod.rs +++ /dev/null @@ -1,99 +0,0 @@ -#![allow(rustdoc::broken_intra_doc_links)] -mod commands; - -use clap::Subcommand; -use conduwuit::Result; -use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName}; - -use crate::admin_command_dispatch; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -pub(super) enum MediaCommand { - /// - Deletes a single media file from our database and on the filesystem - /// via a single MXC URL or event ID (not redacted) - Delete { - /// The MXC URL to delete - #[arg(long)] - mxc: Option, - - /// - The message event ID which contains the media and thumbnail MXC - /// URLs - #[arg(long)] - event_id: Option, - }, - - /// - Deletes a codeblock list of MXC URLs from our database and on the - /// filesystem. This will always ignore errors. - DeleteList, - - /// - Deletes all remote (and optionally local) media created before or - /// after [duration] time using filesystem metadata first created at date, - /// or fallback to last modified date. This will always ignore errors by - /// default. - DeletePastRemoteMedia { - /// - The relative time (e.g. 30s, 5m, 7d) within which to search - duration: String, - - /// - Only delete media created before [duration] ago - #[arg(long, short)] - before: bool, - - /// - Only delete media created after [duration] ago - #[arg(long, short)] - after: bool, - - /// - Long argument to additionally delete local media - #[arg(long)] - yes_i_want_to_delete_local_media: bool, - }, - - /// - Deletes all the local media from a local user on our server. This will - /// always ignore errors by default. 
- DeleteAllFromUser { - username: String, - }, - - /// - Deletes all remote media from the specified remote server. This will - /// always ignore errors by default. - DeleteAllFromServer { - server_name: OwnedServerName, - - /// Long argument to delete local media - #[arg(long)] - yes_i_want_to_delete_local_media: bool, - }, - - GetFileInfo { - /// The MXC URL to lookup info for. - mxc: OwnedMxcUri, - }, - - GetRemoteFile { - /// The MXC URL to fetch - mxc: OwnedMxcUri, - - #[arg(short, long)] - server: Option, - - #[arg(short, long, default_value("10000"))] - timeout: u32, - }, - - GetRemoteThumbnail { - /// The MXC URL to fetch - mxc: OwnedMxcUri, - - #[arg(short, long)] - server: Option, - - #[arg(short, long, default_value("10000"))] - timeout: u32, - - #[arg(short, long, default_value("800"))] - width: u32, - - #[arg(short, long, default_value("800"))] - height: u32, - }, -} diff --git a/src/admin/mod.rs b/src/admin/mod.rs deleted file mode 100644 index 1f777fa9..00000000 --- a/src/admin/mod.rs +++ /dev/null @@ -1,58 +0,0 @@ -#![recursion_limit = "192"] -#![allow(clippy::wildcard_imports)] -#![allow(clippy::enum_glob_use)] -#![allow(clippy::too_many_arguments)] - -pub(crate) mod admin; -pub(crate) mod context; -pub(crate) mod processor; -mod tests; -pub(crate) mod utils; - -pub(crate) mod appservice; -pub(crate) mod check; -pub(crate) mod debug; -pub(crate) mod federation; -pub(crate) mod media; -pub(crate) mod query; -pub(crate) mod room; -pub(crate) mod server; -pub(crate) mod user; - -extern crate conduwuit_api as api; -extern crate conduwuit_core as conduwuit; -extern crate conduwuit_service as service; - -pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch}; - -pub(crate) use crate::{context::Context, utils::get_room_info}; - -pub(crate) const PAGE_SIZE: usize = 100; - -conduwuit::mod_ctor! {} -conduwuit::mod_dtor! {} -conduwuit::rustc_flags_capture! 
{} - -/// Install the admin command processor -pub async fn init(admin_service: &service::admin::Service) { - _ = admin_service - .complete - .write() - .expect("locked for writing") - .insert(processor::complete); - _ = admin_service - .handle - .write() - .await - .insert(processor::dispatch); -} - -/// Uninstall the admin command handler -pub async fn fini(admin_service: &service::admin::Service) { - _ = admin_service.handle.write().await.take(); - _ = admin_service - .complete - .write() - .expect("locked for writing") - .take(); -} diff --git a/src/admin/processor.rs b/src/admin/processor.rs deleted file mode 100644 index 8282a846..00000000 --- a/src/admin/processor.rs +++ /dev/null @@ -1,290 +0,0 @@ -use std::{ - fmt::Write, - mem::take, - panic::AssertUnwindSafe, - sync::{Arc, Mutex}, - time::SystemTime, -}; - -use clap::{CommandFactory, Parser}; -use conduwuit::{ - Error, Result, debug, error, - log::{ - capture, - capture::Capture, - fmt::{markdown_table, markdown_table_head}, - }, - trace, - utils::string::{collect_stream, common_prefix}, - warn, -}; -use futures::{AsyncWriteExt, future::FutureExt, io::BufWriter}; -use ruma::{ - EventId, - events::{ - relation::InReplyTo, - room::message::{Relation::Reply, RoomMessageEventContent}, - }, -}; -use service::{ - Services, - admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult}, -}; -use tracing::Level; -use tracing_subscriber::{EnvFilter, filter::LevelFilter}; - -use crate::{admin, admin::AdminCommand, context::Context}; - -#[must_use] -pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) } - -#[must_use] -pub(super) fn dispatch(services: Arc, command: CommandInput) -> ProcessorFuture { - Box::pin(handle_command(services, command)) -} - -#[tracing::instrument(skip_all, name = "admin")] -async fn handle_command(services: Arc, command: CommandInput) -> ProcessorResult { - AssertUnwindSafe(Box::pin(process_command(services, &command))) - .catch_unwind() 
- .await - .map_err(Error::from_panic) - .unwrap_or_else(|error| handle_panic(&error, &command)) -} - -async fn process_command(services: Arc, input: &CommandInput) -> ProcessorResult { - let (command, args, body) = match parse(&services, input) { - | Err(error) => return Err(error), - | Ok(parsed) => parsed, - }; - - let context = Context { - services: &services, - body: &body, - timer: SystemTime::now(), - reply_id: input.reply_id.as_deref(), - output: BufWriter::new(Vec::new()).into(), - }; - - let (result, mut logs) = process(&context, command, &args).await; - - let output = &mut context.output.lock().await; - output.flush().await.expect("final flush of output stream"); - - let output = - String::from_utf8(take(output.get_mut())).expect("invalid utf8 in command output stream"); - - match result { - | Ok(()) if logs.is_empty() => - Ok(Some(reply(RoomMessageEventContent::notice_markdown(output), context.reply_id))), - - | Ok(()) => { - logs.write_str(output.as_str()).expect("output buffer"); - Ok(Some(reply(RoomMessageEventContent::notice_markdown(logs), context.reply_id))) - }, - | Err(error) => { - write!(&mut logs, "Command failed with error:\n```\n{error:#?}\n```") - .expect("output buffer"); - - Err(reply(RoomMessageEventContent::notice_markdown(logs), context.reply_id)) - }, - } -} - -#[allow(clippy::result_large_err)] -fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { - let link = - "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 
🥺"; - let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}"); - let content = RoomMessageEventContent::notice_markdown(msg); - error!("Panic while processing command: {error:?}"); - Err(reply(content, command.reply_id.as_deref())) -} - -/// Parse and process a message from the admin room -async fn process( - context: &Context<'_>, - command: AdminCommand, - args: &[String], -) -> (Result, String) { - let (capture, logs) = capture_create(context); - - let capture_scope = capture.start(); - let result = Box::pin(admin::process(command, context)).await; - drop(capture_scope); - - debug!( - ok = result.is_ok(), - elapsed = ?context.timer.elapsed(), - command = ?args, - "command processed" - ); - - let mut output = String::new(); - - // Prepend the logs only if any were captured - let logs = logs.lock().expect("locked"); - if logs.lines().count() > 2 { - writeln!(&mut output, "{logs}").expect("failed to format logs to command output"); - } - drop(logs); - - (result, output) -} - -fn capture_create(context: &Context<'_>) -> (Arc, Arc>) { - let env_config = &context.services.server.config.admin_log_capture; - let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| { - warn!("admin_log_capture filter invalid: {e:?}"); - cfg!(debug_assertions) - .then_some("debug") - .or(Some("info")) - .map(Into::into) - .expect("default capture EnvFilter") - }); - - let log_level = env_filter - .max_level_hint() - .and_then(LevelFilter::into_level) - .unwrap_or(Level::DEBUG); - - let filter = move |data: capture::Data<'_>| { - data.level() <= log_level && data.our_modules() && data.scope.contains(&"admin") - }; - - let logs = Arc::new(Mutex::new( - collect_stream(|s| markdown_table_head(s)).expect("markdown table header"), - )); - - let capture = Capture::new( - &context.services.server.log.capture, - Some(filter), - capture::fmt(markdown_table, logs.clone()), - ); - - (capture, logs) -} - -/// Parse chat messages from the admin room into an 
AdminCommand object -#[allow(clippy::result_large_err)] -fn parse<'a>( - services: &Arc, - input: &'a CommandInput, -) -> Result<(AdminCommand, Vec, Vec<&'a str>), CommandOutput> { - let lines = input.command.lines().filter(|line| !line.trim().is_empty()); - let command_line = lines.clone().next().expect("command missing first line"); - let body = lines.skip(1).collect(); - match parse_command(command_line) { - | Ok((command, args)) => Ok((command, args, body)), - | Err(error) => { - let message = error - .to_string() - .replace("server.name", services.globals.server_name().as_str()); - Err(reply(RoomMessageEventContent::notice_plain(message), input.reply_id.as_deref())) - }, - } -} - -fn parse_command(line: &str) -> Result<(AdminCommand, Vec)> { - let argv = parse_line(line); - let command = AdminCommand::try_parse_from(&argv)?; - Ok((command, argv)) -} - -fn complete_command(mut cmd: clap::Command, line: &str) -> String { - let argv = parse_line(line); - let mut ret = Vec::::with_capacity(argv.len().saturating_add(1)); - - 'token: for token in argv.into_iter().skip(1) { - let cmd_ = cmd.clone(); - let mut choice = Vec::new(); - - for sub in cmd_.get_subcommands() { - let name = sub.get_name(); - if *name == token { - // token already complete; recurse to subcommand - ret.push(token); - cmd.clone_from(sub); - continue 'token; - } else if name.starts_with(&token) { - // partial match; add to choices - choice.push(name); - } - } - - if choice.len() == 1 { - // One choice. Add extra space because it's complete - let choice = *choice.first().expect("only choice"); - ret.push(choice.to_owned()); - ret.push(String::new()); - } else if choice.is_empty() { - // Nothing found, return original string - ret.push(token); - } else { - // Find the common prefix - ret.push(common_prefix(&choice).into()); - } - - // Return from completion - return ret.join(" "); - } - - // Return from no completion. Needs a space though. 
- ret.push(String::new()); - ret.join(" ") -} - -/// Parse chat messages from the admin room into an AdminCommand object -fn parse_line(command_line: &str) -> Vec { - let mut argv = command_line - .split_whitespace() - .map(str::to_owned) - .collect::>(); - - // Remove any escapes that came with a server-side escape command - if !argv.is_empty() && argv[0].ends_with("admin") { - argv[0] = argv[0].trim_start_matches('\\').into(); - } - - // First indice has to be "admin" but for console convenience we add it here - if !argv.is_empty() && !argv[0].ends_with("admin") && !argv[0].starts_with('@') { - argv.insert(0, "admin".to_owned()); - } - - // Replace `help command` with `command --help` - // Clap has a help subcommand, but it omits the long help description. - if argv.len() > 1 && argv[1] == "help" { - argv.remove(1); - argv.push("--help".to_owned()); - } - - // Backwards compatibility with `register_appservice`-style commands - if argv.len() > 1 && argv[1].contains('_') { - argv[1] = argv[1].replace('_', "-"); - } - - // Backwards compatibility with `register_appservice`-style commands - if argv.len() > 2 && argv[2].contains('_') { - argv[2] = argv[2].replace('_', "-"); - } - - // if the user is using the `query` command (argv[1]), replace the database - // function/table calls with underscores to match the codebase - if argv.len() > 3 && argv[1].eq("query") { - argv[3] = argv[3].replace('_', "-"); - } - - trace!(?command_line, ?argv, "parse"); - argv -} - -fn reply( - mut content: RoomMessageEventContent, - reply_id: Option<&EventId>, -) -> RoomMessageEventContent { - content.relates_to = reply_id.map(|event_id| Reply { - in_reply_to: InReplyTo { event_id: event_id.to_owned() }, - }); - - content -} diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs deleted file mode 100644 index 228d2120..00000000 --- a/src/admin/query/account_data.rs +++ /dev/null @@ -1,70 +0,0 @@ -use clap::Subcommand; -use conduwuit::Result; -use 
futures::StreamExt; -use ruma::{OwnedRoomId, OwnedUserId}; - -use crate::{admin_command, admin_command_dispatch}; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -/// All the getters and iterators from src/database/key_value/account_data.rs -pub(crate) enum AccountDataCommand { - /// - Returns all changes to the account data that happened after `since`. - ChangesSince { - /// Full user ID - user_id: OwnedUserId, - /// UNIX timestamp since (u64) - since: u64, - /// Optional room ID of the account data - room_id: Option, - }, - - /// - Searches the account data for a specific kind. - AccountDataGet { - /// Full user ID - user_id: OwnedUserId, - /// Account data event type - kind: String, - /// Optional room ID of the account data - room_id: Option, - }, -} - -#[admin_command] -async fn changes_since( - &self, - user_id: OwnedUserId, - since: u64, - room_id: Option, -) -> Result { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = self - .services - .account_data - .changes_since(room_id.as_deref(), &user_id, since, None) - .collect() - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) - .await -} - -#[admin_command] -async fn account_data_get( - &self, - user_id: OwnedUserId, - kind: String, - room_id: Option, -) -> Result { - let timer = tokio::time::Instant::now(); - let results = self - .services - .account_data - .get_raw(room_id.as_deref(), &user_id, &kind) - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) - .await -} diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs deleted file mode 100644 index 28bf6451..00000000 --- a/src/admin/query/appservice.rs +++ /dev/null @@ -1,42 +0,0 @@ -use clap::Subcommand; -use conduwuit::Result; -use futures::TryStreamExt; - -use crate::Context; - -#[derive(Debug, Subcommand)] -/// All the getters and 
iterators from src/database/key_value/appservice.rs -pub(crate) enum AppserviceCommand { - /// - Gets the appservice registration info/details from the ID as a string - GetRegistration { - /// Appservice registration ID - appservice_id: String, - }, - - /// - Gets all appservice registrations with their ID and registration info - All, -} - -/// All the getters and iterators from src/database/key_value/appservice.rs -pub(super) async fn process(subcommand: AppserviceCommand, context: &Context<'_>) -> Result { - let services = context.services; - - match subcommand { - | AppserviceCommand::GetRegistration { appservice_id } => { - let timer = tokio::time::Instant::now(); - let results = services.appservice.get_registration(&appservice_id).await; - - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - | AppserviceCommand::All => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services.appservice.iter_db_ids().try_collect().await?; - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - } - .await -} diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs deleted file mode 100644 index c8c1f512..00000000 --- a/src/admin/query/globals.rs +++ /dev/null @@ -1,61 +0,0 @@ -use clap::Subcommand; -use conduwuit::Result; -use ruma::OwnedServerName; - -use crate::Context; - -#[derive(Debug, Subcommand)] -/// All the getters and iterators from src/database/key_value/globals.rs -pub(crate) enum GlobalsCommand { - DatabaseVersion, - - CurrentCount, - - LastCheckForAnnouncementsId, - - /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found - /// for the server. 
- SigningKeysFor { - origin: OwnedServerName, - }, -} - -/// All the getters and iterators from src/database/key_value/globals.rs -pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) -> Result { - let services = context.services; - - match subcommand { - | GlobalsCommand::DatabaseVersion => { - let timer = tokio::time::Instant::now(); - let results = services.globals.db.database_version().await; - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - | GlobalsCommand::CurrentCount => { - let timer = tokio::time::Instant::now(); - let results = services.globals.db.current_count(); - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - | GlobalsCommand::LastCheckForAnnouncementsId => { - let timer = tokio::time::Instant::now(); - let results = services - .announcements - .last_check_for_announcements_id() - .await; - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - | GlobalsCommand::SigningKeysFor { origin } => { - let timer = tokio::time::Instant::now(); - let results = services.server_keys.verify_keys_for(&origin).await; - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - } - .await -} diff --git a/src/admin/query/mod.rs b/src/admin/query/mod.rs deleted file mode 100644 index da27eb1d..00000000 --- a/src/admin/query/mod.rs +++ /dev/null @@ -1,82 +0,0 @@ -mod account_data; -mod appservice; -mod globals; -mod presence; -mod pusher; -mod raw; -mod resolver; -mod room_alias; -mod room_state_cache; -mod room_timeline; -mod sending; -mod short; -mod users; - -use clap::Subcommand; -use conduwuit::Result; - -use self::{ - account_data::AccountDataCommand, appservice::AppserviceCommand, globals::GlobalsCommand, - presence::PresenceCommand, 
pusher::PusherCommand, raw::RawCommand, resolver::ResolverCommand, - room_alias::RoomAliasCommand, room_state_cache::RoomStateCacheCommand, - room_timeline::RoomTimelineCommand, sending::SendingCommand, short::ShortCommand, - users::UsersCommand, -}; -use crate::admin_command_dispatch; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -/// Query tables from database -pub(super) enum QueryCommand { - /// - account_data.rs iterators and getters - #[command(subcommand)] - AccountData(AccountDataCommand), - - /// - appservice.rs iterators and getters - #[command(subcommand)] - Appservice(AppserviceCommand), - - /// - presence.rs iterators and getters - #[command(subcommand)] - Presence(PresenceCommand), - - /// - rooms/alias.rs iterators and getters - #[command(subcommand)] - RoomAlias(RoomAliasCommand), - - /// - rooms/state_cache iterators and getters - #[command(subcommand)] - RoomStateCache(RoomStateCacheCommand), - - /// - rooms/timeline iterators and getters - #[command(subcommand)] - RoomTimeline(RoomTimelineCommand), - - /// - globals.rs iterators and getters - #[command(subcommand)] - Globals(GlobalsCommand), - - /// - sending.rs iterators and getters - #[command(subcommand)] - Sending(SendingCommand), - - /// - users.rs iterators and getters - #[command(subcommand)] - Users(UsersCommand), - - /// - resolver service - #[command(subcommand)] - Resolver(ResolverCommand), - - /// - pusher service - #[command(subcommand)] - Pusher(PusherCommand), - - /// - short service - #[command(subcommand)] - Short(ShortCommand), - - /// - raw service - #[command(subcommand)] - Raw(RawCommand), -} diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs deleted file mode 100644 index 5b7ead4b..00000000 --- a/src/admin/query/presence.rs +++ /dev/null @@ -1,51 +0,0 @@ -use clap::Subcommand; -use conduwuit::Result; -use futures::StreamExt; -use ruma::OwnedUserId; - -use crate::Context; - -#[derive(Debug, Subcommand)] -/// All the getters and iterators from 
src/database/key_value/presence.rs -pub(crate) enum PresenceCommand { - /// - Returns the latest presence event for the given user. - GetPresence { - /// Full user ID - user_id: OwnedUserId, - }, - - /// - Iterator of the most recent presence updates that happened after the - /// event with id `since`. - PresenceSince { - /// UNIX timestamp since (u64) - since: u64, - }, -} - -/// All the getters and iterators in key_value/presence.rs -pub(super) async fn process(subcommand: PresenceCommand, context: &Context<'_>) -> Result { - let services = context.services; - - match subcommand { - | PresenceCommand::GetPresence { user_id } => { - let timer = tokio::time::Instant::now(); - let results = services.presence.get_presence(&user_id).await; - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - | PresenceCommand::PresenceSince { since } => { - let timer = tokio::time::Instant::now(); - let results: Vec<(_, _, _)> = services - .presence - .presence_since(since) - .map(|(user_id, count, bytes)| (user_id.to_owned(), count, bytes.to_vec())) - .collect() - .await; - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - } - .await -} diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs deleted file mode 100644 index 0d0e6cc9..00000000 --- a/src/admin/query/pusher.rs +++ /dev/null @@ -1,29 +0,0 @@ -use clap::Subcommand; -use conduwuit::Result; -use ruma::OwnedUserId; - -use crate::Context; - -#[derive(Debug, Subcommand)] -pub(crate) enum PusherCommand { - /// - Returns all the pushers for the user. 
- GetPushers { - /// Full user ID - user_id: OwnedUserId, - }, -} - -pub(super) async fn process(subcommand: PusherCommand, context: &Context<'_>) -> Result { - let services = context.services; - - match subcommand { - | PusherCommand::GetPushers { user_id } => { - let timer = tokio::time::Instant::now(); - let results = services.pusher.get_pushers(&user_id).await; - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - } - .await -} diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs deleted file mode 100644 index 0e248c65..00000000 --- a/src/admin/query/raw.rs +++ /dev/null @@ -1,452 +0,0 @@ -use std::{borrow::Cow, collections::BTreeMap, ops::Deref, sync::Arc}; - -use clap::Subcommand; -use conduwuit::{ - Err, Result, apply, at, is_zero, - utils::{ - stream::{IterStream, ReadyExt, TryIgnore, TryParallelExt}, - string::EMPTY, - }, -}; -use conduwuit_database::Map; -use conduwuit_service::Services; -use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; -use tokio::time::Instant; - -use crate::{admin_command, admin_command_dispatch}; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -#[allow(clippy::enum_variant_names)] -/// Query tables from database -pub(crate) enum RawCommand { - /// - List database maps - RawMaps, - - /// - Raw database query - RawGet { - /// Map name - map: String, - - /// Key - key: String, - }, - - /// - Raw database delete (for string keys) - RawDel { - /// Map name - map: String, - - /// Key - key: String, - }, - - /// - Raw database keys iteration - RawKeys { - /// Map name - map: String, - - /// Key prefix - prefix: Option, - }, - - /// - Raw database key size breakdown - RawKeysSizes { - /// Map name - map: Option, - - /// Key prefix - prefix: Option, - }, - - /// - Raw database keys total bytes - RawKeysTotal { - /// Map name - map: Option, - - /// Key prefix - prefix: Option, - }, - - /// - Raw database values size breakdown - RawValsSizes { 
- /// Map name - map: Option, - - /// Key prefix - prefix: Option, - }, - - /// - Raw database values total bytes - RawValsTotal { - /// Map name - map: Option, - - /// Key prefix - prefix: Option, - }, - - /// - Raw database items iteration - RawIter { - /// Map name - map: String, - - /// Key prefix - prefix: Option, - }, - - /// - Raw database keys iteration - RawKeysFrom { - /// Map name - map: String, - - /// Lower-bound - start: String, - - /// Limit - #[arg(short, long)] - limit: Option, - }, - - /// - Raw database items iteration - RawIterFrom { - /// Map name - map: String, - - /// Lower-bound - start: String, - - /// Limit - #[arg(short, long)] - limit: Option, - }, - - /// - Raw database record count - RawCount { - /// Map name - map: Option, - - /// Key prefix - prefix: Option, - }, - - /// - Compact database - Compact { - #[arg(short, long, alias("column"))] - map: Option>, - - #[arg(long)] - start: Option, - - #[arg(long)] - stop: Option, - - #[arg(long)] - from: Option, - - #[arg(long)] - into: Option, - - /// There is one compaction job per column; then this controls how many - /// columns are compacted in parallel. If zero, one compaction job is - /// still run at a time here, but in exclusive-mode blocking any other - /// automatic compaction jobs until complete. 
- #[arg(long)] - parallelism: Option, - - #[arg(long, default_value("false"))] - exhaustive: bool, - }, -} - -#[admin_command] -pub(super) async fn compact( - &self, - map: Option>, - start: Option, - stop: Option, - from: Option, - into: Option, - parallelism: Option, - exhaustive: bool, -) -> Result { - use conduwuit_database::compact::Options; - - let default_all_maps: Option<_> = map.is_none().then(|| { - self.services - .db - .keys() - .map(Deref::deref) - .map(ToOwned::to_owned) - }); - - let maps: Vec<_> = map - .unwrap_or_default() - .into_iter() - .chain(default_all_maps.into_iter().flatten()) - .map(|map| self.services.db.get(&map)) - .filter_map(Result::ok) - .cloned() - .collect(); - - if maps.is_empty() { - return Err!("--map argument invalid. not found in database"); - } - - let range = ( - start.as_ref().map(String::as_bytes).map(Into::into), - stop.as_ref().map(String::as_bytes).map(Into::into), - ); - - let options = Options { - range, - level: (from, into), - exclusive: parallelism.is_some_and(is_zero!()), - exhaustive, - }; - - let runtime = self.services.server.runtime().clone(); - let parallelism = parallelism.unwrap_or(1); - let results = maps - .into_iter() - .try_stream() - .paralleln_and_then(runtime, parallelism, move |map| { - map.compact_blocking(options.clone())?; - Ok(map.name().to_owned()) - }) - .collect::>(); - - let timer = Instant::now(); - let results = results.await; - let query_time = timer.elapsed(); - self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) - .await -} - -#[admin_command] -pub(super) async fn raw_count(&self, map: Option, prefix: Option) -> Result { - let prefix = prefix.as_deref().unwrap_or(EMPTY); - - let timer = Instant::now(); - let count = with_maps_or(map.as_deref(), self.services) - .then(|map| map.raw_count_prefix(&prefix)) - .ready_fold(0_usize, usize::saturating_add) - .await; - - let query_time = timer.elapsed(); - self.write_str(&format!("Query completed in 
{query_time:?}:\n\n```rs\n{count:#?}\n```")) - .await -} - -#[admin_command] -pub(super) async fn raw_keys(&self, map: String, prefix: Option) -> Result { - writeln!(self, "```").boxed().await?; - - let map = self.services.db.get(map.as_str())?; - let timer = Instant::now(); - prefix - .as_deref() - .map_or_else(|| map.raw_keys().boxed(), |prefix| map.raw_keys_prefix(prefix).boxed()) - .map_ok(String::from_utf8_lossy) - .try_for_each(|str| writeln!(self, "{str:?}")) - .boxed() - .await?; - - let query_time = timer.elapsed(); - self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await -} - -#[admin_command] -pub(super) async fn raw_keys_sizes(&self, map: Option, prefix: Option) -> Result { - let prefix = prefix.as_deref().unwrap_or(EMPTY); - - let timer = Instant::now(); - let result = with_maps_or(map.as_deref(), self.services) - .map(|map| map.raw_keys_prefix(&prefix)) - .flatten() - .ignore_err() - .map(<[u8]>::len) - .ready_fold_default(|mut map: BTreeMap<_, usize>, len| { - let entry = map.entry(len).or_default(); - *entry = entry.saturating_add(1); - map - }) - .await; - - let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) - .await -} - -#[admin_command] -pub(super) async fn raw_keys_total(&self, map: Option, prefix: Option) -> Result { - let prefix = prefix.as_deref().unwrap_or(EMPTY); - - let timer = Instant::now(); - let result = with_maps_or(map.as_deref(), self.services) - .map(|map| map.raw_keys_prefix(&prefix)) - .flatten() - .ignore_err() - .map(<[u8]>::len) - .ready_fold_default(|acc: usize, len| acc.saturating_add(len)) - .await; - - let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await -} - -#[admin_command] -pub(super) async fn raw_vals_sizes(&self, map: Option, prefix: Option) -> Result { - let prefix = prefix.as_deref().unwrap_or(EMPTY); - - let timer = Instant::now(); - let 
result = with_maps_or(map.as_deref(), self.services) - .map(|map| map.raw_stream_prefix(&prefix)) - .flatten() - .ignore_err() - .map(at!(1)) - .map(<[u8]>::len) - .ready_fold_default(|mut map: BTreeMap<_, usize>, len| { - let entry = map.entry(len).or_default(); - *entry = entry.saturating_add(1); - map - }) - .await; - - let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) - .await -} - -#[admin_command] -pub(super) async fn raw_vals_total(&self, map: Option, prefix: Option) -> Result { - let prefix = prefix.as_deref().unwrap_or(EMPTY); - - let timer = Instant::now(); - let result = with_maps_or(map.as_deref(), self.services) - .map(|map| map.raw_stream_prefix(&prefix)) - .flatten() - .ignore_err() - .map(at!(1)) - .map(<[u8]>::len) - .ready_fold_default(|acc: usize, len| acc.saturating_add(len)) - .await; - - let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await -} - -#[admin_command] -pub(super) async fn raw_iter(&self, map: String, prefix: Option) -> Result { - writeln!(self, "```").await?; - - let map = self.services.db.get(&map)?; - let timer = Instant::now(); - prefix - .as_deref() - .map_or_else(|| map.raw_stream().boxed(), |prefix| map.raw_stream_prefix(prefix).boxed()) - .map_ok(apply!(2, String::from_utf8_lossy)) - .map_ok(apply!(2, Cow::into_owned)) - .try_for_each(|keyval| writeln!(self, "{keyval:?}")) - .boxed() - .await?; - - let query_time = timer.elapsed(); - self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await -} - -#[admin_command] -pub(super) async fn raw_keys_from( - &self, - map: String, - start: String, - limit: Option, -) -> Result { - writeln!(self, "```").await?; - - let map = self.services.db.get(&map)?; - let timer = Instant::now(); - map.raw_keys_from(&start) - .map_ok(String::from_utf8_lossy) - .take(limit.unwrap_or(usize::MAX)) - .try_for_each(|str| 
writeln!(self, "{str:?}")) - .boxed() - .await?; - - let query_time = timer.elapsed(); - self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await -} - -#[admin_command] -pub(super) async fn raw_iter_from( - &self, - map: String, - start: String, - limit: Option, -) -> Result { - let map = self.services.db.get(&map)?; - let timer = Instant::now(); - let result = map - .raw_stream_from(&start) - .map_ok(apply!(2, String::from_utf8_lossy)) - .map_ok(apply!(2, Cow::into_owned)) - .take(limit.unwrap_or(usize::MAX)) - .try_collect::>() - .await?; - - let query_time = timer.elapsed(); - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -pub(super) async fn raw_del(&self, map: String, key: String) -> Result { - let map = self.services.db.get(&map)?; - let timer = Instant::now(); - map.remove(&key); - - let query_time = timer.elapsed(); - self.write_str(&format!("Operation completed in {query_time:?}")) - .await -} - -#[admin_command] -pub(super) async fn raw_get(&self, map: String, key: String) -> Result { - let map = self.services.db.get(&map)?; - let timer = Instant::now(); - let handle = map.get(&key).await?; - - let query_time = timer.elapsed(); - let result = String::from_utf8_lossy(&handle); - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) - .await -} - -#[admin_command] -pub(super) async fn raw_maps(&self) -> Result { - let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect(); - - self.write_str(&format!("{list:#?}")).await -} - -fn with_maps_or<'a>( - map: Option<&'a str>, - services: &'a Services, -) -> impl Stream> + Send + 'a { - let default_all_maps = map - .is_none() - .then(|| services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - map.into_iter() - .chain(default_all_maps) - .map(|map| services.db.get(map)) - .filter_map(Result::ok) - .stream() -} diff --git a/src/admin/query/resolver.rs 
b/src/admin/query/resolver.rs deleted file mode 100644 index 4a39a40e..00000000 --- a/src/admin/query/resolver.rs +++ /dev/null @@ -1,71 +0,0 @@ -use clap::Subcommand; -use conduwuit::{Result, utils::time}; -use futures::StreamExt; -use ruma::OwnedServerName; - -use crate::{admin_command, admin_command_dispatch}; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -/// Resolver service and caches -pub(crate) enum ResolverCommand { - /// Query the destinations cache - DestinationsCache { - server_name: Option, - }, - - /// Query the overrides cache - OverridesCache { - name: Option, - }, -} - -#[admin_command] -async fn destinations_cache(&self, server_name: Option) -> Result { - use service::resolver::cache::CachedDest; - - writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?; - writeln!(self, "| ----------- | ----------- | -------- | ------- |").await?; - - let mut destinations = self.services.resolver.cache.destinations().boxed(); - - while let Some((name, CachedDest { dest, host, expire })) = destinations.next().await { - if let Some(server_name) = server_name.as_ref() { - if name != server_name { - continue; - } - } - - let expire = time::format(expire, "%+"); - self.write_str(&format!("| {name} | {dest} | {host} | {expire} |\n")) - .await?; - } - - Ok(()) -} - -#[admin_command] -async fn overrides_cache(&self, server_name: Option) -> Result { - use service::resolver::cache::CachedOverride; - - writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?; - writeln!(self, "| ----------- | --- | ----:| ------- | ---------- |").await?; - - let mut overrides = self.services.resolver.cache.overrides().boxed(); - - while let Some((name, CachedOverride { ips, port, expire, overriding })) = - overrides.next().await - { - if let Some(server_name) = server_name.as_ref() { - if name != server_name { - continue; - } - } - - let expire = time::format(expire, "%+"); - self.write_str(&format!("| {name} | {ips:?} | {port} | 
{expire} | {overriding:?} |\n")) - .await?; - } - - Ok(()) -} diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs deleted file mode 100644 index b646beec..00000000 --- a/src/admin/query/room_alias.rs +++ /dev/null @@ -1,66 +0,0 @@ -use clap::Subcommand; -use conduwuit::Result; -use futures::StreamExt; -use ruma::{OwnedRoomAliasId, OwnedRoomId}; - -use crate::Context; - -#[derive(Debug, Subcommand)] -/// All the getters and iterators from src/database/key_value/rooms/alias.rs -pub(crate) enum RoomAliasCommand { - ResolveLocalAlias { - /// Full room alias - alias: OwnedRoomAliasId, - }, - - /// - Iterator of all our local room aliases for the room ID - LocalAliasesForRoom { - /// Full room ID - room_id: OwnedRoomId, - }, - - /// - Iterator of all our local aliases in our database with their room IDs - AllLocalAliases, -} - -/// All the getters and iterators in src/database/key_value/rooms/alias.rs -pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>) -> Result { - let services = context.services; - - match subcommand { - | RoomAliasCommand::ResolveLocalAlias { alias } => { - let timer = tokio::time::Instant::now(); - let results = services.rooms.alias.resolve_local_alias(&alias).await; - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") - }, - | RoomAliasCommand::LocalAliasesForRoom { room_id } => { - let timer = tokio::time::Instant::now(); - let aliases: Vec<_> = services - .rooms - .alias - .local_aliases_for_room(&room_id) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```") - }, - | RoomAliasCommand::AllLocalAliases => { - let timer = tokio::time::Instant::now(); - let aliases = services - .rooms - .alias - .all_local_aliases() - .map(|(room_id, alias)| (room_id.to_owned(), alias.to_owned())) - .collect::>() - .await; - let 
query_time = timer.elapsed(); - - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{aliases:#?}\n```") - }, - } - .await -} diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs deleted file mode 100644 index c64cd173..00000000 --- a/src/admin/query/room_state_cache.rs +++ /dev/null @@ -1,338 +0,0 @@ -use clap::Subcommand; -use conduwuit::Result; -use futures::StreamExt; -use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; - -use crate::Context; - -#[derive(Debug, Subcommand)] -pub(crate) enum RoomStateCacheCommand { - ServerInRoom { - server: OwnedServerName, - room_id: OwnedRoomId, - }, - - RoomServers { - room_id: OwnedRoomId, - }, - - ServerRooms { - server: OwnedServerName, - }, - - RoomMembers { - room_id: OwnedRoomId, - }, - - LocalUsersInRoom { - room_id: OwnedRoomId, - }, - - ActiveLocalUsersInRoom { - room_id: OwnedRoomId, - }, - - RoomJoinedCount { - room_id: OwnedRoomId, - }, - - RoomInvitedCount { - room_id: OwnedRoomId, - }, - - RoomUserOnceJoined { - room_id: OwnedRoomId, - }, - - RoomMembersInvited { - room_id: OwnedRoomId, - }, - - GetInviteCount { - room_id: OwnedRoomId, - user_id: OwnedUserId, - }, - - GetLeftCount { - room_id: OwnedRoomId, - user_id: OwnedUserId, - }, - - RoomsJoined { - user_id: OwnedUserId, - }, - - RoomsLeft { - user_id: OwnedUserId, - }, - - RoomsInvited { - user_id: OwnedUserId, - }, - - InviteState { - user_id: OwnedUserId, - room_id: OwnedRoomId, - }, -} - -pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context<'_>) -> Result { - let services = context.services; - - match subcommand { - | RoomStateCacheCommand::ServerInRoom { server, room_id } => { - let timer = tokio::time::Instant::now(); - let result = services - .rooms - .state_cache - .server_in_room(&server, &room_id) - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - )) - .await - }, - | 
RoomStateCacheCommand::RoomServers { room_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .room_servers(&room_id) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::ServerRooms { server } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .server_rooms(&server) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::RoomMembers { room_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .room_members(&room_id) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::LocalUsersInRoom { room_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .local_users_in_room(&room_id) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .active_local_users_in_room(&room_id) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | 
RoomStateCacheCommand::RoomJoinedCount { room_id } => { - let timer = tokio::time::Instant::now(); - let results = services.rooms.state_cache.room_joined_count(&room_id).await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::RoomInvitedCount { room_id } => { - let timer = tokio::time::Instant::now(); - let results = services - .rooms - .state_cache - .room_invited_count(&room_id) - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::RoomUserOnceJoined { room_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .room_useroncejoined(&room_id) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::RoomMembersInvited { room_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .room_members_invited(&room_id) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::GetInviteCount { room_id, user_id } => { - let timer = tokio::time::Instant::now(); - let results = services - .rooms - .state_cache - .get_invite_count(&room_id, &user_id) - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::GetLeftCount { room_id, user_id } => { - let timer = tokio::time::Instant::now(); - let results = services - 
.rooms - .state_cache - .get_left_count(&room_id, &user_id) - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::RoomsJoined { user_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .rooms_joined(&user_id) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::RoomsInvited { user_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .rooms_invited(&user_id) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::RoomsLeft { user_id } => { - let timer = tokio::time::Instant::now(); - let results: Vec<_> = services - .rooms - .state_cache - .rooms_left(&user_id) - .collect() - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - | RoomStateCacheCommand::InviteState { user_id, room_id } => { - let timer = tokio::time::Instant::now(); - let results = services - .rooms - .state_cache - .invite_state(&user_id, &room_id) - .await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - } -} diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs deleted file mode 100644 index 0fd22ca7..00000000 --- a/src/admin/query/room_timeline.rs +++ /dev/null @@ -1,61 +0,0 @@ -use clap::Subcommand; -use conduwuit::{PduCount, Result, utils::stream::TryTools}; -use 
futures::TryStreamExt; -use ruma::OwnedRoomOrAliasId; - -use crate::{admin_command, admin_command_dispatch}; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -/// Query tables from database -pub(crate) enum RoomTimelineCommand { - Pdus { - room_id: OwnedRoomOrAliasId, - - from: Option, - - #[arg(short, long)] - limit: Option, - }, - - Last { - room_id: OwnedRoomOrAliasId, - }, -} - -#[admin_command] -pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { - let room_id = self.services.rooms.alias.resolve(&room_id).await?; - - let result = self - .services - .rooms - .timeline - .last_timeline_count(None, &room_id) - .await?; - - self.write_str(&format!("{result:#?}")).await -} - -#[admin_command] -pub(super) async fn pdus( - &self, - room_id: OwnedRoomOrAliasId, - from: Option, - limit: Option, -) -> Result { - let room_id = self.services.rooms.alias.resolve(&room_id).await?; - - let from: Option = from.as_deref().map(str::parse).transpose()?; - - let result: Vec<_> = self - .services - .rooms - .timeline - .pdus_rev(None, &room_id, from) - .try_take(limit.unwrap_or(3)) - .try_collect() - .await?; - - self.write_str(&format!("{result:#?}")).await -} diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs deleted file mode 100644 index 8b1676bc..00000000 --- a/src/admin/query/sending.rs +++ /dev/null @@ -1,236 +0,0 @@ -use clap::Subcommand; -use conduwuit::{Err, Result}; -use futures::StreamExt; -use ruma::{OwnedServerName, OwnedUserId}; -use service::sending::Destination; - -use crate::Context; - -#[derive(Debug, Subcommand)] -/// All the getters and iterators from src/database/key_value/sending.rs -pub(crate) enum SendingCommand { - /// - Queries database for all `servercurrentevent_data` - ActiveRequests, - - /// - Queries database for `servercurrentevent_data` but for a specific - /// destination - /// - /// This command takes only *one* format of these arguments: - /// - /// appservice_id - /// server_name - /// user_id 
AND push_key - /// - /// See src/service/sending/mod.rs for the definition of the `Destination` - /// enum - ActiveRequestsFor { - #[arg(short, long)] - appservice_id: Option, - #[arg(short, long)] - server_name: Option, - #[arg(short, long)] - user_id: Option, - #[arg(short, long)] - push_key: Option, - }, - - /// - Queries database for `servernameevent_data` which are the queued up - /// requests that will eventually be sent - /// - /// This command takes only *one* format of these arguments: - /// - /// appservice_id - /// server_name - /// user_id AND push_key - /// - /// See src/service/sending/mod.rs for the definition of the `Destination` - /// enum - QueuedRequests { - #[arg(short, long)] - appservice_id: Option, - #[arg(short, long)] - server_name: Option, - #[arg(short, long)] - user_id: Option, - #[arg(short, long)] - push_key: Option, - }, - - GetLatestEduCount { - server_name: OwnedServerName, - }, -} - -/// All the getters and iterators in key_value/sending.rs -pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -> Result { - let services = context.services; - - match subcommand { - | SendingCommand::ActiveRequests => { - let timer = tokio::time::Instant::now(); - let results = services.sending.db.active_requests(); - let active_requests = results.collect::>().await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - )) - .await - }, - | SendingCommand::QueuedRequests { - appservice_id, - server_name, - user_id, - push_key, - } => { - if appservice_id.is_none() - && server_name.is_none() - && user_id.is_none() - && push_key.is_none() - { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. 
See --help for more details.", - ); - } - let timer = tokio::time::Instant::now(); - let results = match (appservice_id, server_name, user_id, push_key) { - | (Some(appservice_id), None, None, None) => { - if appservice_id.is_empty() { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. See --help for more details.", - ); - } - - services - .sending - .db - .queued_requests(&Destination::Appservice(appservice_id)) - }, - | (None, Some(server_name), None, None) => services - .sending - .db - .queued_requests(&Destination::Federation(server_name)), - | (None, None, Some(user_id), Some(push_key)) => { - if push_key.is_empty() { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. See --help for more details.", - ); - } - - services - .sending - .db - .queued_requests(&Destination::Push(user_id, push_key)) - }, - | (Some(_), Some(_), Some(_), Some(_)) => { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. Not all of them See --help for more details.", - ); - }, - | _ => { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. See --help for more details.", - ); - }, - }; - - let queued_requests = results.collect::>().await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" - )) - .await - }, - | SendingCommand::ActiveRequestsFor { - appservice_id, - server_name, - user_id, - push_key, - } => { - if appservice_id.is_none() - && server_name.is_none() - && user_id.is_none() - && push_key.is_none() - { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. 
See --help for more details.", - ); - } - - let timer = tokio::time::Instant::now(); - let results = match (appservice_id, server_name, user_id, push_key) { - | (Some(appservice_id), None, None, None) => { - if appservice_id.is_empty() { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. See --help for more details.", - ); - } - - services - .sending - .db - .active_requests_for(&Destination::Appservice(appservice_id)) - }, - | (None, Some(server_name), None, None) => services - .sending - .db - .active_requests_for(&Destination::Federation(server_name)), - | (None, None, Some(user_id), Some(push_key)) => { - if push_key.is_empty() { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. See --help for more details.", - ); - } - - services - .sending - .db - .active_requests_for(&Destination::Push(user_id, push_key)) - }, - | (Some(_), Some(_), Some(_), Some(_)) => { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. Not all of them See --help for more details.", - ); - }, - | _ => { - return Err!( - "An appservice ID, server name, or a user ID with push key must be \ - specified via arguments. 
See --help for more details.", - ); - }, - }; - - let active_requests = results.collect::>().await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - )) - .await - }, - | SendingCommand::GetLatestEduCount { server_name } => { - let timer = tokio::time::Instant::now(); - let results = services.sending.db.get_latest_educount(&server_name).await; - let query_time = timer.elapsed(); - - context - .write_str(&format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - )) - .await - }, - } -} diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs deleted file mode 100644 index aa7c8666..00000000 --- a/src/admin/query/short.rs +++ /dev/null @@ -1,39 +0,0 @@ -use clap::Subcommand; -use conduwuit::Result; -use ruma::{OwnedEventId, OwnedRoomOrAliasId}; - -use crate::{admin_command, admin_command_dispatch}; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -/// Query tables from database -pub(crate) enum ShortCommand { - ShortEventId { - event_id: OwnedEventId, - }, - - ShortRoomId { - room_id: OwnedRoomOrAliasId, - }, -} - -#[admin_command] -pub(super) async fn short_event_id(&self, event_id: OwnedEventId) -> Result { - let shortid = self - .services - .rooms - .short - .get_shorteventid(&event_id) - .await?; - - self.write_str(&format!("{shortid:#?}")).await -} - -#[admin_command] -pub(super) async fn short_room_id(&self, room_id: OwnedRoomOrAliasId) -> Result { - let room_id = self.services.rooms.alias.resolve(&room_id).await?; - - let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?; - - self.write_str(&format!("{shortid:#?}")).await -} diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs deleted file mode 100644 index 0f34d13f..00000000 --- a/src/admin/query/users.rs +++ /dev/null @@ -1,371 +0,0 @@ -use clap::Subcommand; -use conduwuit::Result; -use futures::stream::StreamExt; -use ruma::{OwnedDeviceId, 
OwnedRoomId, OwnedUserId}; - -use crate::{admin_command, admin_command_dispatch}; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -/// All the getters and iterators from src/database/key_value/users.rs -pub(crate) enum UsersCommand { - CountUsers, - - IterUsers, - - IterUsers2, - - PasswordHash { - user_id: OwnedUserId, - }, - - ListDevices { - user_id: OwnedUserId, - }, - - ListDevicesMetadata { - user_id: OwnedUserId, - }, - - GetDeviceMetadata { - user_id: OwnedUserId, - device_id: OwnedDeviceId, - }, - - GetDevicesVersion { - user_id: OwnedUserId, - }, - - CountOneTimeKeys { - user_id: OwnedUserId, - device_id: OwnedDeviceId, - }, - - GetDeviceKeys { - user_id: OwnedUserId, - device_id: OwnedDeviceId, - }, - - GetUserSigningKey { - user_id: OwnedUserId, - }, - - GetMasterKey { - user_id: OwnedUserId, - }, - - GetToDeviceEvents { - user_id: OwnedUserId, - device_id: OwnedDeviceId, - }, - - GetLatestBackup { - user_id: OwnedUserId, - }, - - GetLatestBackupVersion { - user_id: OwnedUserId, - }, - - GetBackupAlgorithm { - user_id: OwnedUserId, - version: String, - }, - - GetAllBackups { - user_id: OwnedUserId, - version: String, - }, - - GetRoomBackups { - user_id: OwnedUserId, - version: String, - room_id: OwnedRoomId, - }, - - GetBackupSession { - user_id: OwnedUserId, - version: String, - room_id: OwnedRoomId, - session_id: String, - }, - - GetSharedRooms { - user_a: OwnedUserId, - user_b: OwnedUserId, - }, -} - -#[admin_command] -async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Result { - let timer = tokio::time::Instant::now(); - let result: Vec<_> = self - .services - .rooms - .state_cache - .get_shared_rooms(&user_a, &user_b) - .map(ToOwned::to_owned) - .collect() - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn get_backup_session( - &self, - user_id: OwnedUserId, - version: String, - 
room_id: OwnedRoomId, - session_id: String, -) -> Result { - let timer = tokio::time::Instant::now(); - let result = self - .services - .key_backups - .get_session(&user_id, &version, &room_id, &session_id) - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn get_room_backups( - &self, - user_id: OwnedUserId, - version: String, - room_id: OwnedRoomId, -) -> Result { - let timer = tokio::time::Instant::now(); - let result = self - .services - .key_backups - .get_room(&user_id, &version, &room_id) - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result { - let timer = tokio::time::Instant::now(); - let result = self.services.key_backups.get_all(&user_id, &version).await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result { - let timer = tokio::time::Instant::now(); - let result = self - .services - .key_backups - .get_backup(&user_id, &version) - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result { - let timer = tokio::time::Instant::now(); - let result = self - .services - .key_backups - .get_latest_backup_version(&user_id) - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { 
- let timer = tokio::time::Instant::now(); - let result = self.services.key_backups.get_latest_backup(&user_id).await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn iter_users(&self) -> Result { - let timer = tokio::time::Instant::now(); - let result: Vec = self.services.users.stream().map(Into::into).collect().await; - - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn iter_users2(&self) -> Result { - let timer = tokio::time::Instant::now(); - let result: Vec<_> = self.services.users.stream().collect().await; - let result: Vec<_> = result - .into_iter() - .map(ruma::UserId::as_bytes) - .map(String::from_utf8_lossy) - .collect(); - - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) - .await -} - -#[admin_command] -async fn count_users(&self) -> Result { - let timer = tokio::time::Instant::now(); - let result = self.services.users.count().await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn password_hash(&self, user_id: OwnedUserId) -> Result { - let timer = tokio::time::Instant::now(); - let result = self.services.users.password_hash(&user_id).await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn list_devices(&self, user_id: OwnedUserId) -> Result { - let timer = tokio::time::Instant::now(); - let devices = self - .services - .users - .all_device_ids(&user_id) - .map(ToOwned::to_owned) - .collect::>() - .await; - - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in 
{query_time:?}:\n\n```rs\n{devices:#?}\n```")) - .await -} - -#[admin_command] -async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result { - let timer = tokio::time::Instant::now(); - let devices = self - .services - .users - .all_devices_metadata(&user_id) - .collect::>() - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```")) - .await -} - -#[admin_command] -async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { - let timer = tokio::time::Instant::now(); - let device = self - .services - .users - .get_device_metadata(&user_id, &device_id) - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) - .await -} - -#[admin_command] -async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { - let timer = tokio::time::Instant::now(); - let device = self.services.users.get_devicelist_version(&user_id).await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) - .await -} - -#[admin_command] -async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { - let timer = tokio::time::Instant::now(); - let result = self - .services - .users - .count_one_time_keys(&user_id, &device_id) - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { - let timer = tokio::time::Instant::now(); - let result = self - .services - .users - .get_device_keys(&user_id, &device_id) - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] 
-async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { - let timer = tokio::time::Instant::now(); - let result = self.services.users.get_user_signing_key(&user_id).await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn get_master_key(&self, user_id: OwnedUserId) -> Result { - let timer = tokio::time::Instant::now(); - let result = self - .services - .users - .get_master_key(None, &user_id, &|_| true) - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} - -#[admin_command] -async fn get_to_device_events(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { - let timer = tokio::time::Instant::now(); - let result = self - .services - .users - .get_to_device_events(&user_id, &device_id, None, None) - .collect::>() - .await; - let query_time = timer.elapsed(); - - self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) - .await -} diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs deleted file mode 100644 index 6b37ffe4..00000000 --- a/src/admin/room/alias.rs +++ /dev/null @@ -1,159 +0,0 @@ -use std::fmt::Write; - -use clap::Subcommand; -use conduwuit::{Err, Result}; -use futures::StreamExt; -use ruma::{OwnedRoomAliasId, OwnedRoomId}; - -use crate::Context; - -#[derive(Debug, Subcommand)] -pub(crate) enum RoomAliasCommand { - /// - Make an alias point to a room. 
- Set { - #[arg(short, long)] - /// Set the alias even if a room is already using it - force: bool, - - /// The room id to set the alias on - room_id: OwnedRoomId, - - /// The alias localpart to use (`alias`, not `#alias:servername.tld`) - room_alias_localpart: String, - }, - - /// - Remove a local alias - Remove { - /// The alias localpart to remove (`alias`, not `#alias:servername.tld`) - room_alias_localpart: String, - }, - - /// - Show which room is using an alias - Which { - /// The alias localpart to look up (`alias`, not - /// `#alias:servername.tld`) - room_alias_localpart: String, - }, - - /// - List aliases currently being used - List { - /// If set, only list the aliases for this room - room_id: Option, - }, -} - -pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) -> Result { - let services = context.services; - let server_user = &services.globals.server_user; - - match command { - | RoomAliasCommand::Set { ref room_alias_localpart, .. } - | RoomAliasCommand::Remove { ref room_alias_localpart } - | RoomAliasCommand::Which { ref room_alias_localpart } => { - let room_alias_str = - format!("#{}:{}", room_alias_localpart, services.globals.server_name()); - let room_alias = match OwnedRoomAliasId::parse(room_alias_str) { - | Ok(alias) => alias, - | Err(err) => { - return Err!("Failed to parse alias: {err}"); - }, - }; - match command { - | RoomAliasCommand::Set { force, room_id, .. 
} => { - match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) { - | (true, Ok(id)) => { - match services.rooms.alias.set_alias( - &room_alias, - &room_id, - server_user, - ) { - | Err(err) => Err!("Failed to remove alias: {err}"), - | Ok(()) => - context - .write_str(&format!( - "Successfully overwrote alias (formerly {id})" - )) - .await, - } - }, - | (false, Ok(id)) => Err!( - "Refusing to overwrite in use alias for {id}, use -f or --force to \ - overwrite" - ), - | (_, Err(_)) => { - match services.rooms.alias.set_alias( - &room_alias, - &room_id, - server_user, - ) { - | Err(err) => Err!("Failed to remove alias: {err}"), - | Ok(()) => context.write_str("Successfully set alias").await, - } - }, - } - }, - | RoomAliasCommand::Remove { .. } => { - match services.rooms.alias.resolve_local_alias(&room_alias).await { - | Err(_) => Err!("Alias isn't in use."), - | Ok(id) => match services - .rooms - .alias - .remove_alias(&room_alias, server_user) - .await - { - | Err(err) => Err!("Failed to remove alias: {err}"), - | Ok(()) => - context.write_str(&format!("Removed alias from {id}")).await, - }, - } - }, - | RoomAliasCommand::Which { .. } => { - match services.rooms.alias.resolve_local_alias(&room_alias).await { - | Err(_) => Err!("Alias isn't in use."), - | Ok(id) => context.write_str(&format!("Alias resolves to {id}")).await, - } - }, - | RoomAliasCommand::List { .. 
} => unreachable!(), - } - }, - | RoomAliasCommand::List { room_id } => - if let Some(room_id) = room_id { - let aliases: Vec = services - .rooms - .alias - .local_aliases_for_room(&room_id) - .map(Into::into) - .collect() - .await; - - let plain_list = aliases.iter().fold(String::new(), |mut output, alias| { - writeln!(output, "- {alias}") - .expect("should be able to write to string buffer"); - output - }); - - let plain = format!("Aliases for {room_id}:\n{plain_list}"); - context.write_str(&plain).await - } else { - let aliases = services - .rooms - .alias - .all_local_aliases() - .map(|(room_id, localpart)| (room_id.into(), localpart.into())) - .collect::>() - .await; - - let server_name = services.globals.server_name(); - let plain_list = aliases - .iter() - .fold(String::new(), |mut output, (alias, id)| { - writeln!(output, "- `{alias}` -> #{id}:{server_name}") - .expect("should be able to write to string buffer"); - output - }); - - let plain = format!("Aliases:\n{plain_list}"); - context.write_str(&plain).await - }, - } -} diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs deleted file mode 100644 index 81f36f15..00000000 --- a/src/admin/room/commands.rs +++ /dev/null @@ -1,68 +0,0 @@ -use conduwuit::{Err, Result}; -use futures::StreamExt; -use ruma::OwnedRoomId; - -use crate::{PAGE_SIZE, admin_command, get_room_info}; - -#[admin_command] -pub(super) async fn list_rooms( - &self, - page: Option, - exclude_disabled: bool, - exclude_banned: bool, - no_details: bool, -) -> Result { - // TODO: i know there's a way to do this with clap, but i can't seem to find it - let page = page.unwrap_or(1); - let mut rooms = self - .services - .rooms - .metadata - .iter_ids() - .filter_map(|room_id| async move { - (!exclude_disabled || !self.services.rooms.metadata.is_disabled(room_id).await) - .then_some(room_id) - }) - .filter_map(|room_id| async move { - (!exclude_banned || !self.services.rooms.metadata.is_banned(room_id).await) - .then_some(room_id) - 
}) - .then(|room_id| get_room_info(self.services, room_id)) - .collect::>() - .await; - - rooms.sort_by_key(|r| r.1); - rooms.reverse(); - - let rooms = rooms - .into_iter() - .skip(page.saturating_sub(1).saturating_mul(PAGE_SIZE)) - .take(PAGE_SIZE) - .collect::>(); - - if rooms.is_empty() { - return Err!("No more rooms."); - } - - let body = rooms - .iter() - .map(|(id, members, name)| { - if no_details { - format!("{id}") - } else { - format!("{id}\tMembers: {members}\tName: {name}") - } - }) - .collect::>() - .join("\n"); - - self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),)) - .await -} - -#[admin_command] -pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result { - let result = self.services.rooms.metadata.exists(&room_id).await; - - self.write_str(&format!("{result}")).await -} diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs deleted file mode 100644 index a6be9a15..00000000 --- a/src/admin/room/directory.rs +++ /dev/null @@ -1,74 +0,0 @@ -use clap::Subcommand; -use conduwuit::{Err, Result}; -use futures::StreamExt; -use ruma::OwnedRoomId; - -use crate::{Context, PAGE_SIZE, get_room_info}; - -#[derive(Debug, Subcommand)] -pub(crate) enum RoomDirectoryCommand { - /// - Publish a room to the room directory - Publish { - /// The room id of the room to publish - room_id: OwnedRoomId, - }, - - /// - Unpublish a room to the room directory - Unpublish { - /// The room id of the room to unpublish - room_id: OwnedRoomId, - }, - - /// - List rooms that are published - List { - page: Option, - }, -} - -pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>) -> Result { - let services = context.services; - match command { - | RoomDirectoryCommand::Publish { room_id } => { - services.rooms.directory.set_public(&room_id); - context.write_str("Room published").await - }, - | RoomDirectoryCommand::Unpublish { room_id } => { - services.rooms.directory.set_not_public(&room_id); - 
context.write_str("Room unpublished").await - }, - | RoomDirectoryCommand::List { page } => { - // TODO: i know there's a way to do this with clap, but i can't seem to find it - let page = page.unwrap_or(1); - let mut rooms: Vec<_> = services - .rooms - .directory - .public_rooms() - .then(|room_id| get_room_info(services, room_id)) - .collect() - .await; - - rooms.sort_by_key(|r| r.1); - rooms.reverse(); - - let rooms: Vec<_> = rooms - .into_iter() - .skip(page.saturating_sub(1).saturating_mul(PAGE_SIZE)) - .take(PAGE_SIZE) - .collect(); - - if rooms.is_empty() { - return Err!("No more rooms."); - } - - let body = rooms - .iter() - .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) - .collect::>() - .join("\n"); - - context - .write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",)) - .await - }, - } -} diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs deleted file mode 100644 index 1278e820..00000000 --- a/src/admin/room/info.rs +++ /dev/null @@ -1,88 +0,0 @@ -use clap::Subcommand; -use conduwuit::{Err, Result, utils::ReadyExt}; -use futures::StreamExt; -use ruma::OwnedRoomId; - -use crate::{admin_command, admin_command_dispatch}; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -pub(crate) enum RoomInfoCommand { - /// - List joined members in a room - ListJoinedMembers { - room_id: OwnedRoomId, - - /// Lists only our local users in the specified room - #[arg(long)] - local_only: bool, - }, - - /// - Displays room topic - /// - /// Room topics can be huge, so this is in its - /// own separate command - ViewRoomTopic { - room_id: OwnedRoomId, - }, -} - -#[admin_command] -async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> Result { - let room_name = self - .services - .rooms - .state_accessor - .get_name(&room_id) - .await - .unwrap_or_else(|_| room_id.to_string()); - - let member_info: Vec<_> = self - .services - .rooms - .state_cache - .room_members(&room_id) - 
.ready_filter(|user_id| { - local_only - .then(|| self.services.globals.user_is_local(user_id)) - .unwrap_or(true) - }) - .map(ToOwned::to_owned) - .filter_map(|user_id| async move { - Some(( - self.services - .users - .displayname(&user_id) - .await - .unwrap_or_else(|_| user_id.to_string()), - user_id, - )) - }) - .collect() - .await; - - let num = member_info.len(); - let body = member_info - .into_iter() - .map(|(displayname, mxid)| format!("{mxid} | {displayname}")) - .collect::>() - .join("\n"); - - self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",)) - .await -} - -#[admin_command] -async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result { - let Ok(room_topic) = self - .services - .rooms - .state_accessor - .get_room_topic(&room_id) - .await - else { - return Err!("Room does not have a room topic set."); - }; - - self.write_str(&format!("Room topic:\n```\n{room_topic}\n```")) - .await -} diff --git a/src/admin/room/mod.rs b/src/admin/room/mod.rs deleted file mode 100644 index 26d2c2d8..00000000 --- a/src/admin/room/mod.rs +++ /dev/null @@ -1,59 +0,0 @@ -mod alias; -mod commands; -mod directory; -mod info; -mod moderation; - -use clap::Subcommand; -use conduwuit::Result; -use ruma::OwnedRoomId; - -use self::{ - alias::RoomAliasCommand, directory::RoomDirectoryCommand, info::RoomInfoCommand, - moderation::RoomModerationCommand, -}; -use crate::admin_command_dispatch; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -pub(super) enum RoomCommand { - /// - List all rooms the server knows about - #[clap(alias = "list")] - ListRooms { - page: Option, - - /// Excludes rooms that we have federation disabled with - #[arg(long)] - exclude_disabled: bool, - - /// Excludes rooms that we have banned - #[arg(long)] - exclude_banned: bool, - - #[arg(long)] - /// Whether to only output room IDs without supplementary room - /// information - no_details: bool, - }, - - #[command(subcommand)] - /// - View information about a 
room we know about - Info(RoomInfoCommand), - - #[command(subcommand)] - /// - Manage moderation of remote or local rooms - Moderation(RoomModerationCommand), - - #[command(subcommand)] - /// - Manage rooms' aliases - Alias(RoomAliasCommand), - - #[command(subcommand)] - /// - Manage the room directory - Directory(RoomDirectoryCommand), - - /// - Check if we know about a room - Exists { - room_id: OwnedRoomId, - }, -} diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs deleted file mode 100644 index ee429fc6..00000000 --- a/src/admin/room/moderation.rs +++ /dev/null @@ -1,490 +0,0 @@ -use api::client::leave_room; -use clap::Subcommand; -use conduwuit::{ - Err, Result, debug, - utils::{IterStream, ReadyExt}, - warn, -}; -use futures::StreamExt; -use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId}; - -use crate::{admin_command, admin_command_dispatch, get_room_info}; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -pub(crate) enum RoomModerationCommand { - /// - Bans a room from local users joining and evicts all our local users - /// (including server - /// admins) - /// from the room. Also blocks any invites (local and remote) for the - /// banned room, and disables federation entirely with it. - BanRoom { - /// The room in the format of `!roomid:example.com` or a room alias in - /// the format of `#roomalias:example.com` - room: OwnedRoomOrAliasId, - }, - - /// - Bans a list of rooms (room IDs and room aliases) from a newline - /// delimited codeblock similar to `user deactivate-all`. 
Applies the same - /// steps as ban-room - BanListOfRooms, - - /// - Unbans a room to allow local users to join again - UnbanRoom { - /// The room in the format of `!roomid:example.com` or a room alias in - /// the format of `#roomalias:example.com` - room: OwnedRoomOrAliasId, - }, - - /// - List of all rooms we have banned - ListBannedRooms { - #[arg(long)] - /// Whether to only output room IDs without supplementary room - /// information - no_details: bool, - }, -} - -#[admin_command] -async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { - debug!("Got room alias or ID: {}", room); - - let admin_room_alias = &self.services.globals.admin_alias; - - if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { - if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) { - return Err!("Not allowed to ban the admin room."); - } - } - - let room_id = if room.is_room_id() { - let room_id = match RoomId::parse(&room) { - | Ok(room_id) => room_id, - | Err(e) => { - return Err!( - "Failed to parse room ID {room}. Please note that this requires a full room \ - ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ - (`#roomalias:example.com`): {e}" - ); - }, - }; - - debug!("Room specified is a room ID, banning room ID"); - self.services.rooms.metadata.ban_room(room_id, true); - - room_id.to_owned() - } else if room.is_room_alias_id() { - let room_alias = match RoomAliasId::parse(&room) { - | Ok(room_alias) => room_alias, - | Err(e) => { - return Err!( - "Failed to parse room ID {room}. 
Please note that this requires a full room \ - ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ - (`#roomalias:example.com`): {e}" - ); - }, - }; - - debug!( - "Room specified is not a room ID, attempting to resolve room alias to a room ID \ - locally, if not using get_alias_helper to fetch room ID remotely" - ); - - let room_id = match self - .services - .rooms - .alias - .resolve_local_alias(room_alias) - .await - { - | Ok(room_id) => room_id, - | _ => { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch \ - room ID over federation" - ); - - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room_id}" - ); - room_id - }, - | Err(e) => { - return Err!( - "Failed to resolve room alias {room_alias} to a room ID: {e}" - ); - }, - } - }, - }; - - self.services.rooms.metadata.ban_room(&room_id, true); - - room_id - } else { - return Err!( - "Room specified is not a room ID or room alias. 
Please note that this requires a \ - full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ - (`#roomalias:example.com`)", - ); - }; - - debug!("Making all users leave the room {room_id} and forgetting it"); - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .map(ToOwned::to_owned) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - - while let Some(ref user_id) = users.next().await { - debug!( - "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ - evicting admins too)", - ); - - if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { - warn!("Failed to leave room: {e}"); - } - - self.services.rooms.state_cache.forget(&room_id, user_id); - } - - self.services - .rooms - .alias - .local_aliases_for_room(&room_id) - .map(ToOwned::to_owned) - .for_each(|local_alias| async move { - self.services - .rooms - .alias - .remove_alias(&local_alias, &self.services.globals.server_user) - .await - .ok(); - }) - .await; - - // unpublish from room directory - self.services.rooms.directory.set_not_public(&room_id); - - self.services.rooms.metadata.disable_room(&room_id, true); - - self.write_str( - "Room banned, removed all our local users, and disabled incoming federation with room.", - ) - .await -} - -#[admin_command] -async fn ban_list_of_rooms(&self) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" - { - return Err!("Expected code block in command body. 
Add --help for details.",); - } - - let rooms_s = self - .body - .to_vec() - .drain(1..self.body.len().saturating_sub(1)) - .collect::>(); - - let admin_room_alias = &self.services.globals.admin_alias; - - let mut room_ban_count: usize = 0; - let mut room_ids: Vec = Vec::new(); - - for &room in &rooms_s { - match <&RoomOrAliasId>::try_from(room) { - | Ok(room_alias_or_id) => { - if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { - if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias) - { - warn!("User specified admin room in bulk ban list, ignoring"); - continue; - } - } - - if room_alias_or_id.is_room_id() { - let room_id = match RoomId::parse(room_alias_or_id) { - | Ok(room_id) => room_id, - | Err(e) => { - // ignore rooms we failed to parse - warn!( - "Error parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - }, - }; - - room_ids.push(room_id.to_owned()); - } - - if room_alias_or_id.is_room_alias_id() { - match RoomAliasId::parse(room_alias_or_id) { - | Ok(room_alias) => { - let room_id = match self - .services - .rooms - .alias - .resolve_local_alias(room_alias) - .await - { - | Ok(room_id) => room_id, - | _ => { - debug!( - "We don't have this room alias to a room ID locally, \ - attempting to fetch room ID over federation" - ); - - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for \ - {room}", - ); - room_id - }, - | Err(e) => { - warn!( - "Failed to resolve room alias {room} to a room \ - ID: {e}" - ); - continue; - }, - } - }, - }; - - room_ids.push(room_id); - }, - | Err(e) => { - warn!( - "Error parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - }, - } - } - }, - | Err(e) => { - warn!( - "Error parsing room \"{room}\" during bulk room banning, 
ignoring error and \ - logging here: {e}" - ); - continue; - }, - } - } - - for room_id in room_ids { - self.services.rooms.metadata.ban_room(&room_id, true); - - debug!("Banned {room_id} successfully"); - room_ban_count = room_ban_count.saturating_add(1); - - debug!("Making all users leave the room {room_id} and forgetting it"); - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .map(ToOwned::to_owned) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - - while let Some(ref user_id) = users.next().await { - debug!( - "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ - evicting admins too)", - ); - - if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { - warn!("Failed to leave room: {e}"); - } - - self.services.rooms.state_cache.forget(&room_id, user_id); - } - - // remove any local aliases, ignore errors - self.services - .rooms - .alias - .local_aliases_for_room(&room_id) - .map(ToOwned::to_owned) - .for_each(|local_alias| async move { - self.services - .rooms - .alias - .remove_alias(&local_alias, &self.services.globals.server_user) - .await - .ok(); - }) - .await; - - // unpublish from room directory, ignore errors - self.services.rooms.directory.set_not_public(&room_id); - - self.services.rooms.metadata.disable_room(&room_id, true); - } - - self.write_str(&format!( - "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \ - disabled incoming federation with the room." - )) - .await -} - -#[admin_command] -async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { - let room_id = if room.is_room_id() { - let room_id = match RoomId::parse(&room) { - | Ok(room_id) => room_id, - | Err(e) => { - return Err!( - "Failed to parse room ID {room}. 
Please note that this requires a full room \ - ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ - (`#roomalias:example.com`): {e}" - ); - }, - }; - - debug!("Room specified is a room ID, unbanning room ID"); - self.services.rooms.metadata.ban_room(room_id, false); - - room_id.to_owned() - } else if room.is_room_alias_id() { - let room_alias = match RoomAliasId::parse(&room) { - | Ok(room_alias) => room_alias, - | Err(e) => { - return Err!( - "Failed to parse room ID {room}. Please note that this requires a full room \ - ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ - (`#roomalias:example.com`): {e}" - ); - }, - }; - - debug!( - "Room specified is not a room ID, attempting to resolve room alias to a room ID \ - locally, if not using get_alias_helper to fetch room ID remotely" - ); - - let room_id = match self - .services - .rooms - .alias - .resolve_local_alias(room_alias) - .await - { - | Ok(room_id) => room_id, - | _ => { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch \ - room ID over federation" - ); - - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for room {room}" - ); - room_id - }, - | Err(e) => { - return Err!("Failed to resolve room alias {room} to a room ID: {e}"); - }, - } - }, - }; - - self.services.rooms.metadata.ban_room(&room_id, false); - - room_id - } else { - return Err!( - "Room specified is not a room ID or room alias. 
Please note that this requires a \ - full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ - (`#roomalias:example.com`)", - ); - }; - - self.services.rooms.metadata.disable_room(&room_id, false); - self.write_str("Room unbanned and federation re-enabled.") - .await -} - -#[admin_command] -async fn list_banned_rooms(&self, no_details: bool) -> Result { - let room_ids: Vec = self - .services - .rooms - .metadata - .list_banned_rooms() - .map(Into::into) - .collect() - .await; - - if room_ids.is_empty() { - return Err!("No rooms are banned."); - } - - let mut rooms = room_ids - .iter() - .stream() - .then(|room_id| get_room_info(self.services, room_id)) - .collect::>() - .await; - - rooms.sort_by_key(|r| r.1); - rooms.reverse(); - - let num = rooms.len(); - - let body = rooms - .iter() - .map(|(id, members, name)| { - if no_details { - format!("{id}") - } else { - format!("{id}\tMembers: {members}\tName: {name}") - } - }) - .collect::>() - .join("\n"); - - self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",)) - .await -} diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs deleted file mode 100644 index 6027a9eb..00000000 --- a/src/admin/server/commands.rs +++ /dev/null @@ -1,154 +0,0 @@ -use std::{fmt::Write, path::PathBuf, sync::Arc}; - -use conduwuit::{ - Err, Result, info, - utils::{stream::IterStream, time}, - warn, -}; -use futures::TryStreamExt; - -use crate::admin_command; - -#[admin_command] -pub(super) async fn uptime(&self) -> Result { - let elapsed = self - .services - .server - .started - .elapsed() - .expect("standard duration"); - - let result = time::pretty(elapsed); - self.write_str(&format!("{result}.")).await -} - -#[admin_command] -pub(super) async fn show_config(&self) -> Result { - self.write_str(&format!("{}", *self.services.server.config)) - .await -} - -#[admin_command] -pub(super) async fn reload_config(&self, path: Option) -> Result { - let path = path.as_deref().into_iter(); - 
self.services.config.reload(path)?; - - self.write_str("Successfully reconfigured.").await -} - -#[admin_command] -pub(super) async fn list_features(&self, available: bool, enabled: bool, comma: bool) -> Result { - let delim = if comma { "," } else { " " }; - if enabled && !available { - let features = info::rustc::features().join(delim); - let out = format!("`\n{features}\n`"); - return self.write_str(&out).await; - } - - if available && !enabled { - let features = info::cargo::features().join(delim); - let out = format!("`\n{features}\n`"); - return self.write_str(&out).await; - } - - let mut features = String::new(); - let enabled = info::rustc::features(); - let available = info::cargo::features(); - for feature in available { - let active = enabled.contains(&feature.as_str()); - let emoji = if active { "✅" } else { "❌" }; - let remark = if active { "[enabled]" } else { "" }; - writeln!(features, "{emoji} {feature} {remark}")?; - } - - self.write_str(&features).await -} - -#[admin_command] -pub(super) async fn memory_usage(&self) -> Result { - let services_usage = self.services.memory_usage().await?; - let database_usage = self.services.db.db.memory_usage()?; - let allocator_usage = - conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}")); - - self.write_str(&format!( - "Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}", - )) - .await -} - -#[admin_command] -pub(super) async fn clear_caches(&self) -> Result { - self.services.clear_cache().await; - - self.write_str("Done.").await -} - -#[admin_command] -pub(super) async fn list_backups(&self) -> Result { - self.services - .db - .db - .backup_list()? 
- .try_stream() - .try_for_each(|result| write!(self, "{result}")) - .await -} - -#[admin_command] -pub(super) async fn backup_database(&self) -> Result { - let db = Arc::clone(&self.services.db); - let result = self - .services - .server - .runtime() - .spawn_blocking(move || match db.db.backup() { - | Ok(()) => "Done".to_owned(), - | Err(e) => format!("Failed: {e}"), - }) - .await?; - - let count = self.services.db.db.backup_count()?; - self.write_str(&format!("{result}. Currently have {count} backups.")) - .await -} - -#[admin_command] -pub(super) async fn admin_notice(&self, message: Vec) -> Result { - let message = message.join(" "); - self.services.admin.send_text(&message).await; - - self.write_str("Notice was sent to #admins").await -} - -#[admin_command] -pub(super) async fn reload_mods(&self) -> Result { - self.services.server.reload()?; - - self.write_str("Reloading server...").await -} - -#[admin_command] -#[cfg(unix)] -pub(super) async fn restart(&self, force: bool) -> Result { - use conduwuit::utils::sys::current_exe_deleted; - - if !force && current_exe_deleted() { - return Err!( - "The server cannot be restarted because the executable changed. If this is expected \ - use --force to override." 
- ); - } - - self.services.server.restart()?; - - self.write_str("Restarting server...").await -} - -#[admin_command] -pub(super) async fn shutdown(&self) -> Result { - warn!("shutdown command"); - self.services.server.shutdown()?; - - self.write_str("Shutting down server...").await -} diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs deleted file mode 100644 index 6b99e5de..00000000 --- a/src/admin/server/mod.rs +++ /dev/null @@ -1,67 +0,0 @@ -mod commands; - -use std::path::PathBuf; - -use clap::Subcommand; -use conduwuit::Result; - -use crate::admin_command_dispatch; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -pub(super) enum ServerCommand { - /// - Time elapsed since startup - Uptime, - - /// - Show configuration values - ShowConfig, - - /// - Reload configuration values - ReloadConfig { - path: Option, - }, - - /// - List the features built into the server - ListFeatures { - #[arg(short, long)] - available: bool, - - #[arg(short, long)] - enabled: bool, - - #[arg(short, long)] - comma: bool, - }, - - /// - Print database memory usage statistics - MemoryUsage, - - /// - Clears all of Continuwuity's caches - ClearCaches, - - /// - Performs an online backup of the database (only available for RocksDB - /// at the moment) - BackupDatabase, - - /// - List database backups - ListBackups, - - /// - Send a message to the admin room. 
- AdminNotice { - message: Vec, - }, - - /// - Hot-reload the server - #[clap(alias = "reload")] - ReloadMods, - - #[cfg(unix)] - /// - Restart the server - Restart { - #[arg(short, long)] - force: bool, - }, - - /// - Shutdown the server - Shutdown, -} diff --git a/src/admin/tests.rs b/src/admin/tests.rs deleted file mode 100644 index 296d4888..00000000 --- a/src/admin/tests.rs +++ /dev/null @@ -1,26 +0,0 @@ -#![cfg(test)] - -#[test] -fn get_help_short() { get_help_inner("-h"); } - -#[test] -fn get_help_long() { get_help_inner("--help"); } - -#[test] -fn get_help_subcommand() { get_help_inner("help"); } - -fn get_help_inner(input: &str) { - use clap::Parser; - - use crate::admin::AdminCommand; - - let Err(error) = AdminCommand::try_parse_from(["argv[0] doesn't matter", input]) else { - panic!("no error!"); - }; - - let error = error.to_string(); - // Search for a handful of keywords that suggest the help printed properly - assert!(error.contains("Usage:")); - assert!(error.contains("Commands:")); - assert!(error.contains("Options:")); -} diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs deleted file mode 100644 index e5e481e5..00000000 --- a/src/admin/user/commands.rs +++ /dev/null @@ -1,883 +0,0 @@ -use std::{collections::BTreeMap, fmt::Write as _}; - -use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; -use conduwuit::{ - Err, Result, debug, debug_warn, error, info, is_equal_to, - matrix::pdu::PduBuilder, - utils::{self, ReadyExt}, - warn, -}; -use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; -use futures::StreamExt; -use ruma::{ - OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId, - events::{ - RoomAccountDataEventType, StateEventType, - room::{ - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - redaction::RoomRedactionEventContent, - }, - tag::{TagEvent, TagEventContent, TagInfo}, - }, -}; - -use crate::{ - admin_command, get_room_info, - 
utils::{parse_active_local_user_id, parse_local_user_id}, -}; - -const AUTO_GEN_PASSWORD_LENGTH: usize = 25; -const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin."; - -#[admin_command] -pub(super) async fn list_users(&self) -> Result { - let users: Vec<_> = self - .services - .users - .list_local_users() - .map(ToString::to_string) - .collect() - .await; - - let mut plain_msg = format!("Found {} local user account(s):\n```\n", users.len()); - plain_msg += users.join("\n").as_str(); - plain_msg += "\n```"; - - self.write_str(&plain_msg).await -} - -#[admin_command] -pub(super) async fn create_user(&self, username: String, password: Option) -> Result { - // Validate user id - let user_id = parse_local_user_id(self.services, &username)?; - - if let Err(e) = user_id.validate_strict() { - if self.services.config.emergency_password.is_none() { - return Err!("Username {user_id} contains disallowed characters or spaces: {e}"); - } - } - - if self.services.users.exists(&user_id).await { - return Err!("User {user_id} already exists"); - } - - let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); - - // Create user - self.services - .users - .create(&user_id, Some(password.as_str()))?; - - // Default to pretty displayname - let mut displayname = user_id.localpart().to_owned(); - - // If `new_user_displayname_suffix` is set, registration will push whatever - // content is set to the user's display name with a space before it - if !self - .services - .server - .config - .new_user_displayname_suffix - .is_empty() - { - write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)?; - } - - self.services - .users - .set_displayname(&user_id, Some(displayname)); - - // Initial account data - self.services - .account_data - .update( - None, - &user_id, - ruma::events::GlobalAccountDataEventType::PushRules - .to_string() - .into(), - 
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: ruma::push::Ruleset::server_default(&user_id), - }, - })?, - ) - .await?; - - if !self.services.server.config.auto_join_rooms.is_empty() { - for room in &self.services.server.config.auto_join_rooms { - let Ok(room_id) = self.services.rooms.alias.resolve(room).await else { - error!( - %user_id, - "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping" - ); - continue; - }; - - if !self - .services - .rooms - .state_cache - .server_in_room(self.services.globals.server_name(), &room_id) - .await - { - warn!( - "Skipping room {room} to automatically join as we have never joined before." - ); - continue; - } - - if let Some(room_server_name) = room.server_name() { - match join_room_by_id_helper( - self.services, - &user_id, - &room_id, - Some("Automatically joining this room upon registration".to_owned()), - &[ - self.services.globals.server_name().to_owned(), - room_server_name.to_owned(), - ], - None, - &None, - ) - .await - { - | Ok(_response) => { - info!("Automatically joined room {room} for user {user_id}"); - }, - | Err(e) => { - // don't return this error so we don't fail registrations - error!( - "Failed to automatically join room {room} for user {user_id}: {e}" - ); - self.services - .admin - .send_text(&format!( - "Failed to automatically join room {room} for user {user_id}: \ - {e}" - )) - .await; - }, - } - } - } - } - - // we dont add a device since we're not the user, just the creator - - // if this account creation is from the CLI / --execute, invite the first user - // to admin room - if let Ok(admin_room) = self.services.admin.get_admin_room().await { - if self - .services - .rooms - .state_cache - .room_joined_count(&admin_room) - .await - .is_ok_and(is_equal_to!(1)) - { - self.services.admin.make_user_admin(&user_id).await?; - warn!("Granting {user_id} admin privileges as the first 
user"); - } - } else { - debug!("create_user admin command called without an admin room being available"); - } - - self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`")) - .await -} - -#[admin_command] -pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> Result { - // Validate user id - let user_id = parse_local_user_id(self.services, &user_id)?; - - // don't deactivate the server service account - if user_id == self.services.globals.server_user { - return Err!("Not allowed to deactivate the server service account.",); - } - - self.services.users.deactivate_account(&user_id).await?; - - if !no_leave_rooms { - self.services - .admin - .send_text(&format!("Making {user_id} leave all rooms after deactivation...")) - .await; - - let all_joined_rooms: Vec = self - .services - .rooms - .state_cache - .rooms_joined(&user_id) - .map(Into::into) - .collect() - .await; - - full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?; - update_displayname(self.services, &user_id, None, &all_joined_rooms).await; - update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms).await; - leave_all_rooms(self.services, &user_id).await; - } - - self.write_str(&format!("User {user_id} has been deactivated")) - .await -} - -#[admin_command] -pub(super) async fn reset_password(&self, username: String, password: Option) -> Result { - let user_id = parse_local_user_id(self.services, &username)?; - - if user_id == self.services.globals.server_user { - return Err!( - "Not allowed to set the password for the server account. 
Please use the emergency \ - password config option.", - ); - } - - let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); - - match self - .services - .users - .set_password(&user_id, Some(new_password.as_str())) - { - | Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"), - | Ok(()) => - write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"), - } - .await -} - -#[admin_command] -pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" - { - return Err!("Expected code block in command body. Add --help for details.",); - } - - let usernames = self - .body - .to_vec() - .drain(1..self.body.len().saturating_sub(1)) - .collect::>(); - - let mut user_ids: Vec = Vec::with_capacity(usernames.len()); - let mut admins = Vec::new(); - - for username in usernames { - match parse_active_local_user_id(self.services, username).await { - | Err(e) => { - self.services - .admin - .send_text(&format!("{username} is not a valid username, skipping over: {e}")) - .await; - - continue; - }, - | Ok(user_id) => { - if self.services.users.is_admin(&user_id).await && !force { - self.services - .admin - .send_text(&format!( - "{username} is an admin and --force is not set, skipping over" - )) - .await; - - admins.push(username); - continue; - } - - // don't deactivate the server service account - if user_id == self.services.globals.server_user { - self.services - .admin - .send_text(&format!( - "{username} is the server service account, skipping over" - )) - .await; - - continue; - } - - user_ids.push(user_id); - }, - } - } - - let mut deactivation_count: usize = 0; - - for user_id in user_ids { - match self.services.users.deactivate_account(&user_id).await { - | Err(e) => { - self.services - .admin - .send_text(&format!("Failed deactivating 
user: {e}")) - .await; - }, - | Ok(()) => { - deactivation_count = deactivation_count.saturating_add(1); - if !no_leave_rooms { - info!("Forcing user {user_id} to leave all rooms apart of deactivate-all"); - let all_joined_rooms: Vec = self - .services - .rooms - .state_cache - .rooms_joined(&user_id) - .map(Into::into) - .collect() - .await; - - full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?; - update_displayname(self.services, &user_id, None, &all_joined_rooms).await; - update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms) - .await; - leave_all_rooms(self.services, &user_id).await; - } - }, - } - } - - if admins.is_empty() { - write!(self, "Deactivated {deactivation_count} accounts.") - } else { - write!( - self, - "Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \ - --force to deactivate admin accounts", - admins.join(", ") - ) - } - .await -} - -#[admin_command] -pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result { - // Validate user id - let user_id = parse_local_user_id(self.services, &user_id)?; - - let mut rooms: Vec<(OwnedRoomId, u64, String)> = self - .services - .rooms - .state_cache - .rooms_joined(&user_id) - .then(|room_id| get_room_info(self.services, room_id)) - .collect() - .await; - - if rooms.is_empty() { - return Err!("User is not in any rooms."); - } - - rooms.sort_by_key(|r| r.1); - rooms.reverse(); - - let body = rooms - .iter() - .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) - .collect::>() - .join("\n"); - - self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),)) - .await -} - -#[admin_command] -pub(super) async fn force_join_list_of_local_users( - &self, - room_id: OwnedRoomOrAliasId, - yes_i_want_to_do_this: bool, -) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" - { - return Err!("Expected code 
block in command body. Add --help for details.",); - } - - if !yes_i_want_to_do_this { - return Err!( - "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ - bulk join all specified local users.", - ); - } - - let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Err!("There is not an admin room to check for server admins.",); - }; - - let (room_id, servers) = self - .services - .rooms - .alias - .resolve_with_servers(&room_id, None) - .await?; - - if !self - .services - .rooms - .state_cache - .server_in_room(self.services.globals.server_name(), &room_id) - .await - { - return Err!("We are not joined in this room."); - } - - let server_admins: Vec<_> = self - .services - .rooms - .state_cache - .active_local_users_in_room(&admin_room) - .map(ToOwned::to_owned) - .collect() - .await; - - if !self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) - .await - { - return Err!("There is not a single server admin in the room.",); - } - - let usernames = self - .body - .to_vec() - .drain(1..self.body.len().saturating_sub(1)) - .collect::>(); - - let mut user_ids: Vec = Vec::with_capacity(usernames.len()); - - for username in usernames { - match parse_active_local_user_id(self.services, username).await { - | Ok(user_id) => { - // don't make the server service account join - if user_id == self.services.globals.server_user { - self.services - .admin - .send_text(&format!( - "{username} is the server service account, skipping over" - )) - .await; - - continue; - } - - user_ids.push(user_id); - }, - | Err(e) => { - self.services - .admin - .send_text(&format!("{username} is not a valid username, skipping over: {e}")) - .await; - - continue; - }, - } - } - - let mut failed_joins: usize = 0; - let mut successful_joins: usize = 0; - - for user_id in user_ids { - match join_room_by_id_helper( - self.services, - &user_id, - &room_id, - 
Some(String::from(BULK_JOIN_REASON)), - &servers, - None, - &None, - ) - .await - { - | Ok(_res) => { - successful_joins = successful_joins.saturating_add(1); - }, - | Err(e) => { - debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); - failed_joins = failed_joins.saturating_add(1); - }, - } - } - - self.write_str(&format!( - "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \ - failed.", - )) - .await -} - -#[admin_command] -pub(super) async fn force_join_all_local_users( - &self, - room_id: OwnedRoomOrAliasId, - yes_i_want_to_do_this: bool, -) -> Result { - if !yes_i_want_to_do_this { - return Err!( - "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ - bulk join all local users.", - ); - } - - let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Err!("There is not an admin room to check for server admins.",); - }; - - let (room_id, servers) = self - .services - .rooms - .alias - .resolve_with_servers(&room_id, None) - .await?; - - if !self - .services - .rooms - .state_cache - .server_in_room(self.services.globals.server_name(), &room_id) - .await - { - return Err!("We are not joined in this room."); - } - - let server_admins: Vec<_> = self - .services - .rooms - .state_cache - .active_local_users_in_room(&admin_room) - .map(ToOwned::to_owned) - .collect() - .await; - - if !self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) - .await - { - return Err!("There is not a single server admin in the room.",); - } - - let mut failed_joins: usize = 0; - let mut successful_joins: usize = 0; - - for user_id in &self - .services - .users - .list_local_users() - .map(UserId::to_owned) - .collect::>() - .await - { - match join_room_by_id_helper( - self.services, - user_id, - &room_id, - Some(String::from(BULK_JOIN_REASON)), - &servers, - None, - &None, - ) - .await - { - 
| Ok(_res) => { - successful_joins = successful_joins.saturating_add(1); - }, - | Err(e) => { - debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); - failed_joins = failed_joins.saturating_add(1); - }, - } - } - - self.write_str(&format!( - "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \ - failed.", - )) - .await -} - -#[admin_command] -pub(super) async fn force_join_room( - &self, - user_id: String, - room_id: OwnedRoomOrAliasId, -) -> Result { - let user_id = parse_local_user_id(self.services, &user_id)?; - let (room_id, servers) = self - .services - .rooms - .alias - .resolve_with_servers(&room_id, None) - .await?; - - assert!( - self.services.globals.user_is_local(&user_id), - "Parsed user_id must be a local user" - ); - join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None) - .await?; - - self.write_str(&format!("{user_id} has been joined to {room_id}.",)) - .await -} - -#[admin_command] -pub(super) async fn force_leave_room( - &self, - user_id: String, - room_id: OwnedRoomOrAliasId, -) -> Result { - let user_id = parse_local_user_id(self.services, &user_id)?; - let room_id = self.services.rooms.alias.resolve(&room_id).await?; - - assert!( - self.services.globals.user_is_local(&user_id), - "Parsed user_id must be a local user" - ); - - if !self - .services - .rooms - .state_cache - .is_joined(&user_id, &room_id) - .await - { - return Err!("{user_id} is not joined in the room"); - } - - leave_room(self.services, &user_id, &room_id, None).await?; - - self.write_str(&format!("{user_id} has left {room_id}.",)) - .await -} - -#[admin_command] -pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAliasId) -> Result { - let user_id = parse_local_user_id(self.services, &user_id)?; - let room_id = self.services.rooms.alias.resolve(&room_id).await?; - - assert!( - self.services.globals.user_is_local(&user_id), - "Parsed user_id must be a local 
user" - ); - - let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; - - let room_power_levels: Option = self - .services - .rooms - .state_accessor - .room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "") - .await - .ok(); - - let user_can_demote_self = room_power_levels - .as_ref() - .is_some_and(|power_levels_content| { - RoomPowerLevels::from(power_levels_content.clone()) - .user_can_change_user_power_level(&user_id, &user_id) - }) || self - .services - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCreate, "") - .await - .is_ok_and(|event| event.sender == user_id); - - if !user_can_demote_self { - return Err!("User is not allowed to modify their own power levels in the room.",); - } - - let mut power_levels_content = room_power_levels.unwrap_or_default(); - power_levels_content.users.remove(&user_id); - - let event_id = self - .services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &power_levels_content), - &user_id, - &room_id, - &state_lock, - ) - .await?; - - self.write_str(&format!( - "User {user_id} demoted themselves to the room default power level in {room_id} - \ - {event_id}" - )) - .await -} - -#[admin_command] -pub(super) async fn make_user_admin(&self, user_id: String) -> Result { - let user_id = parse_local_user_id(self.services, &user_id)?; - assert!( - self.services.globals.user_is_local(&user_id), - "Parsed user_id must be a local user" - ); - - self.services.admin.make_user_admin(&user_id).await?; - - self.write_str(&format!("{user_id} has been granted admin privileges.",)) - .await -} - -#[admin_command] -pub(super) async fn put_room_tag( - &self, - user_id: String, - room_id: OwnedRoomId, - tag: String, -) -> Result { - let user_id = parse_active_local_user_id(self.services, &user_id).await?; - - let mut tags_event = self - .services - .account_data - .get_room(&room_id, &user_id, RoomAccountDataEventType::Tag) - .await - .unwrap_or(TagEvent { - 
content: TagEventContent { tags: BTreeMap::new() }, - }); - - tags_event - .content - .tags - .insert(tag.clone().into(), TagInfo::new()); - - self.services - .account_data - .update( - Some(&room_id), - &user_id, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), - ) - .await?; - - self.write_str(&format!( - "Successfully updated room account data for {user_id} and room {room_id} with tag {tag}" - )) - .await -} - -#[admin_command] -pub(super) async fn delete_room_tag( - &self, - user_id: String, - room_id: OwnedRoomId, - tag: String, -) -> Result { - let user_id = parse_active_local_user_id(self.services, &user_id).await?; - - let mut tags_event = self - .services - .account_data - .get_room(&room_id, &user_id, RoomAccountDataEventType::Tag) - .await - .unwrap_or(TagEvent { - content: TagEventContent { tags: BTreeMap::new() }, - }); - - tags_event.content.tags.remove(&tag.clone().into()); - - self.services - .account_data - .update( - Some(&room_id), - &user_id, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), - ) - .await?; - - self.write_str(&format!( - "Successfully updated room account data for {user_id} and room {room_id}, deleting room \ - tag {tag}" - )) - .await -} - -#[admin_command] -pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId) -> Result { - let user_id = parse_active_local_user_id(self.services, &user_id).await?; - - let tags_event = self - .services - .account_data - .get_room(&room_id, &user_id, RoomAccountDataEventType::Tag) - .await - .unwrap_or(TagEvent { - content: TagEventContent { tags: BTreeMap::new() }, - }); - - self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags)) - .await -} - -#[admin_command] -pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result { - let Ok(event) = self - .services - .rooms - .timeline - .get_non_outlier_pdu(&event_id) - .await - else { - 
return Err!("Event does not exist in our database."); - }; - - if event.is_redacted() { - return Err!("Event is already redacted."); - } - - let room_id = event.room_id; - let sender_user = event.sender; - - if !self.services.globals.user_is_local(&sender_user) { - return Err!("This command only works on local users."); - } - - let reason = format!( - "The administrator(s) of {} has redacted this user's message.", - self.services.globals.server_name() - ); - - let redaction_event_id = { - let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; - - self.services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - redacts: Some(event.event_id.clone()), - ..PduBuilder::timeline(&RoomRedactionEventContent { - redacts: Some(event.event_id.clone()), - reason: Some(reason), - }) - }, - &sender_user, - &room_id, - &state_lock, - ) - .await? - }; - - self.write_str(&format!( - "Successfully redacted event. Redaction event ID: {redaction_event_id}" - )) - .await -} diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs deleted file mode 100644 index e789376a..00000000 --- a/src/admin/user/mod.rs +++ /dev/null @@ -1,156 +0,0 @@ -mod commands; - -use clap::Subcommand; -use conduwuit::Result; -use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId}; - -use crate::admin_command_dispatch; - -#[admin_command_dispatch] -#[derive(Debug, Subcommand)] -pub(super) enum UserCommand { - /// - Create a new user - #[clap(alias = "create")] - CreateUser { - /// Username of the new user - username: String, - /// Password of the new user, if unspecified one is generated - password: Option, - }, - - /// - Reset user password - ResetPassword { - /// Username of the user for whom the password should be reset - username: String, - /// New password for the user, if unspecified one is generated - password: Option, - }, - - /// - Deactivate a user - /// - /// User will be removed from all rooms by default. - /// Use --no-leave-rooms to not leave all rooms by default. 
- Deactivate { - #[arg(short, long)] - no_leave_rooms: bool, - user_id: String, - }, - - /// - Deactivate a list of users - /// - /// Recommended to use in conjunction with list-local-users. - /// - /// Users will be removed from joined rooms by default. - /// - /// Can be overridden with --no-leave-rooms. - /// - /// Removing a mass amount of users from a room may cause a significant - /// amount of leave events. The time to leave rooms may depend significantly - /// on joined rooms and servers. - /// - /// This command needs a newline separated list of users provided in a - /// Markdown code block below the command. - DeactivateAll { - #[arg(short, long)] - /// Does not leave any rooms the user is in on deactivation - no_leave_rooms: bool, - #[arg(short, long)] - /// Also deactivate admin accounts and will assume leave all rooms too - force: bool, - }, - - /// - List local users in the database - #[clap(alias = "list")] - ListUsers, - - /// - Lists all the rooms (local and remote) that the specified user is - /// joined in - ListJoinedRooms { - user_id: String, - }, - - /// - Manually join a local user to a room. - ForceJoinRoom { - user_id: String, - room_id: OwnedRoomOrAliasId, - }, - - /// - Manually leave a local user from a room. - ForceLeaveRoom { - user_id: String, - room_id: OwnedRoomOrAliasId, - }, - - /// - Forces the specified user to drop their power levels to the room - /// default, if their permissions allow and the auth check permits - ForceDemote { - user_id: String, - room_id: OwnedRoomOrAliasId, - }, - - /// - Grant server-admin privileges to a user. - MakeUserAdmin { - user_id: String, - }, - - /// - Puts a room tag for the specified user and room ID. - /// - /// This is primarily useful if you'd like to set your admin room - /// to the special "System Alerts" section in Element as a way to - /// permanently see your admin room without it being buried away in your - /// favourites or rooms. 
To do this, you would pass your user, your admin - /// room's internal ID, and the tag name `m.server_notice`. - PutRoomTag { - user_id: String, - room_id: OwnedRoomId, - tag: String, - }, - - /// - Deletes the room tag for the specified user and room ID - DeleteRoomTag { - user_id: String, - room_id: OwnedRoomId, - tag: String, - }, - - /// - Gets all the room tags for the specified user and room ID - GetRoomTags { - user_id: String, - room_id: OwnedRoomId, - }, - - /// - Attempts to forcefully redact the specified event ID from the sender - /// user - /// - /// This is only valid for local users - RedactEvent { - event_id: OwnedEventId, - }, - - /// - Force joins a specified list of local users to join the specified - /// room. - /// - /// Specify a codeblock of usernames. - /// - /// At least 1 server admin must be in the room to reduce abuse. - /// - /// Requires the `--yes-i-want-to-do-this` flag. - ForceJoinListOfLocalUsers { - room_id: OwnedRoomOrAliasId, - - #[arg(long)] - yes_i_want_to_do_this: bool, - }, - - /// - Force joins all local users to the specified room. - /// - /// At least 1 server admin must be in the room to reduce abuse. - /// - /// Requires the `--yes-i-want-to-do-this` flag. 
- ForceJoinAllLocalUsers { - room_id: OwnedRoomOrAliasId, - - #[arg(long)] - yes_i_want_to_do_this: bool, - }, -} diff --git a/src/admin/utils.rs b/src/admin/utils.rs deleted file mode 100644 index ea9696b2..00000000 --- a/src/admin/utils.rs +++ /dev/null @@ -1,67 +0,0 @@ -#![allow(dead_code)] - -use conduwuit_core::{Err, Result, err}; -use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; -use service::Services; - -pub(crate) fn escape_html(s: &str) -> String { - s.replace('&', "&") - .replace('<', "<") - .replace('>', ">") -} - -pub(crate) async fn get_room_info( - services: &Services, - room_id: &RoomId, -) -> (OwnedRoomId, u64, String) { - ( - room_id.into(), - services - .rooms - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(0), - services - .rooms - .state_accessor - .get_name(room_id) - .await - .unwrap_or_else(|_| room_id.to_string()), - ) -} - -/// Parses user ID -pub(crate) fn parse_user_id(services: &Services, user_id: &str) -> Result { - UserId::parse_with_server_name(user_id.to_lowercase(), services.globals.server_name()) - .map_err(|e| err!("The supplied username is not a valid username: {e}")) -} - -/// Parses user ID as our local user -pub(crate) fn parse_local_user_id(services: &Services, user_id: &str) -> Result { - let user_id = parse_user_id(services, user_id)?; - - if !services.globals.user_is_local(&user_id) { - return Err!("User {user_id:?} does not belong to our server."); - } - - Ok(user_id) -} - -/// Parses user ID that is an active (not guest or deactivated) local user -pub(crate) async fn parse_active_local_user_id( - services: &Services, - user_id: &str, -) -> Result { - let user_id = parse_local_user_id(services, user_id)?; - - if !services.users.exists(&user_id).await { - return Err!("User {user_id:?} does not exist on this server."); - } - - if services.users.is_deactivated(&user_id).await? 
{ - return Err!("User {user_id:?} is deactivated."); - } - - Ok(user_id) -} diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml deleted file mode 100644 index 15ada812..00000000 --- a/src/api/Cargo.toml +++ /dev/null @@ -1,95 +0,0 @@ -[package] -name = "conduwuit_api" -categories.workspace = true -description.workspace = true -edition.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version.workspace = true - -[lib] -path = "mod.rs" -crate-type = [ - "rlib", -# "dylib", -] - -[features] -brotli_compression = [ - "conduwuit-core/brotli_compression", - "conduwuit-service/brotli_compression", - "reqwest/brotli", -] -element_hacks = [ - "conduwuit-service/element_hacks", -] -gzip_compression = [ - "conduwuit-core/gzip_compression", - "conduwuit-service/gzip_compression", - "reqwest/gzip", -] -io_uring = [ - "conduwuit-service/io_uring", -] -jemalloc = [ - "conduwuit-core/jemalloc", - "conduwuit-service/jemalloc", -] -jemalloc_conf = [ - "conduwuit-core/jemalloc_conf", - "conduwuit-service/jemalloc_conf", -] -jemalloc_prof = [ - "conduwuit-core/jemalloc_prof", - "conduwuit-service/jemalloc_prof", -] -jemalloc_stats = [ - "conduwuit-core/jemalloc_stats", - "conduwuit-service/jemalloc_stats", -] -release_max_log_level = [ - "conduwuit-core/release_max_log_level", - "conduwuit-service/release_max_log_level", - "log/max_level_trace", - "log/release_max_level_info", - "tracing/max_level_trace", - "tracing/release_max_level_info", -] -zstd_compression = [ - "conduwuit-core/zstd_compression", - "conduwuit-service/zstd_compression", - "reqwest/zstd", -] - -[dependencies] -async-trait.workspace = true -axum-client-ip.workspace = true -axum-extra.workspace = true -axum.workspace = true -base64.workspace = true -bytes.workspace = true -conduwuit-core.workspace = true -conduwuit-service.workspace = true -const-str.workspace = true -futures.workspace = true -hmac.workspace = true -http.workspace = true 
-http-body-util.workspace = true -hyper.workspace = true -ipaddress.workspace = true -itertools.workspace = true -log.workspace = true -rand.workspace = true -reqwest.workspace = true -ruma.workspace = true -serde_html_form.workspace = true -serde_json.workspace = true -serde.workspace = true -sha1.workspace = true -tokio.workspace = true -tracing.workspace = true - -[lints] -workspace = true diff --git a/src/api/client/account.rs b/src/api/client/account.rs deleted file mode 100644 index 32f2530c..00000000 --- a/src/api/client/account.rs +++ /dev/null @@ -1,921 +0,0 @@ -use std::fmt::Write; - -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{ - Err, Error, Result, debug_info, err, error, info, is_equal_to, - matrix::pdu::PduBuilder, - utils, - utils::{ReadyExt, stream::BroadbandExt}, - warn, -}; -use conduwuit_service::Services; -use futures::{FutureExt, StreamExt}; -use register::RegistrationKind; -use ruma::{ - OwnedRoomId, UserId, - api::client::{ - account::{ - ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity, - deactivate, get_3pids, get_username_availability, - register::{self, LoginType}, - request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, - whoami, - }, - uiaa::{AuthFlow, AuthType, UiaaInfo}, - }, - events::{ - GlobalAccountDataEventType, StateEventType, - room::{ - message::RoomMessageEventContent, - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - }, - }, - push, -}; - -use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; -use crate::Ruma; - -const RANDOM_USER_ID_LENGTH: usize = 10; - -/// # `GET /_matrix/client/v3/register/available` -/// -/// Checks if a username is valid and available on this server. 
-/// -/// Conditions for returning true: -/// - The user id is not historical -/// - The server name of the user id matches this server -/// - No user or appservice on this server already claimed this username -/// -/// Note: This will not reserve the username, so the username might become -/// invalid when trying to register -#[tracing::instrument(skip_all, fields(%client), name = "register_available")] -pub(crate) async fn get_register_available_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue - let is_matrix_appservice_irc = body.appservice_info.as_ref().is_some_and(|appservice| { - appservice.registration.id == "irc" - || appservice.registration.id.contains("matrix-appservice-irc") - || appservice.registration.id.contains("matrix_appservice_irc") - }); - - if services - .globals - .forbidden_usernames() - .is_match(&body.username) - { - return Err!(Request(Forbidden("Username is forbidden"))); - } - - // don't force the username lowercase if it's from matrix-appservice-irc - let body_username = if is_matrix_appservice_irc { - body.username.clone() - } else { - body.username.to_lowercase() - }; - - // Validate user id - let user_id = - match UserId::parse_with_server_name(&body_username, services.globals.server_name()) { - | Ok(user_id) => { - if let Err(e) = user_id.validate_strict() { - // unless the username is from the broken matrix appservice IRC bridge, we - // should follow synapse's behaviour on not allowing things like spaces - // and UTF-8 characters in usernames - if !is_matrix_appservice_irc { - return Err!(Request(InvalidUsername(debug_warn!( - "Username {body_username} contains disallowed characters or spaces: \ - {e}" - )))); - } - } - - user_id - }, - | Err(e) => { - return Err!(Request(InvalidUsername(debug_warn!( - "Username {body_username} is not valid: {e}" - )))); 
- }, - }; - - // Check if username is creative enough - if services.users.exists(&user_id).await { - return Err!(Request(UserInUse("User ID is not available."))); - } - - if let Some(ref info) = body.appservice_info { - if !info.is_user_match(&user_id) { - return Err!(Request(Exclusive("Username is not in an appservice namespace."))); - } - } - - if services.appservice.is_exclusive_user_id(&user_id).await { - return Err!(Request(Exclusive("Username is reserved by an appservice."))); - } - - Ok(get_username_availability::v3::Response { available: true }) -} - -/// # `POST /_matrix/client/v3/register` -/// -/// Register an account on this homeserver. -/// -/// You can use [`GET -/// /_matrix/client/v3/register/available`](fn.get_register_available_route. -/// html) to check if the user id is valid and available. -/// -/// - Only works if registration is enabled -/// - If type is guest: ignores all parameters except -/// initial_device_display_name -/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage) -/// - If type is not guest and no username is given: Always fails after UIAA -/// check -/// - Creates a new account and populates it with default account data -/// - If `inhibit_login` is false: Creates a device and returns device id and -/// access_token -#[allow(clippy::doc_markdown)] -#[tracing::instrument(skip_all, fields(%client), name = "register")] -pub(crate) async fn register_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let is_guest = body.kind == RegistrationKind::Guest; - let emergency_mode_enabled = services.config.emergency_password.is_some(); - - if !services.config.allow_registration && body.appservice_info.is_none() { - match (body.username.as_ref(), body.initial_device_display_name.as_ref()) { - | (Some(username), Some(device_display_name)) => { - info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as 
registration is disabled"); - }, - | (Some(username), _) => { - info!(%is_guest, user = %username, "Rejecting registration attempt as registration is disabled"); - }, - | (_, Some(device_display_name)) => { - info!(%is_guest, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); - }, - | (None, _) => { - info!(%is_guest, "Rejecting registration attempt as registration is disabled"); - }, - } - - return Err!(Request(Forbidden("Registration has been disabled."))); - } - - if is_guest - && (!services.config.allow_guest_registration - || (services.config.allow_registration - && services.globals.registration_token.is_some())) - { - info!( - "Guest registration disabled / registration enabled with token configured, \ - rejecting guest registration attempt, initial device name: \"{}\"", - body.initial_device_display_name.as_deref().unwrap_or("") - ); - return Err!(Request(GuestAccessForbidden("Guest registration is disabled."))); - } - - // forbid guests from registering if there is not a real admin user yet. give - // generic user error. - if is_guest && services.users.count().await < 2 { - warn!( - "Guest account attempted to register before a real admin user has been registered, \ - rejecting registration. 
Guest's initial device name: \"{}\"", - body.initial_device_display_name.as_deref().unwrap_or("") - ); - return Err!(Request(Forbidden("Registration is temporarily disabled."))); - } - - let user_id = match (body.username.as_ref(), is_guest) { - | (Some(username), false) => { - // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue - let is_matrix_appservice_irc = - body.appservice_info.as_ref().is_some_and(|appservice| { - appservice.registration.id == "irc" - || appservice.registration.id.contains("matrix-appservice-irc") - || appservice.registration.id.contains("matrix_appservice_irc") - }); - - if services.globals.forbidden_usernames().is_match(username) - && !emergency_mode_enabled - { - return Err!(Request(Forbidden("Username is forbidden"))); - } - - // don't force the username lowercase if it's from matrix-appservice-irc - let body_username = if is_matrix_appservice_irc { - username.clone() - } else { - username.to_lowercase() - }; - - let proposed_user_id = match UserId::parse_with_server_name( - &body_username, - services.globals.server_name(), - ) { - | Ok(user_id) => { - if let Err(e) = user_id.validate_strict() { - // unless the username is from the broken matrix appservice IRC bridge, or - // we are in emergency mode, we should follow synapse's behaviour on - // not allowing things like spaces and UTF-8 characters in usernames - if !is_matrix_appservice_irc && !emergency_mode_enabled { - return Err!(Request(InvalidUsername(debug_warn!( - "Username {body_username} contains disallowed characters or \ - spaces: {e}" - )))); - } - } - - user_id - }, - | Err(e) => { - return Err!(Request(InvalidUsername(debug_warn!( - "Username {body_username} is not valid: {e}" - )))); - }, - }; - - if services.users.exists(&proposed_user_id).await { - return Err!(Request(UserInUse("User ID is not available."))); - } - - proposed_user_id - }, - | _ => loop { - let proposed_user_id = 
UserId::parse_with_server_name( - utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), - services.globals.server_name(), - ) - .unwrap(); - if !services.users.exists(&proposed_user_id).await { - break proposed_user_id; - } - }, - }; - - if body.body.login_type == Some(LoginType::ApplicationService) { - match body.appservice_info { - | Some(ref info) => - if !info.is_user_match(&user_id) && !emergency_mode_enabled { - return Err!(Request(Exclusive( - "Username is not in an appservice namespace." - ))); - }, - | _ => { - return Err!(Request(MissingToken("Missing appservice token."))); - }, - } - } else if services.appservice.is_exclusive_user_id(&user_id).await && !emergency_mode_enabled - { - return Err!(Request(Exclusive("Username is reserved by an appservice."))); - } - - // UIAA - let mut uiaainfo; - let skip_auth = if services.globals.registration_token.is_some() { - // Registration token required - uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::RegistrationToken], - }], - completed: Vec::new(), - params: Box::default(), - session: None, - auth_error: None, - }; - body.appservice_info.is_some() - } else { - // No registration token necessary, but clients must still go through the flow - uiaainfo = UiaaInfo { - flows: vec![AuthFlow { stages: vec![AuthType::Dummy] }], - completed: Vec::new(), - params: Box::default(), - session: None, - auth_error: None, - }; - body.appservice_info.is_some() || is_guest - }; - - if !skip_auth { - match &body.auth { - | Some(auth) => { - let (worked, uiaainfo) = services - .uiaa - .try_auth( - &UserId::parse_with_server_name("", services.globals.server_name()) - .unwrap(), - "".into(), - auth, - &uiaainfo, - ) - .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! 
- }, - | _ => match body.json_body { - | Some(ref json) => { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services.uiaa.create( - &UserId::parse_with_server_name("", services.globals.server_name()) - .unwrap(), - "".into(), - &uiaainfo, - json, - ); - return Err(Error::Uiaa(uiaainfo)); - }, - | _ => { - return Err!(Request(NotJson("JSON body is not valid"))); - }, - }, - } - } - - let password = if is_guest { None } else { body.password.as_deref() }; - - // Create user - services.users.create(&user_id, password)?; - - // Default to pretty displayname - let mut displayname = user_id.localpart().to_owned(); - - // If `new_user_displayname_suffix` is set, registration will push whatever - // content is set to the user's display name with a space before it - if !services.globals.new_user_displayname_suffix().is_empty() - && body.appservice_info.is_none() - { - write!(displayname, " {}", services.server.config.new_user_displayname_suffix) - .expect("should be able to write to string buffer"); - } - - services - .users - .set_displayname(&user_id, Some(displayname.clone())); - - // Initial account data - services - .account_data - .update( - None, - &user_id, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: push::Ruleset::server_default(&user_id), - }, - }) - .expect("to json always works"), - ) - .await?; - - if (!is_guest && body.inhibit_login) - || body - .appservice_info - .as_ref() - .is_some_and(|appservice| appservice.registration.device_management) - { - return Ok(register::v3::Response { - access_token: None, - user_id, - device_id: None, - refresh_token: None, - expires_in: None, - }); - } - - // Generate new device id if the user didn't specify one - let device_id = if is_guest { None } else { body.device_id.clone() } - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); - - 
// Generate new token for the device - let token = utils::random_string(TOKEN_LENGTH); - - // Create device for this account - services - .users - .create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - Some(client.to_string()), - ) - .await?; - - debug_info!(%user_id, %device_id, "User account was created"); - - let device_display_name = body.initial_device_display_name.as_deref().unwrap_or(""); - - // log in conduit admin channel if a non-guest user registered - if body.appservice_info.is_none() && !is_guest { - if !device_display_name.is_empty() { - info!( - "New user \"{user_id}\" registered on this server with device display name: \ - \"{device_display_name}\"" - ); - - if services.server.config.admin_room_notices { - services - .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "New user \"{user_id}\" registered on this server from IP {client} and \ - device display name \"{device_display_name}\"" - ))) - .await - .ok(); - } - } else { - info!("New user \"{user_id}\" registered on this server."); - - if services.server.config.admin_room_notices { - services - .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "New user \"{user_id}\" registered on this server from IP {client}" - ))) - .await - .ok(); - } - } - } - - // log in conduit admin channel if a guest registered - if body.appservice_info.is_none() && is_guest && services.config.log_guest_registrations { - debug_info!("New guest user \"{user_id}\" registered on this server."); - - if !device_display_name.is_empty() { - if services.server.config.admin_room_notices { - services - .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "Guest user \"{user_id}\" with device display name \ - \"{device_display_name}\" registered on this server from IP {client}" - ))) - .await - .ok(); - } - } else { - #[allow(clippy::collapsible_else_if)] - if services.server.config.admin_room_notices { - services - .admin - 
.send_message(RoomMessageEventContent::notice_plain(format!( - "Guest user \"{user_id}\" with no device display name registered on \ - this server from IP {client}", - ))) - .await - .ok(); - } - } - } - - // If this is the first real user, grant them admin privileges except for guest - // users - // Note: the server user is generated first - if !is_guest { - if let Ok(admin_room) = services.admin.get_admin_room().await { - if services - .rooms - .state_cache - .room_joined_count(&admin_room) - .await - .is_ok_and(is_equal_to!(1)) - { - services.admin.make_user_admin(&user_id).await?; - warn!("Granting {user_id} admin privileges as the first user"); - } - } - } - - if body.appservice_info.is_none() - && !services.server.config.auto_join_rooms.is_empty() - && (services.config.allow_guests_auto_join_rooms || !is_guest) - { - for room in &services.server.config.auto_join_rooms { - let Ok(room_id) = services.rooms.alias.resolve(room).await else { - error!( - "Failed to resolve room alias to room ID when attempting to auto join \ - {room}, skipping" - ); - continue; - }; - - if !services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), &room_id) - .await - { - warn!( - "Skipping room {room} to automatically join as we have never joined before." 
- ); - continue; - } - - if let Some(room_server_name) = room.server_name() { - match join_room_by_id_helper( - &services, - &user_id, - &room_id, - Some("Automatically joining this room upon registration".to_owned()), - &[services.globals.server_name().to_owned(), room_server_name.to_owned()], - None, - &body.appservice_info, - ) - .boxed() - .await - { - | Err(e) => { - // don't return this error so we don't fail registrations - error!( - "Failed to automatically join room {room} for user {user_id}: {e}" - ); - }, - | _ => { - info!("Automatically joined room {room} for user {user_id}"); - }, - } - } - } - } - - Ok(register::v3::Response { - access_token: Some(token), - user_id, - device_id: Some(device_id), - refresh_token: None, - expires_in: None, - }) -} - -/// # `POST /_matrix/client/r0/account/password` -/// -/// Changes the password of this account. -/// -/// - Requires UIAA to verify user password -/// - Changes the password of the sender user -/// - The password hash is calculated using argon2 with 32 character salt, the -/// plain password is -/// not saved -/// -/// If logout_devices is true it does the following for each device except the -/// sender device: -/// - Invalidates access token -/// - Deletes device metadata (device id, device display name, last seen ip, -/// last seen ts) -/// - Forgets to-device events -/// - Triggers device list updates -#[tracing::instrument(skip_all, fields(%client), name = "change_password")] -pub(crate) async fn change_password_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - // Authentication for this endpoint was made optional, but we need - // authentication currently - let sender_user = body - .sender_user - .as_ref() - .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; - let sender_device = body.sender_device(); - - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { stages: vec![AuthType::Password] }], - completed: Vec::new(), 
- params: Box::default(), - session: None, - auth_error: None, - }; - - match &body.auth { - | Some(auth) => { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; - - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - - // Success! - }, - | _ => match body.json_body { - | Some(ref json) => { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, json); - - return Err(Error::Uiaa(uiaainfo)); - }, - | _ => { - return Err!(Request(NotJson("JSON body is not valid"))); - }, - }, - } - - services - .users - .set_password(sender_user, Some(&body.new_password))?; - - if body.logout_devices { - // Logout all devices except the current one - services - .users - .all_device_ids(sender_user) - .ready_filter(|id| *id != sender_device) - .for_each(|id| services.users.remove_device(sender_user, id)) - .await; - - // Remove all pushers except the ones associated with this session - services - .pusher - .get_pushkeys(sender_user) - .map(ToOwned::to_owned) - .broad_filter_map(|pushkey| async move { - services - .pusher - .get_pusher_device(&pushkey) - .await - .ok() - .filter(|pusher_device| pusher_device != sender_device) - .is_some() - .then_some(pushkey) - }) - .for_each(|pushkey| async move { - services.pusher.delete_pusher(sender_user, &pushkey).await; - }) - .await; - } - - info!("User {sender_user} changed their password."); - - if services.server.config.admin_room_notices { - services - .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "User {sender_user} changed their password." - ))) - .await - .ok(); - } - - Ok(change_password::v3::Response {}) -} - -/// # `GET _matrix/client/r0/account/whoami` -/// -/// Get `user_id` of the sender user. 
-/// -/// Note: Also works for Application Services -pub(crate) async fn whoami_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let device_id = body.sender_device.clone(); - - Ok(whoami::v3::Response { - user_id: sender_user.clone(), - device_id, - is_guest: services.users.is_deactivated(sender_user).await? - && body.appservice_info.is_none(), - }) -} - -/// # `POST /_matrix/client/r0/account/deactivate` -/// -/// Deactivate sender user account. -/// -/// - Leaves all rooms and rejects all invitations -/// - Invalidates all access tokens -/// - Deletes all device metadata (device id, device display name, last seen ip, -/// last seen ts) -/// - Forgets all to-device events -/// - Triggers device list updates -/// - Removes ability to log in again -#[tracing::instrument(skip_all, fields(%client), name = "deactivate")] -pub(crate) async fn deactivate_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - // Authentication for this endpoint was made optional, but we need - // authentication currently - let sender_user = body - .sender_user - .as_ref() - .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; - let sender_device = body.sender_device(); - - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { stages: vec![AuthType::Password] }], - completed: Vec::new(), - params: Box::default(), - session: None, - auth_error: None, - }; - - match &body.auth { - | Some(auth) => { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; - - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! 
- }, - | _ => match body.json_body { - | Some(ref json) => { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, json); - - return Err(Error::Uiaa(uiaainfo)); - }, - | _ => { - return Err!(Request(NotJson("JSON body is not valid"))); - }, - }, - } - - // Remove profile pictures and display name - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(sender_user) - .map(Into::into) - .collect() - .await; - - super::update_displayname(&services, sender_user, None, &all_joined_rooms).await; - super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await; - - full_user_deactivate(&services, sender_user, &all_joined_rooms).await?; - - info!("User {sender_user} deactivated their account."); - - if services.server.config.admin_room_notices { - services - .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "User {sender_user} deactivated their account." - ))) - .await - .ok(); - } - - Ok(deactivate::v3::Response { - id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, - }) -} - -/// # `GET _matrix/client/v3/account/3pid` -/// -/// Get a list of third party identifiers associated with this account. -/// -/// - Currently always returns empty list -pub(crate) async fn third_party_route( - body: Ruma, -) -> Result { - let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - Ok(get_3pids::v3::Response::new(Vec::new())) -} - -/// # `POST /_matrix/client/v3/account/3pid/email/requestToken` -/// -/// "This API should be used to request validation tokens when adding an email -/// address to an account" -/// -/// - 403 signals that The homeserver does not allow the third party identifier -/// as a contact option. 
-pub(crate) async fn request_3pid_management_token_via_email_route( - _body: Ruma, -) -> Result { - Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) -} - -/// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken` -/// -/// "This API should be used to request validation tokens when adding an phone -/// number to an account" -/// -/// - 403 signals that The homeserver does not allow the third party identifier -/// as a contact option. -pub(crate) async fn request_3pid_management_token_via_msisdn_route( - _body: Ruma, -) -> Result { - Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) -} - -/// # `GET /_matrix/client/v1/register/m.login.registration_token/validity` -/// -/// Checks if the provided registration token is valid at the time of checking -/// -/// Currently does not have any ratelimiting, and this isn't very practical as -/// there is only one registration token allowed. -pub(crate) async fn check_registration_token_validity( - State(services): State, - body: Ruma, -) -> Result { - let Some(reg_token) = services.globals.registration_token.clone() else { - return Err!(Request(Forbidden("Server does not allow token registration"))); - }; - - Ok(check_registration_token_validity::v1::Response { valid: reg_token == body.token }) -} - -/// Runs through all the deactivation steps: -/// -/// - Mark as deactivated -/// - Removing display name -/// - Removing avatar URL and blurhash -/// - Removing all profile data -/// - Leaving all rooms (and forgets all of them) -pub async fn full_user_deactivate( - services: &Services, - user_id: &UserId, - all_joined_rooms: &[OwnedRoomId], -) -> Result<()> { - services.users.deactivate_account(user_id).await.ok(); - super::update_displayname(services, user_id, None, all_joined_rooms).await; - super::update_avatar_url(services, user_id, None, None, all_joined_rooms).await; - - services - .users - .all_profile_keys(user_id) - .ready_for_each(|(profile_key, _)| { - 
services.users.set_profile_key(user_id, &profile_key, None); - }) - .await; - - for room_id in all_joined_rooms { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - let room_power_levels = services - .rooms - .state_accessor - .room_state_get_content::( - room_id, - &StateEventType::RoomPowerLevels, - "", - ) - .await - .ok(); - - let user_can_demote_self = - room_power_levels - .as_ref() - .is_some_and(|power_levels_content| { - RoomPowerLevels::from(power_levels_content.clone()) - .user_can_change_user_power_level(user_id, user_id) - }) || services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - .is_ok_and(|event| event.sender == user_id); - - if user_can_demote_self { - let mut power_levels_content = room_power_levels.unwrap_or_default(); - power_levels_content.users.remove(user_id); - - // ignore errors so deactivation doesn't fail - match services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &power_levels_content), - user_id, - room_id, - &state_lock, - ) - .await - { - | Err(e) => { - warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}"); - }, - | _ => { - info!("Demoted {user_id} in {room_id} as part of account deactivation"); - }, - } - } - } - - super::leave_all_rooms(services, user_id).await; - - Ok(()) -} diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs deleted file mode 100644 index e44ce4e7..00000000 --- a/src/api/client/account_data.rs +++ /dev/null @@ -1,160 +0,0 @@ -use axum::extract::State; -use conduwuit::{Err, Result, err}; -use conduwuit_service::Services; -use ruma::{ - RoomId, UserId, - api::client::config::{ - get_global_account_data, get_room_account_data, set_global_account_data, - set_room_account_data, - }, - events::{ - AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent, - GlobalAccountDataEventType, RoomAccountDataEventType, - }, - serde::Raw, -}; -use serde::Deserialize; 
-use serde_json::{json, value::RawValue as RawJsonValue}; - -use crate::Ruma; - -/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` -/// -/// Sets some account data for the sender user. -pub(crate) async fn set_global_account_data_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot set account data for other users."))); - } - - set_account_data( - &services, - None, - &body.user_id, - &body.event_type.to_string(), - body.data.json(), - ) - .await?; - - Ok(set_global_account_data::v3::Response {}) -} - -/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` -/// -/// Sets some room account data for the sender user. -pub(crate) async fn set_room_account_data_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot set account data for other users."))); - } - - set_account_data( - &services, - Some(&body.room_id), - &body.user_id, - &body.event_type.to_string(), - body.data.json(), - ) - .await?; - - Ok(set_room_account_data::v3::Response {}) -} - -/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` -/// -/// Gets some account data for the sender user. 
-pub(crate) async fn get_global_account_data_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot get account data of other users."))); - } - - let account_data: ExtractGlobalEventContent = services - .account_data - .get_global(&body.user_id, body.event_type.clone()) - .await - .map_err(|_| err!(Request(NotFound("Data not found."))))?; - - Ok(get_global_account_data::v3::Response { account_data: account_data.content }) -} - -/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` -/// -/// Gets some room account data for the sender user. -pub(crate) async fn get_room_account_data_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot get account data of other users."))); - } - - let account_data: ExtractRoomEventContent = services - .account_data - .get_room(&body.room_id, &body.user_id, body.event_type.clone()) - .await - .map_err(|_| err!(Request(NotFound("Data not found."))))?; - - Ok(get_room_account_data::v3::Response { account_data: account_data.content }) -} - -async fn set_account_data( - services: &Services, - room_id: Option<&RoomId>, - sender_user: &UserId, - event_type_s: &str, - data: &RawJsonValue, -) -> Result { - if event_type_s == RoomAccountDataEventType::FullyRead.to_cow_str() { - return Err!(Request(BadJson( - "This endpoint cannot be used for marking a room as fully read (setting \ - m.fully_read)" - ))); - } - - if event_type_s == GlobalAccountDataEventType::PushRules.to_cow_str() { - return Err!(Request(BadJson( - "This endpoint cannot be used for setting/configuring push rules." 
- ))); - } - - let data: serde_json::Value = serde_json::from_str(data.get()) - .map_err(|e| err!(Request(BadJson(warn!("Invalid JSON provided: {e}")))))?; - - services - .account_data - .update( - room_id, - sender_user, - event_type_s.into(), - &json!({ - "type": event_type_s, - "content": data, - }), - ) - .await -} - -#[derive(Deserialize)] -struct ExtractRoomEventContent { - content: Raw, -} - -#[derive(Deserialize)] -struct ExtractGlobalEventContent { - content: Raw, -} diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs deleted file mode 100644 index 9f1b05f8..00000000 --- a/src/api/client/alias.rs +++ /dev/null @@ -1,154 +0,0 @@ -use axum::extract::State; -use conduwuit::{Err, Result, debug}; -use conduwuit_service::Services; -use futures::StreamExt; -use rand::seq::SliceRandom; -use ruma::{ - OwnedServerName, RoomAliasId, RoomId, - api::client::alias::{create_alias, delete_alias, get_alias}, -}; - -use crate::Ruma; - -/// # `PUT /_matrix/client/v3/directory/room/{roomAlias}` -/// -/// Creates a new room alias on this server. 
-pub(crate) async fn create_alias_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - services - .rooms - .alias - .appservice_checks(&body.room_alias, &body.appservice_info) - .await?; - - // this isn't apart of alias_checks or delete alias route because we should - // allow removing forbidden room aliases - if services - .globals - .forbidden_alias_names() - .is_match(body.room_alias.alias()) - { - return Err!(Request(Forbidden("Room alias is forbidden."))); - } - - if services - .rooms - .alias - .resolve_local_alias(&body.room_alias) - .await - .is_ok() - { - return Err!(Conflict("Alias already exists.")); - } - - services - .rooms - .alias - .set_alias(&body.room_alias, &body.room_id, sender_user)?; - - Ok(create_alias::v3::Response::new()) -} - -/// # `DELETE /_matrix/client/v3/directory/room/{roomAlias}` -/// -/// Deletes a room alias from this server. -/// -/// - TODO: Update canonical alias event -pub(crate) async fn delete_alias_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - services - .rooms - .alias - .appservice_checks(&body.room_alias, &body.appservice_info) - .await?; - - services - .rooms - .alias - .remove_alias(&body.room_alias, sender_user) - .await?; - - // TODO: update alt_aliases? - - Ok(delete_alias::v3::Response::new()) -} - -/// # `GET /_matrix/client/v3/directory/room/{roomAlias}` -/// -/// Resolve an alias locally or over federation. 
-pub(crate) async fn get_alias_route( - State(services): State, - body: Ruma, -) -> Result { - let room_alias = body.body.room_alias; - - let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await - else { - return Err!(Request(NotFound("Room with alias not found."))); - }; - - let servers = room_available_servers(&services, &room_id, &room_alias, servers).await; - debug!(?room_alias, ?room_id, "available servers: {servers:?}"); - - Ok(get_alias::v3::Response::new(room_id, servers)) -} - -async fn room_available_servers( - services: &Services, - room_id: &RoomId, - room_alias: &RoomAliasId, - pre_servers: Vec, -) -> Vec { - // find active servers in room state cache to suggest - let mut servers: Vec = services - .rooms - .state_cache - .room_servers(room_id) - .map(ToOwned::to_owned) - .collect() - .await; - - // push any servers we want in the list already (e.g. responded remote alias - // servers, room alias server itself) - servers.extend(pre_servers); - - servers.sort_unstable(); - servers.dedup(); - - // shuffle list of servers randomly after sort and dedupe - servers.shuffle(&mut rand::thread_rng()); - - // insert our server as the very first choice if in list, else check if we can - // prefer the room alias server first - match servers - .iter() - .position(|server_name| services.globals.server_is_ours(server_name)) - { - | Some(server_index) => { - servers.swap_remove(server_index); - servers.insert(0, services.globals.server_name().to_owned()); - }, - | _ => { - match servers - .iter() - .position(|server| server == room_alias.server_name()) - { - | Some(alias_server_index) => { - servers.swap_remove(alias_server_index); - servers.insert(0, room_alias.server_name().into()); - }, - | _ => {}, - } - }, - } - - servers -} diff --git a/src/api/client/appservice.rs b/src/api/client/appservice.rs deleted file mode 100644 index eb6b3312..00000000 --- a/src/api/client/appservice.rs +++ /dev/null @@ -1,51 +0,0 @@ -use 
axum::extract::State; -use conduwuit::{Err, Result, err}; -use ruma::api::{appservice::ping, client::appservice::request_ping}; - -use crate::Ruma; - -/// # `POST /_matrix/client/v1/appservice/{appserviceId}/ping` -/// -/// Ask the homeserver to ping the application service to ensure the connection -/// works. -pub(crate) async fn appservice_ping( - State(services): State, - body: Ruma, -) -> Result { - let appservice_info = body.appservice_info.as_ref().ok_or_else(|| { - err!(Request(Forbidden("This endpoint can only be called by appservices."))) - })?; - - if body.appservice_id != appservice_info.registration.id { - return Err!(Request(Forbidden( - "Appservices can only ping themselves (wrong appservice ID)." - ))); - } - - if appservice_info.registration.url.is_none() - || appservice_info - .registration - .url - .as_ref() - .is_some_and(|url| url.is_empty() || url == "null") - { - return Err!(Request(UrlNotSet( - "Appservice does not have a URL set, there is nothing to ping." - ))); - } - - let timer = tokio::time::Instant::now(); - - let _response = services - .sending - .send_appservice_request( - appservice_info.registration.clone(), - ping::send_ping::v1::Request { - transaction_id: body.transaction_id.clone(), - }, - ) - .await? 
- .expect("We already validated if an appservice URL exists above"); - - Ok(request_ping::v1::Response { duration: timer.elapsed() }) -} diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs deleted file mode 100644 index 2ad37cf3..00000000 --- a/src/api/client/backup.rs +++ /dev/null @@ -1,443 +0,0 @@ -use std::cmp::Ordering; - -use axum::extract::State; -use conduwuit::{Err, Result, err}; -use ruma::{ - UInt, - api::client::backup::{ - add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, - create_backup_version, delete_backup_keys, delete_backup_keys_for_room, - delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys, - get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, - update_backup_version, - }, -}; - -use crate::Ruma; - -/// # `POST /_matrix/client/r0/room_keys/version` -/// -/// Creates a new backup. -pub(crate) async fn create_backup_version_route( - State(services): State, - body: Ruma, -) -> Result { - let version = services - .key_backups - .create_backup(body.sender_user(), &body.algorithm)?; - - Ok(create_backup_version::v3::Response { version }) -} - -/// # `PUT /_matrix/client/r0/room_keys/version/{version}` -/// -/// Update information about an existing backup. Only `auth_data` can be -/// modified. -pub(crate) async fn update_backup_version_route( - State(services): State, - body: Ruma, -) -> Result { - services - .key_backups - .update_backup(body.sender_user(), &body.version, &body.algorithm) - .await?; - - Ok(update_backup_version::v3::Response {}) -} - -/// # `GET /_matrix/client/r0/room_keys/version` -/// -/// Get information about the latest backup version. 
-pub(crate) async fn get_latest_backup_info_route( - State(services): State, - body: Ruma, -) -> Result { - let (version, algorithm) = services - .key_backups - .get_latest_backup(body.sender_user()) - .await - .map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?; - - Ok(get_latest_backup_info::v3::Response { - algorithm, - count: (UInt::try_from( - services - .key_backups - .count_keys(body.sender_user(), &version) - .await, - ) - .expect("user backup keys count should not be that high")), - etag: services - .key_backups - .get_etag(body.sender_user(), &version) - .await, - version, - }) -} - -/// # `GET /_matrix/client/v3/room_keys/version/{version}` -/// -/// Get information about an existing backup. -pub(crate) async fn get_backup_info_route( - State(services): State, - body: Ruma, -) -> Result { - let algorithm = services - .key_backups - .get_backup(body.sender_user(), &body.version) - .await - .map_err(|_| { - err!(Request(NotFound("Key backup does not exist at version {:?}", body.version))) - })?; - - Ok(get_backup_info::v3::Response { - algorithm, - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - version: body.version.clone(), - }) -} - -/// # `DELETE /_matrix/client/r0/room_keys/version/{version}` -/// -/// Delete an existing key backup. -/// -/// - Deletes both information about the backup, as well as all key data related -/// to the backup -pub(crate) async fn delete_backup_version_route( - State(services): State, - body: Ruma, -) -> Result { - services - .key_backups - .delete_backup(body.sender_user(), &body.version) - .await; - - Ok(delete_backup_version::v3::Response {}) -} - -/// # `PUT /_matrix/client/r0/room_keys/keys` -/// -/// Add the received backup keys to the database. 
-/// -/// - Only manipulating the most recently created version of the backup is -/// allowed -/// - Adds the keys to the backup -/// - Returns the new number of keys in this backup and the etag -pub(crate) async fn add_backup_keys_route( - State(services): State, - body: Ruma, -) -> Result { - if services - .key_backups - .get_latest_backup_version(body.sender_user()) - .await - .is_ok_and(|version| version != body.version) - { - return Err!(Request(InvalidParam( - "You may only manipulate the most recently created version of the backup." - ))); - } - - for (room_id, room) in &body.rooms { - for (session_id, key_data) in &room.sessions { - services - .key_backups - .add_key(body.sender_user(), &body.version, room_id, session_id, key_data) - .await?; - } - } - - Ok(add_backup_keys::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) -} - -/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}` -/// -/// Add the received backup keys to the database. -/// -/// - Only manipulating the most recently created version of the backup is -/// allowed -/// - Adds the keys to the backup -/// - Returns the new number of keys in this backup and the etag -pub(crate) async fn add_backup_keys_for_room_route( - State(services): State, - body: Ruma, -) -> Result { - if services - .key_backups - .get_latest_backup_version(body.sender_user()) - .await - .is_ok_and(|version| version != body.version) - { - return Err!(Request(InvalidParam( - "You may only manipulate the most recently created version of the backup." 
- ))); - } - - for (session_id, key_data) in &body.sessions { - services - .key_backups - .add_key(body.sender_user(), &body.version, &body.room_id, session_id, key_data) - .await?; - } - - Ok(add_backup_keys_for_room::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) -} - -/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` -/// -/// Add the received backup key to the database. -/// -/// - Only manipulating the most recently created version of the backup is -/// allowed -/// - Adds the keys to the backup -/// - Returns the new number of keys in this backup and the etag -pub(crate) async fn add_backup_keys_for_session_route( - State(services): State, - body: Ruma, -) -> Result { - if services - .key_backups - .get_latest_backup_version(body.sender_user()) - .await - .is_ok_and(|version| version != body.version) - { - return Err!(Request(InvalidParam( - "You may only manipulate the most recently created version of the backup." - ))); - } - - // Check if we already have a better key - let mut ok_to_replace = true; - if let Some(old_key) = &services - .key_backups - .get_session(body.sender_user(), &body.version, &body.room_id, &body.session_id) - .await - .ok() - { - let old_is_verified = old_key - .get_field::("is_verified")? - .unwrap_or_default(); - - let new_is_verified = body - .session_data - .get_field::("is_verified")? - .ok_or_else(|| err!(Request(BadJson("`is_verified` field should exist"))))?; - - // Prefer key that `is_verified` - if old_is_verified != new_is_verified { - if old_is_verified { - ok_to_replace = false; - } - } else { - // If both have same `is_verified`, prefer the one with lower - // `first_message_index` - let old_first_message_index = old_key - .get_field::("first_message_index")? 
- .unwrap_or(UInt::MAX); - - let new_first_message_index = body - .session_data - .get_field::("first_message_index")? - .ok_or_else(|| { - err!(Request(BadJson("`first_message_index` field should exist"))) - })?; - - ok_to_replace = match new_first_message_index.cmp(&old_first_message_index) { - | Ordering::Less => true, - | Ordering::Greater => false, - | Ordering::Equal => { - // If both have same `first_message_index`, prefer the one with lower - // `forwarded_count` - let old_forwarded_count = old_key - .get_field::("forwarded_count")? - .unwrap_or(UInt::MAX); - - let new_forwarded_count = body - .session_data - .get_field::("forwarded_count")? - .ok_or_else(|| { - err!(Request(BadJson("`forwarded_count` field should exist"))) - })?; - - new_forwarded_count < old_forwarded_count - }, - }; - } - } - - if ok_to_replace { - services - .key_backups - .add_key( - body.sender_user(), - &body.version, - &body.room_id, - &body.session_id, - &body.session_data, - ) - .await?; - } - - Ok(add_backup_keys_for_session::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) -} - -/// # `GET /_matrix/client/r0/room_keys/keys` -/// -/// Retrieves all keys from the backup. -pub(crate) async fn get_backup_keys_route( - State(services): State, - body: Ruma, -) -> Result { - let rooms = services - .key_backups - .get_all(body.sender_user(), &body.version) - .await; - - Ok(get_backup_keys::v3::Response { rooms }) -} - -/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` -/// -/// Retrieves all keys from the backup for a given room. 
-pub(crate) async fn get_backup_keys_for_room_route( - State(services): State, - body: Ruma, -) -> Result { - let sessions = services - .key_backups - .get_room(body.sender_user(), &body.version, &body.room_id) - .await; - - Ok(get_backup_keys_for_room::v3::Response { sessions }) -} - -/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` -/// -/// Retrieves a key from the backup. -pub(crate) async fn get_backup_keys_for_session_route( - State(services): State, - body: Ruma, -) -> Result { - let key_data = services - .key_backups - .get_session(body.sender_user(), &body.version, &body.room_id, &body.session_id) - .await - .map_err(|_| { - err!(Request(NotFound(debug_error!("Backup key not found for this user's session.")))) - })?; - - Ok(get_backup_keys_for_session::v3::Response { key_data }) -} - -/// # `DELETE /_matrix/client/r0/room_keys/keys` -/// -/// Delete the keys from the backup. -pub(crate) async fn delete_backup_keys_route( - State(services): State, - body: Ruma, -) -> Result { - services - .key_backups - .delete_all_keys(body.sender_user(), &body.version) - .await; - - Ok(delete_backup_keys::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) -} - -/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` -/// -/// Delete the keys from the backup for a given room. 
-pub(crate) async fn delete_backup_keys_for_room_route( - State(services): State, - body: Ruma, -) -> Result { - services - .key_backups - .delete_room_keys(body.sender_user(), &body.version, &body.room_id) - .await; - - Ok(delete_backup_keys_for_room::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) -} - -/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` -/// -/// Delete a key from the backup. -pub(crate) async fn delete_backup_keys_for_session_route( - State(services): State, - body: Ruma, -) -> Result { - services - .key_backups - .delete_room_key(body.sender_user(), &body.version, &body.room_id, &body.session_id) - .await; - - Ok(delete_backup_keys_for_session::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) -} diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs deleted file mode 100644 index 470ff6ab..00000000 --- a/src/api/client/capabilities.rs +++ /dev/null @@ -1,53 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use conduwuit::{Result, Server}; -use ruma::{ - RoomVersionId, - api::client::discovery::get_capabilities::{ - self, Capabilities, GetLoginTokenCapability, RoomVersionStability, - RoomVersionsCapability, ThirdPartyIdChangesCapability, - }, -}; -use serde_json::json; - -use crate::Ruma; - -/// # `GET /_matrix/client/v3/capabilities` -/// -/// Get information on the supported feature set and other relevent capabilities -/// of this server. 
-pub(crate) async fn get_capabilities_route( - State(services): State, - _body: Ruma, -) -> Result { - let available: BTreeMap = - Server::available_room_versions().collect(); - - let mut capabilities = Capabilities::default(); - capabilities.room_versions = RoomVersionsCapability { - default: services.server.config.default_room_version.clone(), - available, - }; - - // we do not implement 3PID stuff - capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability { enabled: false }; - - capabilities.get_login_token = GetLoginTokenCapability { - enabled: services.server.config.login_via_existing_session, - }; - - // MSC4133 capability - capabilities - .set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true})) - .expect("this is valid JSON we created"); - - capabilities - .set( - "org.matrix.msc4267.forget_forced_upon_leave", - json!({"enabled": services.config.forget_forced_upon_leave}), - ) - .expect("valid JSON we created"); - - Ok(get_capabilities::v3::Response { capabilities }) -} diff --git a/src/api/client/context.rs b/src/api/client/context.rs deleted file mode 100644 index dbc2a22f..00000000 --- a/src/api/client/context.rs +++ /dev/null @@ -1,217 +0,0 @@ -use axum::extract::State; -use conduwuit::{ - Err, Result, at, debug_warn, err, - matrix::pdu::PduEvent, - ref_at, - utils::{ - IterStream, - future::TryExtExt, - stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, - }, -}; -use conduwuit_service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; -use futures::{ - FutureExt, StreamExt, TryFutureExt, TryStreamExt, - future::{OptionFuture, join, join3, try_join3}, -}; -use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType}; - -use crate::{ - Ruma, - client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, -}; - -const LIMIT_MAX: usize = 100; -const LIMIT_DEFAULT: usize = 10; - -/// # `GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}` -/// -/// 
Allows loading room history around an event. -/// -/// - Only works if the user is joined (TODO: always allow, but only show events -/// if the user was joined, depending on history_visibility) -pub(crate) async fn get_context_route( - State(services): State, - body: Ruma, -) -> Result { - let sender = body.sender(); - let (sender_user, sender_device) = sender; - let room_id = &body.room_id; - let event_id = &body.event_id; - let filter = &body.filter; - - if !services.rooms.metadata.exists(room_id).await { - return Err!(Request(Forbidden("Room does not exist to this server"))); - } - - // Use limit or else 10, with maximum 100 - let limit: usize = body - .limit - .try_into() - .unwrap_or(LIMIT_DEFAULT) - .min(LIMIT_MAX); - - let base_id = services - .rooms - .timeline - .get_pdu_id(event_id) - .map_err(|_| err!(Request(NotFound("Event not found.")))); - - let base_pdu = services - .rooms - .timeline - .get_pdu(event_id) - .map_err(|_| err!(Request(NotFound("Base event not found.")))); - - let visible = services - .rooms - .state_accessor - .user_can_see_event(sender_user, room_id, event_id) - .map(Ok); - - let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?; - - if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id { - return Err!(Request(NotFound("Base event not found."))); - } - - if !visible { - debug_warn!(req_evt = ?event_id, ?base_id, ?room_id, "Event requested by {sender_user} but is not allowed to see it, returning 404"); - return Err!(Request(NotFound("Event not found."))); - } - - let base_count = base_id.pdu_count(); - - let base_event = ignored_filter(&services, (base_count, base_pdu), sender_user); - - let events_before = services - .rooms - .timeline - .pdus_rev(Some(sender_user), room_id, Some(base_count)) - .ignore_err() - .ready_filter_map(|item| event_filter(item, filter)) - .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) - .wide_filter_map(|item| visibility_filter(&services, item, 
sender_user)) - .take(limit / 2) - .collect(); - - let events_after = services - .rooms - .timeline - .pdus(Some(sender_user), room_id, Some(base_count)) - .ignore_err() - .ready_filter_map(|item| event_filter(item, filter)) - .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) - .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) - .take(limit / 2) - .collect(); - - let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) = - join3(base_event, events_before, events_after).boxed().await; - - let lazy_loading_context = lazy_loading::Context { - user_id: sender_user, - device_id: sender_device, - room_id, - token: Some(base_count.into_unsigned()), - options: Some(&filter.lazy_load_options), - }; - - let lazy_loading_witnessed: OptionFuture<_> = filter - .lazy_load_options - .is_enabled() - .then_some( - base_event - .iter() - .chain(events_before.iter()) - .chain(events_after.iter()), - ) - .map(|witnessed| lazy_loading_witness(&services, &lazy_loading_context, witnessed)) - .into(); - - let state_at = events_after - .last() - .map(ref_at!(1)) - .map_or(body.event_id.as_ref(), |pdu| pdu.event_id.as_ref()); - - let state_ids = services - .rooms - .state_accessor - .pdu_shortstatehash(state_at) - .or_else(|_| services.rooms.state.get_room_shortstatehash(room_id)) - .map_ok(|shortstatehash| { - services - .rooms - .state_accessor - .state_full_ids(shortstatehash) - .map(Ok) - }) - .map_err(|e| err!(Database("State not found: {e}"))) - .try_flatten_stream() - .try_collect() - .boxed(); - - let (lazy_loading_witnessed, state_ids) = join(lazy_loading_witnessed, state_ids).await; - - let state_ids: Vec<(ShortStateKey, OwnedEventId)> = state_ids?; - let shortstatekeys = state_ids.iter().map(at!(0)).stream(); - let shorteventids = state_ids.iter().map(ref_at!(1)).stream(); - let lazy_loading_witnessed = lazy_loading_witnessed.unwrap_or_default(); - let state: Vec<_> = services - .rooms - .short - 
.multi_get_statekey_from_short(shortstatekeys) - .zip(shorteventids) - .ready_filter_map(|item| Some((item.0.ok()?, item.1))) - .ready_filter_map(|((event_type, state_key), event_id)| { - if filter.lazy_load_options.is_enabled() - && event_type == StateEventType::RoomMember - && state_key - .as_str() - .try_into() - .is_ok_and(|user_id: &UserId| !lazy_loading_witnessed.contains(user_id)) - { - return None; - } - - Some(event_id) - }) - .broad_filter_map(|event_id: &OwnedEventId| { - services.rooms.timeline.get_pdu(event_id.as_ref()).ok() - }) - .map(PduEvent::into_state_event) - .collect() - .await; - - Ok(get_context::v3::Response { - event: base_event.map(at!(1)).map(PduEvent::into_room_event), - - start: events_before - .last() - .map(at!(0)) - .or(Some(base_count)) - .as_ref() - .map(ToString::to_string), - - end: events_after - .last() - .map(at!(0)) - .or(Some(base_count)) - .as_ref() - .map(ToString::to_string), - - events_before: events_before - .into_iter() - .map(at!(1)) - .map(PduEvent::into_room_event) - .collect(), - - events_after: events_after - .into_iter() - .map(at!(1)) - .map(PduEvent::into_room_event) - .collect(), - - state, - }) -} diff --git a/src/api/client/device.rs b/src/api/client/device.rs deleted file mode 100644 index 5519a1a5..00000000 --- a/src/api/client/device.rs +++ /dev/null @@ -1,263 +0,0 @@ -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, Error, Result, debug, err, utils}; -use futures::StreamExt; -use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedDeviceId, - api::client::{ - device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, - error::ErrorKind, - uiaa::{AuthFlow, AuthType, UiaaInfo}, - }, -}; - -use super::SESSION_ID_LENGTH; -use crate::{Ruma, client::DEVICE_ID_LENGTH}; - -/// # `GET /_matrix/client/r0/devices` -/// -/// Get metadata on all devices of the sender user. 
-pub(crate) async fn get_devices_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let devices: Vec = services - .users - .all_devices_metadata(sender_user) - .collect() - .await; - - Ok(get_devices::v3::Response { devices }) -} - -/// # `GET /_matrix/client/r0/devices/{deviceId}` -/// -/// Get metadata on a single device of the sender user. -pub(crate) async fn get_device_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let device = services - .users - .get_device_metadata(sender_user, &body.body.device_id) - .await - .map_err(|_| err!(Request(NotFound("Device not found."))))?; - - Ok(get_device::v3::Response { device }) -} - -/// # `PUT /_matrix/client/r0/devices/{deviceId}` -/// -/// Updates the metadata on a given device of the sender user. -#[tracing::instrument(skip_all, fields(%client), name = "update_device")] -pub(crate) async fn update_device_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - let appservice = body.appservice_info.as_ref(); - - match services - .users - .get_device_metadata(sender_user, &body.device_id) - .await - { - | Ok(mut device) => { - device.display_name.clone_from(&body.display_name); - device.last_seen_ip.clone_from(&Some(client.to_string())); - device - .last_seen_ts - .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); - - services - .users - .update_device_metadata(sender_user, &body.device_id, &device) - .await?; - - Ok(update_device::v3::Response {}) - }, - | Err(_) => { - let Some(appservice) = appservice else { - return Err!(Request(NotFound("Device not found."))); - }; - if !appservice.registration.device_management { - return Err!(Request(NotFound("Device not found."))); - } - - debug!( - "Creating new device for {sender_user} from 
appservice {} as MSC4190 is enabled \ - and device ID does not exist", - appservice.registration.id - ); - - let device_id = OwnedDeviceId::from(utils::random_string(DEVICE_ID_LENGTH)); - - services - .users - .create_device( - sender_user, - &device_id, - &appservice.registration.as_token, - None, - Some(client.to_string()), - ) - .await?; - - return Ok(update_device::v3::Response {}); - }, - } -} - -/// # `DELETE /_matrix/client/r0/devices/{deviceId}` -/// -/// Deletes the given device. -/// -/// - Requires UIAA to verify user password -/// - Invalidates access token -/// - Deletes device metadata (device id, device display name, last seen ip, -/// last seen ts) -/// - Forgets to-device events -/// - Triggers device list updates -pub(crate) async fn delete_device_route( - State(services): State, - body: Ruma, -) -> Result { - let (sender_user, sender_device) = body.sender(); - let appservice = body.appservice_info.as_ref(); - - if appservice.is_some_and(|appservice| appservice.registration.device_management) { - debug!( - "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ - enabled" - ); - services - .users - .remove_device(sender_user, &body.device_id) - .await; - - return Ok(delete_device::v3::Response {}); - } - - // UIAA - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { stages: vec![AuthType::Password] }], - completed: Vec::new(), - params: Box::default(), - session: None, - auth_error: None, - }; - - match &body.auth { - | Some(auth) => { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; - - if !worked { - return Err!(Uiaa(uiaainfo)); - } - // Success! 
- }, - | _ => match body.json_body { - | Some(ref json) => { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, json); - - return Err!(Uiaa(uiaainfo)); - }, - | _ => { - return Err!(Request(NotJson("Not json."))); - }, - }, - } - - services - .users - .remove_device(sender_user, &body.device_id) - .await; - - Ok(delete_device::v3::Response {}) -} - -/// # `POST /_matrix/client/v3/delete_devices` -/// -/// Deletes the given list of devices. -/// -/// - Requires UIAA to verify user password unless from an appservice with -/// MSC4190 enabled. -/// -/// For each device: -/// - Invalidates access token -/// - Deletes device metadata (device id, device display name, last seen ip, -/// last seen ts) -/// - Forgets to-device events -/// - Triggers device list updates -pub(crate) async fn delete_devices_route( - State(services): State, - body: Ruma, -) -> Result { - let (sender_user, sender_device) = body.sender(); - let appservice = body.appservice_info.as_ref(); - - if appservice.is_some_and(|appservice| appservice.registration.device_management) { - debug!( - "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ - enabled" - ); - for device_id in &body.devices { - services.users.remove_device(sender_user, device_id).await; - } - - return Ok(delete_devices::v3::Response {}); - } - - // UIAA - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { stages: vec![AuthType::Password] }], - completed: Vec::new(), - params: Box::default(), - session: None, - auth_error: None, - }; - - match &body.auth { - | Some(auth) => { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; - - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! 
- }, - | _ => match body.json_body { - | Some(ref json) => { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, json); - - return Err(Error::Uiaa(uiaainfo)); - }, - | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - }, - }, - } - - for device_id in &body.devices { - services.users.remove_device(sender_user, device_id).await; - } - - Ok(delete_devices::v3::Response {}) -} diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs deleted file mode 100644 index aa6ae168..00000000 --- a/src/api/client/directory.rs +++ /dev/null @@ -1,430 +0,0 @@ -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{ - Err, Result, err, info, - utils::{ - TryFutureExtExt, - math::Expected, - result::FlatOk, - stream::{ReadyExt, WidebandExt}, - }, -}; -use conduwuit_service::Services; -use futures::{ - FutureExt, StreamExt, TryFutureExt, - future::{join, join4, join5}, -}; -use ruma::{ - OwnedRoomId, RoomId, ServerName, UInt, UserId, - api::{ - client::{ - directory::{ - get_public_rooms, get_public_rooms_filtered, get_room_visibility, - set_room_visibility, - }, - room, - }, - federation, - }, - directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork, RoomTypeFilter}, - events::{ - StateEventType, - room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - }, - }, - uint, -}; - -use crate::Ruma; - -/// # `POST /_matrix/client/v3/publicRooms` -/// -/// Lists the public rooms on this server. 
-/// -/// - Rooms are ordered by the number of joined members -#[tracing::instrument(skip_all, fields(%client), name = "publicrooms")] -pub(crate) async fn get_public_rooms_filtered_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - if let Some(server) = &body.server { - if services - .moderation - .is_remote_server_room_directory_forbidden(server) - { - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - } - - let response = get_public_rooms_filtered_helper( - &services, - body.server.as_deref(), - body.limit, - body.since.as_deref(), - &body.filter, - &body.room_network, - ) - .await - .map_err(|e| { - err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) - })?; - - Ok(response) -} - -/// # `GET /_matrix/client/v3/publicRooms` -/// -/// Lists the public rooms on this server. -/// -/// - Rooms are ordered by the number of joined members -#[tracing::instrument(skip_all, fields(%client), name = "publicrooms")] -pub(crate) async fn get_public_rooms_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - if let Some(server) = &body.server { - if services.moderation.is_remote_server_forbidden(server) { - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - } - - let response = get_public_rooms_filtered_helper( - &services, - body.server.as_deref(), - body.limit, - body.since.as_deref(), - &Filter::default(), - &RoomNetwork::Matrix, - ) - .await - .map_err(|e| { - err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) - })?; - - Ok(get_public_rooms::v3::Response { - chunk: response.chunk, - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - }) -} - -/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}` -/// -/// Sets the visibility of a given room in the room 
directory. -#[tracing::instrument(skip_all, fields(%client), name = "room_directory")] -pub(crate) async fn set_room_visibility_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if !services.rooms.metadata.exists(&body.room_id).await { - // Return 404 if the room doesn't exist - return Err!(Request(NotFound("Room not found"))); - } - - if services - .users - .is_deactivated(sender_user) - .await - .unwrap_or(false) - && body.appservice_info.is_none() - { - return Err!(Request(Forbidden("Guests cannot publish to room directories"))); - } - - if !user_can_publish_room(&services, sender_user, &body.room_id).await? { - return Err!(Request(Forbidden("User is not allowed to publish this room"))); - } - - match &body.visibility { - | room::Visibility::Public => { - if services.server.config.lockdown_public_room_directory - && !services.users.is_admin(sender_user).await - && body.appservice_info.is_none() - { - info!( - "Non-admin user {sender_user} tried to publish {0} to the room directory \ - while \"lockdown_public_room_directory\" is enabled", - body.room_id - ); - - if services.server.config.admin_room_notices { - services - .admin - .send_text(&format!( - "Non-admin user {sender_user} tried to publish {0} to the room \ - directory while \"lockdown_public_room_directory\" is enabled", - body.room_id - )) - .await; - } - - return Err!(Request(Forbidden( - "Publishing rooms to the room directory is not allowed", - ))); - } - - services.rooms.directory.set_public(&body.room_id); - - if services.server.config.admin_room_notices { - services - .admin - .send_text(&format!( - "{sender_user} made {} public to the room directory", - body.room_id - )) - .await; - } - info!("{sender_user} made {0} public to the room directory", body.room_id); - }, - | room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id), - | _ => { - return 
Err!(Request(InvalidParam("Room visibility type is not supported.",))); - }, - } - - Ok(set_room_visibility::v3::Response {}) -} - -/// # `GET /_matrix/client/r0/directory/list/room/{roomId}` -/// -/// Gets the visibility of a given room in the room directory. -pub(crate) async fn get_room_visibility_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.rooms.metadata.exists(&body.room_id).await { - // Return 404 if the room doesn't exist - return Err!(Request(NotFound("Room not found"))); - } - - Ok(get_room_visibility::v3::Response { - visibility: if services.rooms.directory.is_public_room(&body.room_id).await { - room::Visibility::Public - } else { - room::Visibility::Private - }, - }) -} - -pub(crate) async fn get_public_rooms_filtered_helper( - services: &Services, - server: Option<&ServerName>, - limit: Option, - since: Option<&str>, - filter: &Filter, - _network: &RoomNetwork, -) -> Result { - if let Some(other_server) = - server.filter(|server_name| !services.globals.server_is_ours(server_name)) - { - let response = services - .sending - .send_federation_request( - other_server, - federation::directory::get_public_rooms_filtered::v1::Request { - limit, - since: since.map(ToOwned::to_owned), - filter: Filter { - generic_search_term: filter.generic_search_term.clone(), - room_types: filter.room_types.clone(), - }, - room_network: RoomNetwork::Matrix, - }, - ) - .await?; - - return Ok(get_public_rooms_filtered::v3::Response { - chunk: response.chunk, - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - }); - } - - // Use limit or else 10, with maximum 100 - let limit: usize = limit.map_or(10_u64, u64::from).try_into()?; - let mut num_since: usize = 0; - - if let Some(s) = &since { - let mut characters = s.chars(); - let backwards = match characters.next() { - | Some('n') => false, - | Some('p') => true, - | _ => { - return 
Err!(Request(InvalidParam("Invalid `since` token"))); - }, - }; - - num_since = characters - .collect::() - .parse() - .map_err(|_| err!(Request(InvalidParam("Invalid `since` token."))))?; - - if backwards { - num_since = num_since.saturating_sub(limit); - } - } - - let mut all_rooms: Vec = services - .rooms - .directory - .public_rooms() - .map(ToOwned::to_owned) - .wide_then(|room_id| public_rooms_chunk(services, room_id)) - .ready_filter_map(|chunk| { - if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { - return None; - } - - if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { - if let Some(name) = &chunk.name { - if name.as_str().to_lowercase().contains(&query) { - return Some(chunk); - } - } - - if let Some(topic) = &chunk.topic { - if topic.to_lowercase().contains(&query) { - return Some(chunk); - } - } - - if let Some(canonical_alias) = &chunk.canonical_alias { - if canonical_alias.as_str().to_lowercase().contains(&query) { - return Some(chunk); - } - } - - return None; - } - - // No search term - Some(chunk) - }) - // We need to collect all, so we can sort by member count - .collect() - .await; - - all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); - - let total_room_count_estimate = UInt::try_from(all_rooms.len()) - .unwrap_or_else(|_| uint!(0)) - .into(); - - let chunk: Vec<_> = all_rooms.into_iter().skip(num_since).take(limit).collect(); - - let prev_batch = num_since.ne(&0).then_some(format!("p{num_since}")); - - let next_batch = chunk - .len() - .ge(&limit) - .then_some(format!("n{}", num_since.expected_add(limit))); - - Ok(get_public_rooms_filtered::v3::Response { - chunk, - prev_batch, - next_batch, - total_room_count_estimate, - }) -} - -/// Check whether the user can publish to the room directory via power levels of -/// room history visibility event or room creator -async fn user_can_publish_room( - services: &Services, - 
user_id: &UserId, - room_id: &RoomId, -) -> Result { - match services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") - .await - { - | Ok(event) => serde_json::from_str(event.content.get()) - .map_err(|_| err!(Database("Invalid event content for m.room.power_levels"))) - .map(|content: RoomPowerLevelsEventContent| { - RoomPowerLevels::from(content) - .user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) - }), - | _ => { - match services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - | Ok(event) => Ok(event.sender == user_id), - | _ => Err!(Request(Forbidden("User is not allowed to publish this room"))), - } - }, - } -} - -async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk { - let name = services.rooms.state_accessor.get_name(&room_id).ok(); - - let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok(); - - let canonical_alias = services - .rooms - .state_accessor - .get_canonical_alias(&room_id) - .ok(); - - let avatar_url = services.rooms.state_accessor.get_avatar(&room_id); - - let topic = services.rooms.state_accessor.get_room_topic(&room_id).ok(); - - let world_readable = services.rooms.state_accessor.is_world_readable(&room_id); - - let join_rule = services - .rooms - .state_accessor - .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") - .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { - | JoinRule::Public => PublicRoomJoinRule::Public, - | JoinRule::Knock => "knock".into(), - | JoinRule::KnockRestricted(_) => "knock_restricted".into(), - | _ => "invite".into(), - }); - - let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id); - - let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id); - - let ( - (avatar_url, canonical_alias, guest_can_join, join_rule, name), - (num_joined_members, room_type, topic, world_readable), - 
) = join( - join5(avatar_url, canonical_alias, guest_can_join, join_rule, name), - join4(num_joined_members, room_type, topic, world_readable), - ) - .boxed() - .await; - - PublicRoomsChunk { - avatar_url: avatar_url.into_option().unwrap_or_default().url, - canonical_alias, - guest_can_join, - join_rule: join_rule.unwrap_or_default(), - name, - num_joined_members: num_joined_members - .map(TryInto::try_into) - .map(Result::ok) - .flat_ok() - .unwrap_or_else(|| uint!(0)), - room_id, - room_type, - topic, - world_readable, - } -} diff --git a/src/api/client/filter.rs b/src/api/client/filter.rs deleted file mode 100644 index 97044ffc..00000000 --- a/src/api/client/filter.rs +++ /dev/null @@ -1,38 +0,0 @@ -use axum::extract::State; -use conduwuit::{Result, err}; -use ruma::api::client::filter::{create_filter, get_filter}; - -use crate::Ruma; - -/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` -/// -/// Loads a filter that was previously created. -/// -/// - A user can only access their own filters -pub(crate) async fn get_filter_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - services - .users - .get_filter(sender_user, &body.filter_id) - .await - .map(get_filter::v3::Response::new) - .map_err(|_| err!(Request(NotFound("Filter not found.")))) -} - -/// # `PUT /_matrix/client/r0/user/{userId}/filter` -/// -/// Creates a new filter to be used by other endpoints. 
-pub(crate) async fn create_filter_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let filter_id = services.users.create_filter(sender_user, &body.filter); - - Ok(create_filter::v3::Response::new(filter_id)) -} diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs deleted file mode 100644 index 650c573f..00000000 --- a/src/api/client/keys.rs +++ /dev/null @@ -1,673 +0,0 @@ -use std::collections::{BTreeMap, HashMap, HashSet}; - -use axum::extract::State; -use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils}; -use conduwuit_service::{Services, users::parse_master_key}; -use futures::{StreamExt, stream::FuturesUnordered}; -use ruma::{ - OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, - api::{ - client::{ - error::ErrorKind, - keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, - upload_signatures::{self}, - upload_signing_keys, - }, - uiaa::{AuthFlow, AuthType, UiaaInfo}, - }, - federation, - }, - encryption::CrossSigningKey, - serde::Raw, -}; -use serde_json::json; - -use super::SESSION_ID_LENGTH; -use crate::Ruma; - -/// # `POST /_matrix/client/r0/keys/upload` -/// -/// Publish end-to-end encryption keys for the sender device. -/// -/// - Adds one time keys -/// - If there are no device keys yet: Adds device keys (TODO: merge with -/// existing keys?) 
-pub(crate) async fn upload_keys_route( - State(services): State, - body: Ruma, -) -> Result { - let (sender_user, sender_device) = body.sender(); - - for (key_id, one_time_key) in &body.one_time_keys { - if one_time_key - .deserialize() - .inspect_err(|e| { - debug_warn!( - ?key_id, - ?one_time_key, - "Invalid one time key JSON submitted by client, skipping: {e}" - ); - }) - .is_err() - { - continue; - } - - services - .users - .add_one_time_key(sender_user, sender_device, key_id, one_time_key) - .await?; - } - - if let Some(device_keys) = &body.device_keys { - let deser_device_keys = device_keys.deserialize().map_err(|e| { - err!(Request(BadJson(debug_warn!( - ?device_keys, - "Invalid device keys JSON uploaded by client: {e}" - )))) - })?; - - if deser_device_keys.user_id != sender_user { - return Err!(Request(Unknown( - "User ID in keys uploaded does not match your own user ID" - ))); - } - if deser_device_keys.device_id != sender_device { - return Err!(Request(Unknown( - "Device ID in keys uploaded does not match your own device ID" - ))); - } - - if let Ok(existing_keys) = services - .users - .get_device_keys(sender_user, sender_device) - .await - { - if existing_keys.json().get() == device_keys.json().get() { - debug!( - ?sender_user, - ?sender_device, - ?device_keys, - "Ignoring user uploaded keys as they are an exact copy already in the \ - database" - ); - } else { - services - .users - .add_device_keys(sender_user, sender_device, device_keys) - .await; - } - } else { - services - .users - .add_device_keys(sender_user, sender_device, device_keys) - .await; - } - } - - Ok(upload_keys::v3::Response { - one_time_key_counts: services - .users - .count_one_time_keys(sender_user, sender_device) - .await, - }) -} - -/// # `POST /_matrix/client/r0/keys/query` -/// -/// Get end-to-end encryption keys for the given users. -/// -/// - Always fetches users from other servers over federation -/// - Gets master keys, self-signing keys, user signing keys and device keys. 
-/// - The master and self-signing keys contain signatures that the user is -/// allowed to see -pub(crate) async fn get_keys_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - get_keys_helper( - &services, - Some(sender_user), - &body.device_keys, - |u| u == sender_user, - true, // Always allow local users to see device names of other local users - ) - .await -} - -/// # `POST /_matrix/client/r0/keys/claim` -/// -/// Claims one-time keys -pub(crate) async fn claim_keys_route( - State(services): State, - body: Ruma, -) -> Result { - claim_keys_helper(&services, &body.one_time_keys).await -} - -/// # `POST /_matrix/client/r0/keys/device_signing/upload` -/// -/// Uploads end-to-end key information for the sender user. -/// -/// - Requires UIAA to verify password -pub(crate) async fn upload_signing_keys_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - - // UIAA - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { stages: vec![AuthType::Password] }], - completed: Vec::new(), - params: Box::default(), - session: None, - auth_error: None, - }; - - match check_for_new_keys( - services, - sender_user, - body.self_signing_key.as_ref(), - body.user_signing_key.as_ref(), - body.master_key.as_ref(), - ) - .await - .inspect_err(|e| debug!(?e)) - { - | Ok(exists) => { - if let Some(result) = exists { - // No-op, they tried to reupload the same set of keys - // (lost connection for example) - return Ok(result); - } - debug!( - "Skipping UIA in accordance with MSC3967, the user didn't have any existing keys" - ); - // Some of the keys weren't found, so we let them upload - }, - | _ => { - match &body.auth { - | Some(auth) => { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, 
auth, &uiaainfo) - .await?; - - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - }, - | _ => match body.json_body { - | Some(json) => { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); - - return Err(Error::Uiaa(uiaainfo)); - }, - | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - }, - }, - } - }, - } - - services - .users - .add_cross_signing_keys( - sender_user, - &body.master_key, - &body.self_signing_key, - &body.user_signing_key, - true, // notify so that other users see the new keys - ) - .await?; - - Ok(upload_signing_keys::v3::Response {}) -} - -async fn check_for_new_keys( - services: crate::State, - user_id: &UserId, - self_signing_key: Option<&Raw>, - user_signing_key: Option<&Raw>, - master_signing_key: Option<&Raw>, -) -> Result> { - debug!("checking for existing keys"); - let mut empty = false; - if let Some(master_signing_key) = master_signing_key { - let (key, value) = parse_master_key(user_id, master_signing_key)?; - let result = services - .users - .get_master_key(None, user_id, &|_| true) - .await; - if result.is_not_found() { - empty = true; - } else { - let existing_master_key = result?; - let (existing_key, existing_value) = parse_master_key(user_id, &existing_master_key)?; - if existing_key != key || existing_value != value { - return Err!(Request(Forbidden( - "Tried to change an existing master key, UIA required" - ))); - } - } - } - if let Some(user_signing_key) = user_signing_key { - let key = services.users.get_user_signing_key(user_id).await; - if key.is_not_found() && !empty { - return Err!(Request(Forbidden( - "Tried to update an existing user signing key, UIA required" - ))); - } - if !key.is_not_found() { - let existing_signing_key = key?.deserialize()?; - if existing_signing_key != user_signing_key.deserialize()? 
{ - return Err!(Request(Forbidden( - "Tried to change an existing user signing key, UIA required" - ))); - } - } - } - if let Some(self_signing_key) = self_signing_key { - let key = services - .users - .get_self_signing_key(None, user_id, &|_| true) - .await; - if key.is_not_found() && !empty { - debug!(?key); - return Err!(Request(Forbidden( - "Tried to add a new signing key independently from the master key" - ))); - } - if !key.is_not_found() { - let existing_signing_key = key?.deserialize()?; - if existing_signing_key != self_signing_key.deserialize()? { - return Err!(Request(Forbidden( - "Tried to update an existing self signing key, UIA required" - ))); - } - } - } - if empty { - return Ok(None); - } - - Ok(Some(upload_signing_keys::v3::Response {})) -} - -/// # `POST /_matrix/client/r0/keys/signatures/upload` -/// -/// Uploads end-to-end key signatures from the sender user. -/// -/// TODO: clean this timo-code up more and integrate failures. tried to improve -/// it a bit to stop exploding the entire request on bad sigs, but needs way -/// more work. 
-pub(crate) async fn upload_signatures_route( - State(services): State, - body: Ruma, -) -> Result { - if body.signed_keys.is_empty() { - debug!("Empty signed_keys sent in key signature upload"); - return Ok(upload_signatures::v3::Response::new()); - } - - let sender_user = body.sender_user(); - - for (user_id, keys) in &body.signed_keys { - for (key_id, key) in keys { - let Ok(key) = serde_json::to_value(key) - .inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}")) - else { - continue; - }; - - let Some(signatures) = key.get("signatures") else { - continue; - }; - - let Some(sender_user_val) = signatures.get(sender_user.to_string()) else { - continue; - }; - - let Some(sender_user_object) = sender_user_val.as_object() else { - continue; - }; - - for (signature, val) in sender_user_object.clone() { - let Some(val) = val.as_str().map(ToOwned::to_owned) else { - continue; - }; - let signature = (signature, val); - - if let Err(_e) = services - .users - .sign_key(user_id, key_id, signature, sender_user) - .await - .inspect_err(|e| debug_warn!("{e}")) - { - continue; - } - } - } - } - - Ok(upload_signatures::v3::Response { failures: BTreeMap::new() }) -} - -/// # `POST /_matrix/client/r0/keys/changes` -/// -/// Gets a list of users who have updated their device identity keys since the -/// previous sync token. 
-/// -/// - TODO: left users -pub(crate) async fn get_key_changes_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let mut device_list_updates = HashSet::new(); - - let from = body - .from - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?; - - let to = body - .to - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?; - - device_list_updates.extend( - services - .users - .keys_changed(sender_user, from, Some(to)) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - let mut rooms_joined = services.rooms.state_cache.rooms_joined(sender_user).boxed(); - - while let Some(room_id) = rooms_joined.next().await { - device_list_updates.extend( - services - .users - .room_keys_changed(room_id, from, Some(to)) - .map(|(user_id, _)| user_id) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - } - - Ok(get_key_changes::v3::Response { - changed: device_list_updates.into_iter().collect(), - left: Vec::new(), // TODO - }) -} - -pub(crate) async fn get_keys_helper( - services: &Services, - sender_user: Option<&UserId>, - device_keys_input: &BTreeMap>, - allowed_signatures: F, - include_display_names: bool, -) -> Result -where - F: Fn(&UserId) -> bool + Send + Sync, -{ - let mut master_keys = BTreeMap::new(); - let mut self_signing_keys = BTreeMap::new(); - let mut user_signing_keys = BTreeMap::new(); - let mut device_keys = BTreeMap::new(); - - let mut get_over_federation = HashMap::new(); - - for (user_id, device_ids) in device_keys_input { - let user_id: &UserId = user_id; - - if !services.globals.user_is_local(user_id) { - get_over_federation - .entry(user_id.server_name()) - .or_insert_with(Vec::new) - .push((user_id, device_ids)); - continue; - } - - if device_ids.is_empty() { - let mut container = BTreeMap::new(); - let mut devices = services.users.all_device_ids(user_id).boxed(); - - while let 
Some(device_id) = devices.next().await { - if let Ok(mut keys) = services.users.get_device_keys(user_id, device_id).await { - let metadata = services - .users - .get_device_metadata(user_id, device_id) - .await - .map_err(|_| { - err!(Database("all_device_keys contained nonexistent device.")) - })?; - - add_unsigned_device_display_name(&mut keys, metadata, include_display_names) - .map_err(|_| err!(Database("invalid device keys in database")))?; - - container.insert(device_id.to_owned(), keys); - } - } - - device_keys.insert(user_id.to_owned(), container); - } else { - for device_id in device_ids { - let mut container = BTreeMap::new(); - if let Ok(mut keys) = services.users.get_device_keys(user_id, device_id).await { - let metadata = services - .users - .get_device_metadata(user_id, device_id) - .await - .map_err(|_| { - err!(Request(InvalidParam( - "Tried to get keys for nonexistent device." - ))) - })?; - - add_unsigned_device_display_name(&mut keys, metadata, include_display_names) - .map_err(|_| err!(Database("invalid device keys in database")))?; - - container.insert(device_id.to_owned(), keys); - } - - device_keys.insert(user_id.to_owned(), container); - } - } - - if let Ok(master_key) = services - .users - .get_master_key(sender_user, user_id, &allowed_signatures) - .await - { - master_keys.insert(user_id.to_owned(), master_key); - } - if let Ok(self_signing_key) = services - .users - .get_self_signing_key(sender_user, user_id, &allowed_signatures) - .await - { - self_signing_keys.insert(user_id.to_owned(), self_signing_key); - } - if Some(user_id) == sender_user { - if let Ok(user_signing_key) = services.users.get_user_signing_key(user_id).await { - user_signing_keys.insert(user_id.to_owned(), user_signing_key); - } - } - } - - let mut failures = BTreeMap::new(); - - let mut futures: FuturesUnordered<_> = get_over_federation - .into_iter() - .map(|(server, vec)| async move { - let mut device_keys_input_fed = BTreeMap::new(); - for (user_id, keys) in vec { 
- device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); - } - - let request = - federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed }; - - let response = services - .sending - .send_federation_request(server, request) - .await; - - (server, response) - }) - .collect(); - - while let Some((server, response)) = futures.next().await { - match response { - | Ok(response) => { - for (user, master_key) in response.master_keys { - let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; - - if let Ok(our_master_key) = services - .users - .get_key(&master_key_id, sender_user, &user, &allowed_signatures) - .await - { - let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; - master_key.signatures.append(&mut our_master_key.signatures); - } - let json = serde_json::to_value(master_key).expect("to_value always works"); - let raw = serde_json::from_value(json).expect("Raw::from_value always works"); - services - .users - .add_cross_signing_keys( - &user, &raw, &None, &None, - false, /* Dont notify. 
A notification would trigger another key - * request resulting in an endless loop */ - ) - .await?; - if let Some(raw) = raw { - master_keys.insert(user.clone(), raw); - } - } - - self_signing_keys.extend(response.self_signing_keys); - device_keys.extend(response.device_keys); - }, - | _ => { - failures.insert(server.to_string(), json!({})); - }, - } - } - - Ok(get_keys::v3::Response { - failures, - device_keys, - master_keys, - self_signing_keys, - user_signing_keys, - }) -} - -fn add_unsigned_device_display_name( - keys: &mut Raw, - metadata: ruma::api::client::device::Device, - include_display_names: bool, -) -> serde_json::Result<()> { - if let Some(display_name) = metadata.display_name { - let mut object = keys.deserialize_as::>()?; - - let unsigned = object.entry("unsigned").or_insert_with(|| json!({})); - if let serde_json::Value::Object(unsigned_object) = unsigned { - if include_display_names { - unsigned_object.insert("device_display_name".to_owned(), display_name.into()); - } else { - unsigned_object.insert( - "device_display_name".to_owned(), - Some(metadata.device_id.as_str().to_owned()).into(), - ); - } - } - - *keys = Raw::from_json(serde_json::value::to_raw_value(&object)?); - } - - Ok(()) -} - -pub(crate) async fn claim_keys_helper( - services: &Services, - one_time_keys_input: &BTreeMap>, -) -> Result { - let mut one_time_keys = BTreeMap::new(); - - let mut get_over_federation = BTreeMap::new(); - - for (user_id, map) in one_time_keys_input { - if !services.globals.user_is_local(user_id) { - get_over_federation - .entry(user_id.server_name()) - .or_insert_with(Vec::new) - .push((user_id, map)); - } - - let mut container = BTreeMap::new(); - for (device_id, key_algorithm) in map { - if let Ok(one_time_keys) = services - .users - .take_one_time_key(user_id, device_id, key_algorithm) - .await - { - let mut c = BTreeMap::new(); - c.insert(one_time_keys.0, one_time_keys.1); - container.insert(device_id.clone(), c); - } - } - 
one_time_keys.insert(user_id.clone(), container); - } - - let mut failures = BTreeMap::new(); - - let mut futures: FuturesUnordered<_> = get_over_federation - .into_iter() - .map(|(server, vec)| async move { - let mut one_time_keys_input_fed = BTreeMap::new(); - for (user_id, keys) in vec { - one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); - } - ( - server, - services - .sending - .send_federation_request(server, federation::keys::claim_keys::v1::Request { - one_time_keys: one_time_keys_input_fed, - }) - .await, - ) - }) - .collect(); - - while let Some((server, response)) = futures.next().await { - match response { - | Ok(keys) => { - one_time_keys.extend(keys.one_time_keys); - }, - | Err(_e) => { - failures.insert(server.to_string(), json!({})); - }, - } - } - - Ok(claim_keys::v3::Response { failures, one_time_keys }) -} diff --git a/src/api/client/media.rs b/src/api/client/media.rs deleted file mode 100644 index 94572413..00000000 --- a/src/api/client/media.rs +++ /dev/null @@ -1,331 +0,0 @@ -use std::time::Duration; - -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{ - Err, Result, err, - utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize}, -}; -use conduwuit_service::{ - Services, - media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH}, -}; -use reqwest::Url; -use ruma::{ - Mxc, UserId, - api::client::{ - authenticated_media::{ - get_content, get_content_as_filename, get_content_thumbnail, get_media_config, - get_media_preview, - }, - media::create_content, - }, -}; - -use crate::Ruma; - -/// # `GET /_matrix/client/v1/media/config` -pub(crate) async fn get_media_config_route( - State(services): State, - _body: Ruma, -) -> Result { - Ok(get_media_config::v1::Response { - upload_size: ruma_from_usize(services.server.config.max_request_size), - }) -} - -/// # `POST /_matrix/media/v3/upload` -/// -/// Permanently save media in the server. 
-/// -/// - Some metadata will be saved in the database -/// - Media will be saved in the media/ directory -#[tracing::instrument( - name = "media_upload", - level = "debug", - skip_all, - fields(%client), -)] -pub(crate) async fn create_content_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let user = body.sender_user.as_ref().expect("user is authenticated"); - - let filename = body.filename.as_deref(); - let content_type = body.content_type.as_deref(); - let content_disposition = make_content_disposition(None, content_type, filename); - let ref mxc = Mxc { - server_name: services.globals.server_name(), - media_id: &utils::random_string(MXC_LENGTH), - }; - - services - .media - .create(mxc, Some(user), Some(&content_disposition), content_type, &body.file) - .await?; - - let blurhash = body.generate_blurhash.then(|| { - services - .media - .create_blurhash(&body.file, content_type, filename) - .ok() - .flatten() - }); - - Ok(create_content::v3::Response { - content_uri: mxc.to_string().into(), - blurhash: blurhash.flatten(), - }) -} - -/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}` -/// -/// Load media thumbnail from our server or over federation. 
-#[tracing::instrument( - name = "media_thumbnail_get", - level = "debug", - skip_all, - fields(%client), -)] -pub(crate) async fn get_content_thumbnail_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let user = body.sender_user.as_ref().expect("user is authenticated"); - - let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; - let mxc = Mxc { - server_name: &body.server_name, - media_id: &body.media_id, - }; - - let FileMeta { - content, - content_type, - content_disposition, - } = fetch_thumbnail(&services, &mxc, user, body.timeout_ms, &dim).await?; - - Ok(get_content_thumbnail::v1::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition, - }) -} - -/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}` -/// -/// Load media from our server or over federation. 
-#[tracing::instrument( - name = "media_get", - level = "debug", - skip_all, - fields(%client), -)] -pub(crate) async fn get_content_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let user = body.sender_user.as_ref().expect("user is authenticated"); - - let mxc = Mxc { - server_name: &body.server_name, - media_id: &body.media_id, - }; - - let FileMeta { - content, - content_type, - content_disposition, - } = fetch_file(&services, &mxc, user, body.timeout_ms, None).await?; - - Ok(get_content::v1::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition, - }) -} - -/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}` -/// -/// Load media from our server or over federation as fileName. -#[tracing::instrument( - name = "media_get_af", - level = "debug", - skip_all, - fields(%client), -)] -pub(crate) async fn get_content_as_filename_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let user = body.sender_user.as_ref().expect("user is authenticated"); - - let mxc = Mxc { - server_name: &body.server_name, - media_id: &body.media_id, - }; - - let FileMeta { - content, - content_type, - content_disposition, - } = fetch_file(&services, &mxc, user, body.timeout_ms, Some(&body.filename)).await?; - - Ok(get_content_as_filename::v1::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition, - }) -} - -/// # `GET /_matrix/client/v1/media/preview_url` -/// -/// Returns URL preview. 
-#[tracing::instrument( - name = "url_preview", - level = "debug", - skip_all, - fields(%client), -)] -pub(crate) async fn get_media_preview_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let url = &body.url; - let url = Url::parse(&body.url).map_err(|e| { - err!(Request(InvalidParam( - debug_warn!(%sender_user, %url, "Requested URL is not valid: {e}") - ))) - })?; - - if !services.media.url_preview_allowed(&url) { - return Err!(Request(Forbidden( - debug_warn!(%sender_user, %url, "URL is not allowed to be previewed") - ))); - } - - let preview = services - .media - .get_url_preview(&url) - .await - .map_err(|error| { - err!(Request(Unknown( - debug_error!(%sender_user, %url, "Failed to fetch URL preview: {error}") - ))) - })?; - - serde_json::value::to_raw_value(&preview) - .map(get_media_preview::v1::Response::from_raw_value) - .map_err(|error| { - err!(Request(Unknown( - debug_error!(%sender_user, %url, "Failed to parse URL preview: {error}") - ))) - }) -} - -async fn fetch_thumbnail( - services: &Services, - mxc: &Mxc<'_>, - user: &UserId, - timeout_ms: Duration, - dim: &Dim, -) -> Result { - let FileMeta { - content, - content_type, - content_disposition, - } = fetch_thumbnail_meta(services, mxc, user, timeout_ms, dim).await?; - - let content_disposition = Some(make_content_disposition( - content_disposition.as_ref(), - content_type.as_deref(), - None, - )); - - Ok(FileMeta { - content, - content_type, - content_disposition, - }) -} - -async fn fetch_file( - services: &Services, - mxc: &Mxc<'_>, - user: &UserId, - timeout_ms: Duration, - filename: Option<&str>, -) -> Result { - let FileMeta { - content, - content_type, - content_disposition, - } = fetch_file_meta(services, mxc, user, timeout_ms).await?; - - let content_disposition = Some(make_content_disposition( - content_disposition.as_ref(), - 
content_type.as_deref(), - filename, - )); - - Ok(FileMeta { - content, - content_type, - content_disposition, - }) -} - -async fn fetch_thumbnail_meta( - services: &Services, - mxc: &Mxc<'_>, - user: &UserId, - timeout_ms: Duration, - dim: &Dim, -) -> Result { - if let Some(filemeta) = services.media.get_thumbnail(mxc, dim).await? { - return Ok(filemeta); - } - - if services.globals.server_is_ours(mxc.server_name) { - return Err!(Request(NotFound("Local thumbnail not found."))); - } - - services - .media - .fetch_remote_thumbnail(mxc, Some(user), None, timeout_ms, dim) - .await -} - -async fn fetch_file_meta( - services: &Services, - mxc: &Mxc<'_>, - user: &UserId, - timeout_ms: Duration, -) -> Result { - if let Some(filemeta) = services.media.get(mxc).await? { - return Ok(filemeta); - } - - if services.globals.server_is_ours(mxc.server_name) { - return Err!(Request(NotFound("Local media not found."))); - } - - services - .media - .fetch_remote_content(mxc, Some(user), None, timeout_ms) - .await -} diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs deleted file mode 100644 index d9f24f77..00000000 --- a/src/api/client/media_legacy.rs +++ /dev/null @@ -1,395 +0,0 @@ -#![allow(deprecated)] - -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{ - Err, Result, err, - utils::{content_disposition::make_content_disposition, math::ruma_from_usize}, -}; -use conduwuit_service::media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta}; -use reqwest::Url; -use ruma::{ - Mxc, - api::client::media::{ - create_content, get_content, get_content_as_filename, get_content_thumbnail, - get_media_config, get_media_preview, - }, -}; - -use crate::{Ruma, RumaResponse, client::create_content_route}; - -/// # `GET /_matrix/media/v3/config` -/// -/// Returns max upload size. 
-pub(crate) async fn get_media_config_legacy_route( - State(services): State, - _body: Ruma, -) -> Result { - Ok(get_media_config::v3::Response { - upload_size: ruma_from_usize(services.server.config.max_request_size), - }) -} - -/// # `GET /_matrix/media/v1/config` -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. -/// See -/// -/// Returns max upload size. -pub(crate) async fn get_media_config_legacy_legacy_route( - State(services): State, - body: Ruma, -) -> Result> { - get_media_config_legacy_route(State(services), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/media/v3/preview_url` -/// -/// Returns URL preview. -#[tracing::instrument(skip_all, fields(%client), name = "url_preview_legacy", level = "debug")] -pub(crate) async fn get_media_preview_legacy_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let url = &body.url; - let url = Url::parse(&body.url).map_err(|e| { - err!(Request(InvalidParam( - debug_warn!(%sender_user, %url, "Requested URL is not valid: {e}") - ))) - })?; - - if !services.media.url_preview_allowed(&url) { - return Err!(Request(Forbidden( - debug_warn!(%sender_user, %url, "URL is not allowed to be previewed") - ))); - } - - let preview = services.media.get_url_preview(&url).await.map_err(|e| { - err!(Request(Unknown( - debug_error!(%sender_user, %url, "Failed to fetch a URL preview: {e}") - ))) - })?; - - serde_json::value::to_raw_value(&preview) - .map(get_media_preview::v3::Response::from_raw_value) - .map_err(|error| { - err!(Request(Unknown( - debug_error!(%sender_user, %url, "Failed to parse URL preview: {error}") - ))) - }) -} - -/// # `GET /_matrix/media/v1/preview_url` -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. 
conduwuit adds these for compatibility purposes. -/// See -/// -/// Returns URL preview. -pub(crate) async fn get_media_preview_legacy_legacy_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - get_media_preview_legacy_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} - -/// # `POST /_matrix/media/v1/upload` -/// -/// Permanently save media in the server. -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. -/// See -/// -/// - Some metadata will be saved in the database -/// - Media will be saved in the media/ directory -pub(crate) async fn create_content_legacy_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - create_content_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}` -/// -/// Load media from our server or over federation. -/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")] -pub(crate) async fn get_content_legacy_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let mxc = Mxc { - server_name: &body.server_name, - media_id: &body.media_id, - }; - - match services.media.get(&mxc).await? 
{ - | Some(FileMeta { - content, - content_type, - content_disposition, - }) => { - let content_disposition = make_content_disposition( - content_disposition.as_ref(), - content_type.as_deref(), - None, - ); - - Ok(get_content::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - }, - | _ => - if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; - - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); - - Ok(get_content::v3::Response { - file: response.file, - content_type: response.content_type, - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { - Err!(Request(NotFound("Media not found."))) - }, - } -} - -/// # `GET /_matrix/media/v1/download/{serverName}/{mediaId}` -/// -/// Load media from our server or over federation. -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. 
-/// See -/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")] -pub(crate) async fn get_content_legacy_legacy_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - get_content_legacy_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}/{fileName}` -/// -/// Load media from our server or over federation, permitting desired filename. -/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_get_legacy", level = "debug")] -pub(crate) async fn get_content_as_filename_legacy_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let mxc = Mxc { - server_name: &body.server_name, - media_id: &body.media_id, - }; - - match services.media.get(&mxc).await? 
{ - | Some(FileMeta { - content, - content_type, - content_disposition, - }) => { - let content_disposition = make_content_disposition( - content_disposition.as_ref(), - content_type.as_deref(), - Some(&body.filename), - ); - - Ok(get_content_as_filename::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - }, - | _ => - if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; - - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); - - Ok(get_content_as_filename::v3::Response { - content_disposition: Some(content_disposition), - content_type: response.content_type, - file: response.file, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { - Err!(Request(NotFound("Media not found."))) - }, - } -} - -/// # `GET /_matrix/media/v1/download/{serverName}/{mediaId}/{fileName}` -/// -/// Load media from our server or over federation, permitting desired filename. -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. 
-/// See -/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -pub(crate) async fn get_content_as_filename_legacy_legacy_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - get_content_as_filename_legacy_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/media/v3/thumbnail/{serverName}/{mediaId}` -/// -/// Load media thumbnail from our server or over federation. -/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -#[tracing::instrument(skip_all, fields(%client), name = "media_thumbnail_get_legacy", level = "debug")] -pub(crate) async fn get_content_thumbnail_legacy_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let mxc = Mxc { - server_name: &body.server_name, - media_id: &body.media_id, - }; - - let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; - match services.media.get_thumbnail(&mxc, &dim).await? 
{ - | Some(FileMeta { - content, - content_type, - content_disposition, - }) => { - let content_disposition = make_content_disposition( - content_disposition.as_ref(), - content_type.as_deref(), - None, - ); - - Ok(get_content_thumbnail::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - }, - | _ => - if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_thumbnail_legacy(&body) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; - - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); - - Ok(get_content_thumbnail::v3::Response { - file: response.file, - content_type: response.content_type, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - } else { - Err!(Request(NotFound("Media not found."))) - }, - } -} - -/// # `GET /_matrix/media/v1/thumbnail/{serverName}/{mediaId}` -/// -/// Load media thumbnail from our server or over federation. -/// -/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or -/// clients may call. conduwuit adds these for compatibility purposes. 
-/// See -/// -/// - Only allows federation if `allow_remote` is true -/// - Only redirects if `allow_redirect` is true -/// - Uses client-provided `timeout_ms` if available, else defaults to 20 -/// seconds -pub(crate) async fn get_content_thumbnail_legacy_legacy_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - get_content_thumbnail_legacy_route(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs deleted file mode 100644 index 2847d668..00000000 --- a/src/api/client/membership.rs +++ /dev/null @@ -1,2621 +0,0 @@ -use std::{ - borrow::Borrow, - collections::{HashMap, HashSet}, - iter::once, - net::IpAddr, - sync::Arc, -}; - -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{ - Err, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, is_matching, - matrix::{ - StateKey, - pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, - state_res, - }, - result::{FlatOk, NotFound}, - trace, - utils::{ - self, FutureBoolExt, - future::ReadyEqExt, - shuffle, - stream::{BroadbandExt, IterStream, ReadyExt}, - }, - warn, -}; -use conduwuit_service::{ - Services, - appservice::RegistrationInfo, - rooms::{ - state::RoomMutexGuard, - state_compressor::{CompressedState, HashSetCompressStateEvent}, - }, -}; -use futures::{FutureExt, StreamExt, TryFutureExt, join, pin_mut}; -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, - api::{ - client::{ - error::ErrorKind, - knock::knock_room, - membership::{ - ThirdPartySigned, ban_user, forget_room, - get_member_events::{self, v3::MembershipEventFilter}, - invite_user, join_room_by_id, join_room_by_id_or_alias, - joined_members::{self, v3::RoomMember}, - joined_rooms, kick_user, leave_room, unban_user, - 
}, - }, - federation::{self, membership::create_invite}, - }, - canonical_json::to_canonical_value, - events::{ - StateEventType, - room::{ - join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - }, - }, -}; - -use crate::{Ruma, client::full_user_deactivate}; - -/// Checks if the room is banned in any way possible and the sender user is not -/// an admin. -/// -/// Performs automatic deactivation if `auto_deactivate_banned_room_attempts` is -/// enabled -#[tracing::instrument(skip(services))] -async fn banned_room_check( - services: &Services, - user_id: &UserId, - room_id: Option<&RoomId>, - server_name: Option<&ServerName>, - client_ip: IpAddr, -) -> Result { - if services.users.is_admin(user_id).await { - return Ok(()); - } - - if let Some(room_id) = room_id { - if services.rooms.metadata.is_banned(room_id).await - || services - .moderation - .is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")) - { - warn!( - "User {user_id} who is not an admin attempted to send an invite for or \ - attempted to join a banned room or banned room server name: {room_id}" - ); - - if services.server.config.auto_deactivate_banned_room_attempts { - warn!( - "Automatically deactivating user {user_id} due to attempted banned room join" - ); - - if services.server.config.admin_room_notices { - services - .admin - .send_text(&format!( - "Automatically deactivating user {user_id} due to attempted banned \ - room join from IP {client_ip}" - )) - .await; - } - - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(user_id) - .map(Into::into) - .collect() - .await; - - full_user_deactivate(services, user_id, &all_joined_rooms).await?; - } - - return Err!(Request(Forbidden("This room is banned on this homeserver."))); - } - } else if let Some(server_name) = server_name { - if services - .config - .forbidden_remote_server_names - .is_match(server_name.host()) - { - warn!( - "User 
{user_id} who is not an admin tried joining a room which has the server \ - name {server_name} that is globally forbidden. Rejecting.", - ); - - if services.server.config.auto_deactivate_banned_room_attempts { - warn!( - "Automatically deactivating user {user_id} due to attempted banned room join" - ); - - if services.server.config.admin_room_notices { - services - .admin - .send_text(&format!( - "Automatically deactivating user {user_id} due to attempted banned \ - room join from IP {client_ip}" - )) - .await; - } - - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(user_id) - .map(Into::into) - .collect() - .await; - - full_user_deactivate(services, user_id, &all_joined_rooms).await?; - } - - return Err!(Request(Forbidden("This remote server is banned on this homeserver."))); - } - } - - Ok(()) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/join` -/// -/// Tries to join the sender user into a room. -/// -/// - If the server knowns about this room: creates the join event and does auth -/// rules locally -/// - If the server does not know about the room: asks other servers over -/// federation -#[tracing::instrument(skip_all, fields(%client), name = "join")] -pub(crate) async fn join_room_by_id_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - banned_room_check( - &services, - sender_user, - Some(&body.room_id), - body.room_id.server_name(), - client, - ) - .await?; - - // There is no body.server_name for /roomId/join - let mut servers: Vec<_> = services - .rooms - .state_cache - .servers_invite_via(&body.room_id) - .map(ToOwned::to_owned) - .collect() - .await; - - servers.extend( - services - .rooms - .state_cache - .invite_state(sender_user, &body.room_id) - .await - .unwrap_or_default() - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| 
user.server_name().to_owned()), - ); - - if let Some(server) = body.room_id.server_name() { - servers.push(server.into()); - } - - servers.sort_unstable(); - servers.dedup(); - shuffle(&mut servers); - - join_room_by_id_helper( - &services, - sender_user, - &body.room_id, - body.reason.clone(), - &servers, - body.third_party_signed.as_ref(), - &body.appservice_info, - ) - .boxed() - .await -} - -/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` -/// -/// Tries to join the sender user into a room. -/// -/// - If the server knowns about this room: creates the join event and does auth -/// rules locally -/// - If the server does not know about the room: use the server name query -/// param if specified. if not specified, asks other servers over federation -/// via room alias server name and room ID server name -#[tracing::instrument(skip_all, fields(%client), name = "join")] -pub(crate) async fn join_room_by_id_or_alias_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_deref().expect("user is authenticated"); - let appservice_info = &body.appservice_info; - let body = body.body; - - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { - | Ok(room_id) => { - banned_room_check( - &services, - sender_user, - Some(&room_id), - room_id.server_name(), - client, - ) - .await?; - - let mut servers = body.via.clone(); - servers.extend( - services - .rooms - .state_cache - .servers_invite_via(&room_id) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - servers.extend( - services - .rooms - .state_cache - .invite_state(sender_user, &room_id) - .await - .unwrap_or_default() - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - - if let Some(server) = room_id.server_name() { - servers.push(server.to_owned()); - } - - 
servers.sort_unstable(); - servers.dedup(); - shuffle(&mut servers); - - (servers, room_id) - }, - | Err(room_alias) => { - let (room_id, mut servers) = services - .rooms - .alias - .resolve_alias(&room_alias, Some(body.via.clone())) - .await?; - - banned_room_check( - &services, - sender_user, - Some(&room_id), - Some(room_alias.server_name()), - client, - ) - .await?; - - let addl_via_servers = services - .rooms - .state_cache - .servers_invite_via(&room_id) - .map(ToOwned::to_owned); - - let addl_state_servers = services - .rooms - .state_cache - .invite_state(sender_user, &room_id) - .await - .unwrap_or_default(); - - let mut addl_servers: Vec<_> = addl_state_servers - .iter() - .map(|event| event.get_field("sender")) - .filter_map(FlatOk::flat_ok) - .map(|user: &UserId| user.server_name().to_owned()) - .stream() - .chain(addl_via_servers) - .collect() - .await; - - addl_servers.sort_unstable(); - addl_servers.dedup(); - shuffle(&mut addl_servers); - servers.append(&mut addl_servers); - - (servers, room_id) - }, - }; - - let join_room_response = join_room_by_id_helper( - &services, - sender_user, - &room_id, - body.reason.clone(), - &servers, - body.third_party_signed.as_ref(), - appservice_info, - ) - .boxed() - .await?; - - Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id }) -} - -/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}` -/// -/// Tries to knock the room to ask permission to join for the sender user. 
-#[tracing::instrument(skip_all, fields(%client), name = "knock")] -pub(crate) async fn knock_room_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - let body = &body.body; - - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { - | Ok(room_id) => { - banned_room_check( - &services, - sender_user, - Some(&room_id), - room_id.server_name(), - client, - ) - .await?; - - let mut servers = body.via.clone(); - servers.extend( - services - .rooms - .state_cache - .servers_invite_via(&room_id) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - servers.extend( - services - .rooms - .state_cache - .invite_state(sender_user, &room_id) - .await - .unwrap_or_default() - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - - if let Some(server) = room_id.server_name() { - servers.push(server.to_owned()); - } - - servers.sort_unstable(); - servers.dedup(); - shuffle(&mut servers); - - (servers, room_id) - }, - | Err(room_alias) => { - let (room_id, mut servers) = services - .rooms - .alias - .resolve_alias(&room_alias, Some(body.via.clone())) - .await?; - - banned_room_check( - &services, - sender_user, - Some(&room_id), - Some(room_alias.server_name()), - client, - ) - .await?; - - let addl_via_servers = services - .rooms - .state_cache - .servers_invite_via(&room_id) - .map(ToOwned::to_owned); - - let addl_state_servers = services - .rooms - .state_cache - .invite_state(sender_user, &room_id) - .await - .unwrap_or_default(); - - let mut addl_servers: Vec<_> = addl_state_servers - .iter() - .map(|event| event.get_field("sender")) - .filter_map(FlatOk::flat_ok) - .map(|user: &UserId| user.server_name().to_owned()) - .stream() - .chain(addl_via_servers) - .collect() - .await; - - addl_servers.sort_unstable(); - 
addl_servers.dedup(); - shuffle(&mut addl_servers); - servers.append(&mut addl_servers); - - (servers, room_id) - }, - }; - - knock_room_by_id_helper(&services, sender_user, &room_id, body.reason.clone(), &servers) - .boxed() - .await -} - -/// # `POST /_matrix/client/v3/rooms/{roomId}/leave` -/// -/// Tries to leave the sender user from a room. -/// -/// - This should always work if the user is currently joined. -pub(crate) async fn leave_room_route( - State(services): State, - body: Ruma, -) -> Result { - leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) - .await - .map(|()| leave_room::v3::Response::new()) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/invite` -/// -/// Tries to send an invite event into the room. -#[tracing::instrument(skip_all, fields(%client), name = "invite")] -pub(crate) async fn invite_user_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { - debug_error!( - "User {sender_user} is not an admin and attempted to send an invite to room {}", - &body.room_id - ); - return Err!(Request(Forbidden("Invites are not allowed on this server."))); - } - - banned_room_check( - &services, - sender_user, - Some(&body.room_id), - body.room_id.server_name(), - client, - ) - .await?; - - match &body.recipient { - | invite_user::v3::InvitationRecipient::UserId { user_id } => { - let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); - let recipient_ignored_by_sender = - services.users.user_is_ignored(user_id, sender_user); - - let (sender_ignored_recipient, recipient_ignored_by_sender) = - join!(sender_ignored_recipient, recipient_ignored_by_sender); - - if sender_ignored_recipient { - return Ok(invite_user::v3::Response {}); - } - - if let Ok(target_user_membership) = services - .rooms - .state_accessor - 
.get_member(&body.room_id, user_id) - .await - { - if target_user_membership.membership == MembershipState::Ban { - return Err!(Request(Forbidden("User is banned from this room."))); - } - } - - if recipient_ignored_by_sender { - // silently drop the invite to the recipient if they've been ignored by the - // sender, pretend it worked - return Ok(invite_user::v3::Response {}); - } - - invite_helper( - &services, - sender_user, - user_id, - &body.room_id, - body.reason.clone(), - false, - ) - .boxed() - .await?; - - Ok(invite_user::v3::Response {}) - }, - | _ => { - Err!(Request(NotFound("User not found."))) - }, - } -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/kick` -/// -/// Tries to send a kick event into the room. -pub(crate) async fn kick_user_route( - State(services): State, - body: Ruma, -) -> Result { - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - let Ok(event) = services - .rooms - .state_accessor - .get_member(&body.room_id, &body.user_id) - .await - else { - // copy synapse's behaviour of returning 200 without any change to the state - // instead of erroring on left users - return Ok(kick_user::v3::Response::new()); - }; - - if !matches!( - event.membership, - MembershipState::Invite | MembershipState::Knock | MembershipState::Join, - ) { - return Err!(Request(Forbidden( - "Cannot kick a user who is not apart of the room (current membership: {})", - event.membership - ))); - } - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { - membership: MembershipState::Leave, - reason: body.reason.clone(), - is_direct: None, - join_authorized_via_users_server: None, - third_party_invite: None, - ..event - }), - body.sender_user(), - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - Ok(kick_user::v3::Response::new()) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/ban` -/// -/// Tries to send a ban event into the room. 
-pub(crate) async fn ban_user_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if sender_user == body.user_id { - return Err!(Request(Forbidden("You cannot ban yourself."))); - } - - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - let current_member_content = services - .rooms - .state_accessor - .get_member(&body.room_id, &body.user_id) - .await - .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Ban)); - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { - membership: MembershipState::Ban, - reason: body.reason.clone(), - displayname: None, // display name may be offensive - avatar_url: None, // avatar may be offensive - is_direct: None, - join_authorized_via_users_server: None, - third_party_invite: None, - ..current_member_content - }), - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - Ok(ban_user::v3::Response::new()) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/unban` -/// -/// Tries to send an unban event into the room. 
-pub(crate) async fn unban_user_route( - State(services): State, - body: Ruma, -) -> Result { - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - let current_member_content = services - .rooms - .state_accessor - .get_member(&body.room_id, &body.user_id) - .await - .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Leave)); - - if current_member_content.membership != MembershipState::Ban { - return Err!(Request(Forbidden( - "Cannot unban a user who is not banned (current membership: {})", - current_member_content.membership - ))); - } - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { - membership: MembershipState::Leave, - reason: body.reason.clone(), - join_authorized_via_users_server: None, - third_party_invite: None, - is_direct: None, - ..current_member_content - }), - body.sender_user(), - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - Ok(unban_user::v3::Response::new()) -} - -/// # `POST /_matrix/client/v3/rooms/{roomId}/forget` -/// -/// Forgets about a room. 
-/// -/// - If the sender user currently left the room: Stops sender user from -/// receiving information about the room -/// -/// Note: Other devices of the user have no way of knowing the room was -/// forgotten, so this has to be called from every device -pub(crate) async fn forget_room_route( - State(services): State, - body: Ruma, -) -> Result { - let user_id = body.sender_user(); - let room_id = &body.room_id; - - let joined = services.rooms.state_cache.is_joined(user_id, room_id); - let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); - let invited = services.rooms.state_cache.is_invited(user_id, room_id); - - pin_mut!(joined, knocked, invited); - if joined.or(knocked).or(invited).await { - return Err!(Request(Unknown("You must leave the room before forgetting it"))); - } - - let membership = services - .rooms - .state_accessor - .get_member(room_id, user_id) - .await; - - if membership.is_not_found() { - return Err!(Request(Unknown("No membership event was found, room was never joined"))); - } - - let non_membership = membership - .map(|member| member.membership) - .is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban)); - - if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await { - services.rooms.state_cache.forget(room_id, user_id); - } - - Ok(forget_room::v3::Response::new()) -} - -/// # `POST /_matrix/client/r0/joined_rooms` -/// -/// Lists all rooms the user has joined. 
-pub(crate) async fn joined_rooms_route( - State(services): State, - body: Ruma, -) -> Result { - Ok(joined_rooms::v3::Response { - joined_rooms: services - .rooms - .state_cache - .rooms_joined(body.sender_user()) - .map(ToOwned::to_owned) - .collect() - .await, - }) -} - -fn membership_filter( - pdu: PduEvent, - for_membership: Option<&MembershipEventFilter>, - not_membership: Option<&MembershipEventFilter>, -) -> Option { - let membership_state_filter = match for_membership { - | Some(MembershipEventFilter::Ban) => MembershipState::Ban, - | Some(MembershipEventFilter::Invite) => MembershipState::Invite, - | Some(MembershipEventFilter::Knock) => MembershipState::Knock, - | Some(MembershipEventFilter::Leave) => MembershipState::Leave, - | Some(_) | None => MembershipState::Join, - }; - - let not_membership_state_filter = match not_membership { - | Some(MembershipEventFilter::Ban) => MembershipState::Ban, - | Some(MembershipEventFilter::Invite) => MembershipState::Invite, - | Some(MembershipEventFilter::Join) => MembershipState::Join, - | Some(MembershipEventFilter::Knock) => MembershipState::Knock, - | Some(_) | None => MembershipState::Leave, - }; - - let evt_membership = pdu.get_content::().ok()?.membership; - - if for_membership.is_some() && not_membership.is_some() { - if membership_state_filter != evt_membership - || not_membership_state_filter == evt_membership - { - None - } else { - Some(pdu) - } - } else if for_membership.is_some() && not_membership.is_none() { - if membership_state_filter != evt_membership { - None - } else { - Some(pdu) - } - } else if not_membership.is_some() && for_membership.is_none() { - if not_membership_state_filter == evt_membership { - None - } else { - Some(pdu) - } - } else { - Some(pdu) - } -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/members` -/// -/// Lists all joined users in a room (TODO: at a specific point in time, with a -/// specific membership). 
-/// -/// - Only works if the user is currently joined -pub(crate) async fn get_member_events_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - let membership = body.membership.as_ref(); - let not_membership = body.not_membership.as_ref(); - - if !services - .rooms - .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) - .await - { - return Err!(Request(Forbidden("You don't have permission to view this room."))); - } - - Ok(get_member_events::v3::Response { - chunk: services - .rooms - .state_accessor - .room_state_full(&body.room_id) - .ready_filter_map(Result::ok) - .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) - .map(at!(1)) - .ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) - .map(PduEvent::into_member_event) - .collect() - .await, - }) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` -/// -/// Lists all members of a room. -/// -/// - The sender user must be in the room -/// - TODO: An appservice just needs a puppet joined -pub(crate) async fn joined_members_route( - State(services): State, - body: Ruma, -) -> Result { - if !services - .rooms - .state_accessor - .user_can_see_state_events(body.sender_user(), &body.room_id) - .await - { - return Err!(Request(Forbidden("You don't have permission to view this room."))); - } - - Ok(joined_members::v3::Response { - joined: services - .rooms - .state_cache - .room_members(&body.room_id) - .map(ToOwned::to_owned) - .broad_then(|user_id| async move { - let member = RoomMember { - display_name: services.users.displayname(&user_id).await.ok(), - avatar_url: services.users.avatar_url(&user_id).await.ok(), - }; - - (user_id, member) - }) - .collect() - .await, - }) -} - -pub async fn join_room_by_id_helper( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - third_party_signed: Option<&ThirdPartySigned>, - appservice_info: 
&Option, -) -> Result { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - let user_is_guest = services - .users - .is_deactivated(sender_user) - .await - .unwrap_or(false) - && appservice_info.is_none(); - - if user_is_guest && !services.rooms.state_accessor.guest_can_join(room_id).await { - return Err!(Request(Forbidden("Guests are not allowed to join this room"))); - } - - if services - .rooms - .state_cache - .is_joined(sender_user, room_id) - .await - { - debug_warn!("{sender_user} is already joined in {room_id}"); - return Ok(join_room_by_id::v3::Response { room_id: room_id.into() }); - } - - if let Ok(membership) = services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .await - { - if membership.membership == MembershipState::Ban { - debug_warn!("{sender_user} is banned from {room_id} but attempted to join"); - return Err!(Request(Forbidden("You are banned from the room."))); - } - } - - let server_in_room = services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), room_id) - .await; - - let local_join = server_in_room - || servers.is_empty() - || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); - - if local_join { - join_room_by_id_helper_local( - services, - sender_user, - room_id, - reason, - servers, - third_party_signed, - state_lock, - ) - .boxed() - .await?; - } else { - // Ask a remote server if we are not participating in this room - join_room_by_id_helper_remote( - services, - sender_user, - room_id, - reason, - servers, - third_party_signed, - state_lock, - ) - .boxed() - .await?; - } - - Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) -} - -#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")] -async fn join_room_by_id_helper_remote( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - _third_party_signed: Option<&ThirdPartySigned>, - state_lock: 
RoomMutexGuard, -) -> Result { - info!("Joining {room_id} over federation."); - - let (make_join_response, remote_server) = - make_join_request(services, sender_user, room_id, servers).await?; - - info!("make_join finished"); - - let Some(room_version_id) = make_join_response.room_version else { - return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); - }; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); - } - - let mut join_event_stub: CanonicalJsonObject = - serde_json::from_str(make_join_response.event.get()).map_err(|e| { - err!(BadServerResponse(warn!( - "Invalid make_join event json received from server: {e:?}" - ))) - })?; - - let join_authorized_via_users_server = { - use RoomVersionId::*; - if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { - join_event_stub - .get("content") - .map(|s| { - s.as_object()? - .get("join_authorised_via_users_server")? 
- .as_str() - }) - .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()) - } else { - None - } - }; - - join_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - join_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - join_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason, - join_authorized_via_users_server: join_authorized_via_users_server.clone(), - ..RoomMemberEventContent::new(MembershipState::Join) - }) - .expect("event is valid, we just created it"), - ); - - // We keep the "event_id" in the pdu only in v1 or - // v2 rooms - match room_version_id { - | RoomVersionId::V1 | RoomVersionId::V2 => {}, - | _ => { - join_event_stub.remove("event_id"); - }, - } - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&join_event_stub, &room_version_id)?; - - // Add event_id back - join_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let mut join_event = join_event_stub; - - info!("Asking {remote_server} for send_join in room {room_id}"); - let send_join_request = federation::membership::create_join_event::v2::Request { - room_id: room_id.to_owned(), - event_id: event_id.clone(), - omit_members: false, - pdu: services - .sending - .convert_to_outgoing_federation_event(join_event.clone()) - .await, - }; - - let 
send_join_response = match services - .sending - .send_synapse_request(&remote_server, send_join_request) - .await - { - | Ok(response) => response, - | Err(e) => { - error!("send_join failed: {e}"); - return Err(e); - }, - }; - - info!("send_join finished"); - - if join_authorized_via_users_server.is_some() { - if let Some(signed_raw) = &send_join_response.room_state.event { - debug_info!( - "There is a signed event with join_authorized_via_users_server. This room is \ - probably using restricted joins. Adding signature to our event" - ); - - let (signed_event_id, signed_value) = - gen_event_id_canonical_json(signed_raw, &room_version_id).map_err(|e| { - err!(Request(BadJson(warn!( - "Could not convert event to canonical JSON: {e}" - )))) - })?; - - if signed_event_id != event_id { - return Err!(Request(BadJson(warn!( - %signed_event_id, %event_id, - "Server {remote_server} sent event with wrong event ID" - )))); - } - - match signed_value["signatures"] - .as_object() - .ok_or_else(|| { - err!(BadServerResponse(warn!( - "Server {remote_server} sent invalid signatures type" - ))) - }) - .and_then(|e| { - e.get(remote_server.as_str()).ok_or_else(|| { - err!(BadServerResponse(warn!( - "Server {remote_server} did not send its signature for a restricted \ - room" - ))) - }) - }) { - | Ok(signature) => { - join_event - .get_mut("signatures") - .expect("we created a valid pdu") - .as_object_mut() - .expect("we created a valid pdu") - .insert(remote_server.to_string(), signature.clone()); - }, - | Err(e) => { - warn!( - "Server {remote_server} sent invalid signature in send_join signatures \ - for event {signed_value:?}: {e:?}", - ); - }, - } - } - } - - services - .rooms - .short - .get_or_create_shortroomid(room_id) - .await; - - info!("Parsing join event"); - let parsed_join_pdu = PduEvent::from_id_val(&event_id, join_event.clone()) - .map_err(|e| err!(BadServerResponse("Invalid join event PDU: {e:?}")))?; - - info!("Acquiring server signing keys for response events"); 
- let resp_events = &send_join_response.room_state; - let resp_state = &resp_events.state; - let resp_auth = &resp_events.auth_chain; - services - .server_keys - .acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter())) - .await; - - info!("Going through send_join response room_state"); - let cork = services.db.cork_and_flush(); - let state = send_join_response - .room_state - .state - .iter() - .stream() - .then(|pdu| { - services - .server_keys - .validate_and_add_event_id_no_fetch(pdu, &room_version_id) - }) - .ready_filter_map(Result::ok) - .fold(HashMap::new(), |mut state, (event_id, value)| async move { - let pdu = match PduEvent::from_id_val(&event_id, value.clone()) { - | Ok(pdu) => pdu, - | Err(e) => { - debug_warn!("Invalid PDU in send_join response: {e:?}: {value:#?}"); - return state; - }, - }; - - services.rooms.outlier.add_pdu_outlier(&event_id, &value); - if let Some(state_key) = &pdu.state_key { - let shortstatekey = services - .rooms - .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key) - .await; - - state.insert(shortstatekey, pdu.event_id.clone()); - } - - state - }) - .await; - - drop(cork); - - info!("Going through send_join response auth_chain"); - let cork = services.db.cork_and_flush(); - send_join_response - .room_state - .auth_chain - .iter() - .stream() - .then(|pdu| { - services - .server_keys - .validate_and_add_event_id_no_fetch(pdu, &room_version_id) - }) - .ready_filter_map(Result::ok) - .ready_for_each(|(event_id, value)| { - services.rooms.outlier.add_pdu_outlier(&event_id, &value); - }) - .await; - - drop(cork); - - debug!("Running send_join auth check"); - let fetch_state = &state; - let state_fetch = |k: StateEventType, s: StateKey| async move { - let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?; - - let event_id = fetch_state.get(&shortstatekey)?; - services.rooms.timeline.get_pdu(event_id).await.ok() - }; - - let auth_check = state_res::event_auth::auth_check( - 
&state_res::RoomVersion::new(&room_version_id)?, - &parsed_join_pdu, - None, // TODO: third party invite - |k, s| state_fetch(k.clone(), s.into()), - ) - .await - .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; - - if !auth_check { - return Err!(Request(Forbidden("Auth check failed"))); - } - - info!("Compressing state from send_join"); - let compressed: CompressedState = services - .rooms - .state_compressor - .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) - .collect() - .await; - - debug!("Saving compressed state"); - let HashSetCompressStateEvent { - shortstatehash: statehash_before_join, - added, - removed, - } = services - .rooms - .state_compressor - .save_state(room_id, Arc::new(compressed)) - .await?; - - debug!("Forcing state for new room"); - services - .rooms - .state - .force_state(room_id, statehash_before_join, added, removed, &state_lock) - .await?; - - info!("Updating joined counts for new room"); - services - .rooms - .state_cache - .update_joined_count(room_id) - .await; - - // We append to state before appending the pdu, so we don't have a moment in - // time with the pdu without it's state. This is okay because append_pdu can't - // fail. 
- let statehash_after_join = services - .rooms - .state - .append_to_state(&parsed_join_pdu) - .await?; - - info!("Appending new room join event"); - services - .rooms - .timeline - .append_pdu( - &parsed_join_pdu, - join_event, - once(parsed_join_pdu.event_id.borrow()), - &state_lock, - ) - .await?; - - info!("Setting final room state for new room"); - // We set the room state after inserting the pdu, so that we never have a moment - // in time where events in the current room state do not exist - services - .rooms - .state - .set_room_state(room_id, statehash_after_join, &state_lock); - - Ok(()) -} - -#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")] -async fn join_room_by_id_helper_local( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - _third_party_signed: Option<&ThirdPartySigned>, - state_lock: RoomMutexGuard, -) -> Result { - debug_info!("We can join locally"); - - let join_rules_event_content = services - .rooms - .state_accessor - .room_state_get_content::( - room_id, - &StateEventType::RoomJoinRules, - "", - ) - .await; - - let restriction_rooms = match join_rules_event_content { - | Ok(RoomJoinRulesEventContent { - join_rule: JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted), - }) => restricted - .allow - .into_iter() - .filter_map(|a| match a { - | AllowRule::RoomMembership(r) => Some(r.room_id), - | _ => None, - }) - .collect(), - | _ => Vec::new(), - }; - - let join_authorized_via_users_server: Option = { - if restriction_rooms - .iter() - .stream() - .any(|restriction_room_id| { - services - .rooms - .state_cache - .is_joined(sender_user, restriction_room_id) - }) - .await - { - services - .rooms - .state_cache - .local_users_in_room(room_id) - .filter(|user| { - services.rooms.state_accessor.user_can_invite( - room_id, - user, - sender_user, - &state_lock, - ) - }) - .boxed() - .next() - .await - .map(ToOwned::to_owned) - } 
else { - None - } - }; - - let content = RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason: reason.clone(), - join_authorized_via_users_server, - ..RoomMemberEventContent::new(MembershipState::Join) - }; - - // Try normal join first - let Err(error) = services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(sender_user.to_string(), &content), - sender_user, - room_id, - &state_lock, - ) - .await - else { - return Ok(()); - }; - - if restriction_rooms.is_empty() - && (servers.is_empty() - || servers.len() == 1 && services.globals.server_is_ours(&servers[0])) - { - return Err(error); - } - - warn!( - "We couldn't do the join locally, maybe federation can help to satisfy the restricted \ - join requirements" - ); - let Ok((make_join_response, remote_server)) = - make_join_request(services, sender_user, room_id, servers).await - else { - return Err(error); - }; - - let Some(room_version_id) = make_join_response.room_version else { - return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); - }; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); - } - - let mut join_event_stub: CanonicalJsonObject = - serde_json::from_str(make_join_response.event.get()).map_err(|e| { - err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")) - })?; - - let join_authorized_via_users_server = join_event_stub - .get("content") - .map(|s| { - s.as_object()? - .get("join_authorised_via_users_server")? 
- .as_str() - }) - .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); - - join_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - join_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - join_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason, - join_authorized_via_users_server, - ..RoomMemberEventContent::new(MembershipState::Join) - }) - .expect("event is valid, we just created it"), - ); - - // We keep the "event_id" in the pdu only in v1 or - // v2 rooms - match room_version_id { - | RoomVersionId::V1 | RoomVersionId::V2 => {}, - | _ => { - join_event_stub.remove("event_id"); - }, - } - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&join_event_stub, &room_version_id)?; - - // Add event_id back - join_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let join_event = join_event_stub; - - let send_join_response = services - .sending - .send_synapse_request( - &remote_server, - federation::membership::create_join_event::v2::Request { - room_id: room_id.to_owned(), - event_id: event_id.clone(), - omit_members: false, - pdu: services - .sending - .convert_to_outgoing_federation_event(join_event.clone()) - .await, - }, - ) - .await?; - - if let Some(signed_raw) = send_join_response.room_state.event 
{ - let (signed_event_id, signed_value) = - gen_event_id_canonical_json(&signed_raw, &room_version_id).map_err(|e| { - err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) - })?; - - if signed_event_id != event_id { - return Err!(Request(BadJson( - warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") - ))); - } - - drop(state_lock); - services - .rooms - .event_handler - .handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true) - .boxed() - .await?; - } else { - return Err(error); - } - - Ok(()) -} - -async fn make_join_request( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - servers: &[OwnedServerName], -) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> { - let mut make_join_response_and_server = - Err!(BadServerResponse("No server available to assist in joining.")); - - let mut make_join_counter: usize = 0; - let mut incompatible_room_version_count: usize = 0; - - for remote_server in servers { - if services.globals.server_is_ours(remote_server) { - continue; - } - info!("Asking {remote_server} for make_join ({make_join_counter})"); - let make_join_response = services - .sending - .send_federation_request( - remote_server, - federation::membership::prepare_join_event::v1::Request { - room_id: room_id.to_owned(), - user_id: sender_user.to_owned(), - ver: services.server.supported_room_versions().collect(), - }, - ) - .await; - - trace!("make_join response: {:?}", make_join_response); - make_join_counter = make_join_counter.saturating_add(1); - - if let Err(ref e) = make_join_response { - if matches!( - e.kind(), - ErrorKind::IncompatibleRoomVersion { .. 
} | ErrorKind::UnsupportedRoomVersion - ) { - incompatible_room_version_count = - incompatible_room_version_count.saturating_add(1); - } - - if incompatible_room_version_count > 15 { - info!( - "15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or \ - M_UNSUPPORTED_ROOM_VERSION, assuming that conduwuit does not support the \ - room version {room_id}: {e}" - ); - make_join_response_and_server = - Err!(BadServerResponse("Room version is not supported by Conduwuit")); - return make_join_response_and_server; - } - - if make_join_counter > 40 { - warn!( - "40 servers failed to provide valid make_join response, assuming no server \ - can assist in joining." - ); - make_join_response_and_server = - Err!(BadServerResponse("No server available to assist in joining.")); - - return make_join_response_and_server; - } - } - - make_join_response_and_server = make_join_response.map(|r| (r, remote_server.clone())); - - if make_join_response_and_server.is_ok() { - break; - } - } - - make_join_response_and_server -} - -pub(crate) async fn invite_helper( - services: &Services, - sender_user: &UserId, - user_id: &UserId, - room_id: &RoomId, - reason: Option, - is_direct: bool, -) -> Result { - if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { - info!( - "User {sender_user} is not an admin and attempted to send an invite to room \ - {room_id}" - ); - return Err!(Request(Forbidden("Invites are not allowed on this server."))); - } - - if !services.globals.user_is_local(user_id) { - let (pdu, pdu_json, invite_room_state) = { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - let content = RoomMemberEventContent { - avatar_url: services.users.avatar_url(user_id).await.ok(), - is_direct: Some(is_direct), - reason, - ..RoomMemberEventContent::new(MembershipState::Invite) - }; - - let (pdu, pdu_json) = services - .rooms - .timeline - .create_hash_and_sign_event( - PduBuilder::state(user_id.to_string(), &content), - 
sender_user, - room_id, - &state_lock, - ) - .await?; - - let invite_room_state = services.rooms.state.summary_stripped(&pdu).await; - - drop(state_lock); - - (pdu, pdu_json, invite_room_state) - }; - - let room_version_id = services.rooms.state.get_room_version(room_id).await?; - - let response = services - .sending - .send_federation_request(user_id.server_name(), create_invite::v2::Request { - room_id: room_id.to_owned(), - event_id: (*pdu.event_id).to_owned(), - room_version: room_version_id.clone(), - event: services - .sending - .convert_to_outgoing_federation_event(pdu_json.clone()) - .await, - invite_room_state, - via: services - .rooms - .state_cache - .servers_route_via(room_id) - .await - .ok(), - }) - .await?; - - // We do not add the event_id field to the pdu here because of signature and - // hashes checks - let (event_id, value) = gen_event_id_canonical_json(&response.event, &room_version_id) - .map_err(|e| { - err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) - })?; - - if pdu.event_id != event_id { - return Err!(Request(BadJson(warn!( - %pdu.event_id, %event_id, - "Server {} sent event with wrong event ID", - user_id.server_name() - )))); - } - - let origin: OwnedServerName = serde_json::from_value(serde_json::to_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, - )?) - .map_err(|e| { - err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))) - })?; - - let pdu_id = services - .rooms - .event_handler - .handle_incoming_pdu(&origin, room_id, &event_id, value, true) - .boxed() - .await? 
- .ok_or_else(|| { - err!(Request(InvalidParam("Could not accept incoming PDU as timeline event."))) - })?; - - return services.sending.send_pdu_room(room_id, &pdu_id).await; - } - - if !services - .rooms - .state_cache - .is_joined(sender_user, room_id) - .await - { - return Err!(Request(Forbidden( - "You must be joined in the room you are trying to invite from." - ))); - } - - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - let content = RoomMemberEventContent { - displayname: services.users.displayname(user_id).await.ok(), - avatar_url: services.users.avatar_url(user_id).await.ok(), - blurhash: services.users.blurhash(user_id).await.ok(), - is_direct: Some(is_direct), - reason, - ..RoomMemberEventContent::new(MembershipState::Invite) - }; - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(user_id.to_string(), &content), - sender_user, - room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - Ok(()) -} - -// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms, -// and ignores errors -pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { - let rooms_joined = services - .rooms - .state_cache - .rooms_joined(user_id) - .map(ToOwned::to_owned); - - let rooms_invited = services - .rooms - .state_cache - .rooms_invited(user_id) - .map(|(r, _)| r); - - let rooms_knocked = services - .rooms - .state_cache - .rooms_knocked(user_id) - .map(|(r, _)| r); - - let all_rooms: Vec<_> = rooms_joined - .chain(rooms_invited) - .chain(rooms_knocked) - .collect() - .await; - - for room_id in all_rooms { - // ignore errors - if let Err(e) = leave_room(services, user_id, &room_id, None).await { - warn!(%user_id, "Failed to leave {room_id} remotely: {e}"); - } - - services.rooms.state_cache.forget(&room_id, user_id); - } -} - -pub async fn leave_room( - services: &Services, - user_id: &UserId, - room_id: &RoomId, - reason: Option, -) -> Result { - let default_member_content = 
RoomMemberEventContent { - membership: MembershipState::Leave, - reason: reason.clone(), - join_authorized_via_users_server: None, - is_direct: None, - avatar_url: None, - displayname: None, - third_party_invite: None, - blurhash: None, - }; - - let is_banned = services.rooms.metadata.is_banned(room_id); - let is_disabled = services.rooms.metadata.is_disabled(room_id); - - pin_mut!(is_banned, is_disabled); - if is_banned.or(is_disabled).await { - // the room is banned/disabled, the room must be rejected locally since we - // cant/dont want to federate with this server - services - .rooms - .state_cache - .update_membership( - room_id, - user_id, - default_member_content, - user_id, - None, - None, - true, - ) - .await?; - - return Ok(()); - } - - let dont_have_room = services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), room_id) - .eq(&false); - - let not_knocked = services - .rooms - .state_cache - .is_knocked(user_id, room_id) - .eq(&false); - - // Ask a remote server if we don't have this room and are not knocking on it - if dont_have_room.and(not_knocked).await { - if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone()) - .boxed() - .await - { - warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); - // Don't tell the client about this error - } - - let last_state = services - .rooms - .state_cache - .invite_state(user_id, room_id) - .or_else(|_| services.rooms.state_cache.knock_state(user_id, room_id)) - .or_else(|_| services.rooms.state_cache.left_state(user_id, room_id)) - .await - .ok(); - - // We always drop the invite, we can't rely on other servers - services - .rooms - .state_cache - .update_membership( - room_id, - user_id, - default_member_content, - user_id, - last_state, - None, - true, - ) - .await?; - } else { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - let Ok(event) = services - .rooms - .state_accessor - .room_state_get_content::( - room_id, - 
&StateEventType::RoomMember, - user_id.as_str(), - ) - .await - else { - debug_warn!( - "Trying to leave a room you are not a member of, marking room as left locally." - ); - - return services - .rooms - .state_cache - .update_membership( - room_id, - user_id, - default_member_content, - user_id, - None, - None, - true, - ) - .await; - }; - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { - membership: MembershipState::Leave, - reason, - join_authorized_via_users_server: None, - is_direct: None, - ..event - }), - user_id, - room_id, - &state_lock, - ) - .await?; - } - - Ok(()) -} - -async fn remote_leave_room( - services: &Services, - user_id: &UserId, - room_id: &RoomId, - reason: Option, -) -> Result<()> { - let mut make_leave_response_and_server = - Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); - - let mut servers: HashSet = services - .rooms - .state_cache - .servers_invite_via(room_id) - .map(ToOwned::to_owned) - .collect() - .await; - - match services - .rooms - .state_cache - .invite_state(user_id, room_id) - .await - { - | Ok(invite_state) => { - servers.extend( - invite_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - }, - | _ => { - match services - .rooms - .state_cache - .knock_state(user_id, room_id) - .await - { - | Ok(knock_state) => { - servers.extend( - knock_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .filter_map(|sender| { - if !services.globals.user_is_local(sender) { - Some(sender.server_name().to_owned()) - } else { - None - } - }), - ); - }, - | _ => {}, - } - }, - } - - if let Some(room_id_server_name) = room_id.server_name() { - servers.insert(room_id_server_name.to_owned()); - } - - 
debug_info!("servers in remote_leave_room: {servers:?}"); - - for remote_server in servers { - let make_leave_response = services - .sending - .send_federation_request( - &remote_server, - federation::membership::prepare_leave_event::v1::Request { - room_id: room_id.to_owned(), - user_id: user_id.to_owned(), - }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let Some(room_version_id) = make_leave_response.room_version else { - return Err!(BadServerResponse(warn!( - "No room version was returned by {remote_server} for {room_id}, room version is \ - likely not supported by conduwuit" - ))); - }; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse(warn!( - "Remote room version {room_version_id} for {room_id} is not supported by conduwuit", - ))); - } - - let mut leave_event_stub = serde_json::from_str::( - make_leave_response.event.get(), - ) - .map_err(|e| { - err!(BadServerResponse(warn!( - "Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}" - ))) - })?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // Inject the reason key into the event content dict if it exists - if let Some(reason) = reason { - if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") { - content.insert("reason".to_owned(), CanonicalJsonValue::String(reason)); - } - } - - // room v3 and above removed the "event_id" field from remote PDU format - match room_version_id { - | RoomVersionId::V1 | RoomVersionId::V2 => {}, - | _ => { - leave_event_stub.remove("event_id"); - }, - } - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut leave_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&leave_event_stub, &room_version_id)?; - - // Add event_id back - leave_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - services - .sending - .send_federation_request( - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id: room_id.to_owned(), - event_id, - pdu: services - .sending - .convert_to_outgoing_federation_event(leave_event.clone()) - .await, - }, - ) - .await?; - - Ok(()) -} - -async fn knock_room_by_id_helper( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], -) -> Result { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - if services - .rooms - .state_cache - .is_invited(sender_user, room_id) - .await - { - debug_warn!("{sender_user} is 
already invited in {room_id} but attempted to knock"); - return Err!(Request(Forbidden( - "You cannot knock on a room you are already invited/accepted to." - ))); - } - - if services - .rooms - .state_cache - .is_joined(sender_user, room_id) - .await - { - debug_warn!("{sender_user} is already joined in {room_id} but attempted to knock"); - return Err!(Request(Forbidden("You cannot knock on a room you are already joined in."))); - } - - if services - .rooms - .state_cache - .is_knocked(sender_user, room_id) - .await - { - debug_warn!("{sender_user} is already knocked in {room_id}"); - return Ok(knock_room::v3::Response { room_id: room_id.into() }); - } - - if let Ok(membership) = services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .await - { - if membership.membership == MembershipState::Ban { - debug_warn!("{sender_user} is banned from {room_id} but attempted to knock"); - return Err!(Request(Forbidden("You cannot knock on a room you are banned from."))); - } - } - - let server_in_room = services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), room_id) - .await; - - let local_knock = server_in_room - || servers.is_empty() - || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); - - if local_knock { - knock_room_helper_local(services, sender_user, room_id, reason, servers, state_lock) - .boxed() - .await?; - } else { - knock_room_helper_remote(services, sender_user, room_id, reason, servers, state_lock) - .boxed() - .await?; - } - - Ok(knock_room::v3::Response::new(room_id.to_owned())) -} - -async fn knock_room_helper_local( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - state_lock: RoomMutexGuard, -) -> Result { - debug_info!("We can knock locally"); - - let room_version_id = services.rooms.state.get_room_version(room_id).await?; - - if matches!( - room_version_id, - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - 
| RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - ) { - return Err!(Request(Forbidden("This room does not support knocking."))); - } - - let content = RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason: reason.clone(), - ..RoomMemberEventContent::new(MembershipState::Knock) - }; - - // Try normal knock first - let Err(error) = services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(sender_user.to_string(), &content), - sender_user, - room_id, - &state_lock, - ) - .await - else { - return Ok(()); - }; - - if servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])) - { - return Err(error); - } - - warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock"); - - let (make_knock_response, remote_server) = - make_knock_request(services, sender_user, room_id, servers).await?; - - info!("make_knock finished"); - - let room_version_id = make_knock_response.room_version; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); - } - - let mut knock_event_stub = serde_json::from_str::( - make_knock_response.event.get(), - ) - .map_err(|e| { - err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) - })?; - - knock_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - knock_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - knock_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - displayname: 
services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason, - ..RoomMemberEventContent::new(MembershipState::Knock) - }) - .expect("event is valid, we just created it"), - ); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; - - // Add event_id - knock_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let knock_event = knock_event_stub; - - info!("Asking {remote_server} for send_knock in room {room_id}"); - let send_knock_request = federation::knock::send_knock::v1::Request { - room_id: room_id.to_owned(), - event_id: event_id.clone(), - pdu: services - .sending - .convert_to_outgoing_federation_event(knock_event.clone()) - .await, - }; - - let send_knock_response = services - .sending - .send_federation_request(&remote_server, send_knock_request) - .await?; - - info!("send_knock finished"); - - services - .rooms - .short - .get_or_create_shortroomid(room_id) - .await; - - info!("Parsing knock event"); - - let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) - .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; - - info!("Updating membership locally to knock state with provided stripped state events"); - services - .rooms - .state_cache - .update_membership( - room_id, - sender_user, - parsed_knock_pdu - .get_content::() - .expect("we just created this"), - sender_user, - Some(send_knock_response.knock_room_state), - None, - false, - ) - .await?; - - info!("Appending room knock event locally"); - services - .rooms - .timeline - .append_pdu( - 
&parsed_knock_pdu, - knock_event, - once(parsed_knock_pdu.event_id.borrow()), - &state_lock, - ) - .await?; - - Ok(()) -} - -async fn knock_room_helper_remote( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - state_lock: RoomMutexGuard, -) -> Result { - info!("Knocking {room_id} over federation."); - - let (make_knock_response, remote_server) = - make_knock_request(services, sender_user, room_id, servers).await?; - - info!("make_knock finished"); - - let room_version_id = make_knock_response.room_version; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); - } - - let mut knock_event_stub: CanonicalJsonObject = - serde_json::from_str(make_knock_response.event.get()).map_err(|e| { - err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) - })?; - - knock_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - knock_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - knock_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason, - ..RoomMemberEventContent::new(MembershipState::Knock) - }) - .expect("event is valid, we just created it"), - ); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&knock_event_stub, 
&room_version_id)?; - - // Add event_id - knock_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let knock_event = knock_event_stub; - - info!("Asking {remote_server} for send_knock in room {room_id}"); - let send_knock_request = federation::knock::send_knock::v1::Request { - room_id: room_id.to_owned(), - event_id: event_id.clone(), - pdu: services - .sending - .convert_to_outgoing_federation_event(knock_event.clone()) - .await, - }; - - let send_knock_response = services - .sending - .send_federation_request(&remote_server, send_knock_request) - .await?; - - info!("send_knock finished"); - - services - .rooms - .short - .get_or_create_shortroomid(room_id) - .await; - - info!("Parsing knock event"); - let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) - .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; - - info!("Going through send_knock response knock state events"); - let state = send_knock_response - .knock_room_state - .iter() - .map(|event| serde_json::from_str::(event.clone().into_json().get())) - .filter_map(Result::ok); - - let mut state_map: HashMap = HashMap::new(); - - for event in state { - let Some(state_key) = event.get("state_key") else { - debug_warn!("send_knock stripped state event missing state_key: {event:?}"); - continue; - }; - let Some(event_type) = event.get("type") else { - debug_warn!("send_knock stripped state event missing event type: {event:?}"); - continue; - }; - - let Ok(state_key) = serde_json::from_value::(state_key.clone().into()) else { - debug_warn!("send_knock stripped state event has invalid state_key: {event:?}"); - continue; - }; - let Ok(event_type) = serde_json::from_value::(event_type.clone().into()) - else { - debug_warn!("send_knock stripped state event has invalid event type: {event:?}"); - continue; - }; - - let event_id = gen_event_id(&event, 
&room_version_id)?; - let shortstatekey = services - .rooms - .short - .get_or_create_shortstatekey(&event_type, &state_key) - .await; - - services.rooms.outlier.add_pdu_outlier(&event_id, &event); - state_map.insert(shortstatekey, event_id.clone()); - } - - info!("Compressing state from send_knock"); - let compressed: CompressedState = services - .rooms - .state_compressor - .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) - .collect() - .await; - - debug!("Saving compressed state"); - let HashSetCompressStateEvent { - shortstatehash: statehash_before_knock, - added, - removed, - } = services - .rooms - .state_compressor - .save_state(room_id, Arc::new(compressed)) - .await?; - - debug!("Forcing state for new room"); - services - .rooms - .state - .force_state(room_id, statehash_before_knock, added, removed, &state_lock) - .await?; - - let statehash_after_knock = services - .rooms - .state - .append_to_state(&parsed_knock_pdu) - .await?; - - info!("Updating membership locally to knock state with provided stripped state events"); - services - .rooms - .state_cache - .update_membership( - room_id, - sender_user, - parsed_knock_pdu - .get_content::() - .expect("we just created this"), - sender_user, - Some(send_knock_response.knock_room_state), - None, - false, - ) - .await?; - - info!("Appending room knock event locally"); - services - .rooms - .timeline - .append_pdu( - &parsed_knock_pdu, - knock_event, - once(parsed_knock_pdu.event_id.borrow()), - &state_lock, - ) - .await?; - - info!("Setting final room state for new room"); - // We set the room state after inserting the pdu, so that we never have a moment - // in time where events in the current room state do not exist - services - .rooms - .state - .set_room_state(room_id, statehash_after_knock, &state_lock); - - Ok(()) -} - -async fn make_knock_request( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - servers: &[OwnedServerName], -) -> 
Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> { - let mut make_knock_response_and_server = - Err!(BadServerResponse("No server available to assist in knocking.")); - - let mut make_knock_counter: usize = 0; - - for remote_server in servers { - if services.globals.server_is_ours(remote_server) { - continue; - } - - info!("Asking {remote_server} for make_knock ({make_knock_counter})"); - - let make_knock_response = services - .sending - .send_federation_request( - remote_server, - federation::knock::create_knock_event_template::v1::Request { - room_id: room_id.to_owned(), - user_id: sender_user.to_owned(), - ver: services.server.supported_room_versions().collect(), - }, - ) - .await; - - trace!("make_knock response: {make_knock_response:?}"); - make_knock_counter = make_knock_counter.saturating_add(1); - - make_knock_response_and_server = make_knock_response.map(|r| (r, remote_server.clone())); - - if make_knock_response_and_server.is_ok() { - break; - } - - if make_knock_counter > 40 { - warn!( - "50 servers failed to provide valid make_knock response, assuming no server can \ - assist in knocking." 
- ); - make_knock_response_and_server = - Err!(BadServerResponse("No server available to assist in knocking.")); - - return make_knock_response_and_server; - } - } - - make_knock_response_and_server -} diff --git a/src/api/client/message.rs b/src/api/client/message.rs deleted file mode 100644 index 16b1796a..00000000 --- a/src/api/client/message.rs +++ /dev/null @@ -1,319 +0,0 @@ -use core::panic; - -use axum::extract::State; -use conduwuit::{ - Err, Result, at, - matrix::{ - Event, - pdu::{PduCount, PduEvent}, - }, - utils::{ - IterStream, ReadyExt, - result::{FlatOk, LogErr}, - stream::{BroadbandExt, TryIgnore, WidebandExt}, - }, -}; -use conduwuit_service::{ - Services, - rooms::{ - lazy_loading, - lazy_loading::{Options, Witness}, - timeline::PdusIterItem, - }, -}; -use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; -use ruma::{ - DeviceId, RoomId, UserId, - api::{ - Direction, - client::{filter::RoomEventFilter, message::get_message_events}, - }, - events::{ - AnyStateEvent, StateEventType, - TimelineEventType::{self, *}, - }, - serde::Raw, -}; - -use crate::Ruma; - -/// list of safe and common non-state events to ignore if the user is ignored -const IGNORED_MESSAGE_TYPES: &[TimelineEventType] = &[ - Audio, - CallInvite, - Emote, - File, - Image, - KeyVerificationStart, - Location, - PollStart, - UnstablePollStart, - Beacon, - Reaction, - RoomEncrypted, - RoomMessage, - Sticker, - Video, - Voice, - CallNotify, -]; - -const LIMIT_MAX: usize = 100; -const LIMIT_DEFAULT: usize = 10; - -/// # `GET /_matrix/client/r0/rooms/{roomId}/messages` -/// -/// Allows paginating through room history. 
-/// -/// - Only works if the user is joined (TODO: always allow, but only show events -/// where the user was joined, depending on `history_visibility`) -pub(crate) async fn get_message_events_route( - State(services): State, - body: Ruma, -) -> Result { - debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted"); - let sender_user = body.sender_user(); - let sender_device = body.sender_device.as_ref(); - let room_id = &body.room_id; - let filter = &body.filter; - - if !services.rooms.metadata.exists(room_id).await { - return Err!(Request(Forbidden("Room does not exist to this server"))); - } - - let from: PduCount = body - .from - .as_deref() - .map(str::parse) - .transpose()? - .unwrap_or_else(|| match body.dir { - | Direction::Forward => PduCount::min(), - | Direction::Backward => PduCount::max(), - }); - - let to: Option = body.to.as_deref().map(str::parse).flat_ok(); - - let limit: usize = body - .limit - .try_into() - .unwrap_or(LIMIT_DEFAULT) - .min(LIMIT_MAX); - - if matches!(body.dir, Direction::Backward) { - services - .rooms - .timeline - .backfill_if_required(room_id, from) - .boxed() - .await - .log_err() - .ok(); - } - - let it = match body.dir { - | Direction::Forward => services - .rooms - .timeline - .pdus(Some(sender_user), room_id, Some(from)) - .ignore_err() - .boxed(), - - | Direction::Backward => services - .rooms - .timeline - .pdus_rev(Some(sender_user), room_id, Some(from)) - .ignore_err() - .boxed(), - }; - - let events: Vec<_> = it - .ready_take_while(|(count, _)| Some(*count) != to) - .ready_filter_map(|item| event_filter(item, filter)) - .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) - .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) - .take(limit) - .collect() - .await; - - let lazy_loading_context = lazy_loading::Context { - user_id: sender_user, - device_id: match sender_device { - | Some(device_id) => device_id, - | None => - if let Some(registration) = 
body.appservice_info.as_ref() { - <&DeviceId>::from(registration.registration.id.as_str()) - } else { - panic!("No device_id provided and no appservice registration found, this should be unreachable"); - }, - }, - room_id, - token: Some(from.into_unsigned()), - options: Some(&filter.lazy_load_options), - }; - - let witness: OptionFuture<_> = filter - .lazy_load_options - .is_enabled() - .then(|| lazy_loading_witness(&services, &lazy_loading_context, events.iter())) - .into(); - - let state = witness - .map(Option::into_iter) - .map(|option| option.flat_map(Witness::into_iter)) - .map(IterStream::stream) - .into_stream() - .flatten() - .broad_filter_map(|user_id| async move { - get_member_event(&services, room_id, &user_id).await - }) - .collect() - .await; - - let next_token = events.last().map(at!(0)); - - let chunk = events - .into_iter() - .map(at!(1)) - .map(PduEvent::into_room_event) - .collect(); - - Ok(get_message_events::v3::Response { - start: from.to_string(), - end: next_token.as_ref().map(ToString::to_string), - chunk, - state, - }) -} - -pub(crate) async fn lazy_loading_witness<'a, I>( - services: &Services, - lazy_loading_context: &lazy_loading::Context<'_>, - events: I, -) -> Witness -where - I: Iterator + Clone + Send, -{ - let oldest = events - .clone() - .map(|(count, _)| count) - .copied() - .min() - .unwrap_or_else(PduCount::max); - - let newest = events - .clone() - .map(|(count, _)| count) - .copied() - .max() - .unwrap_or_else(PduCount::max); - - let receipts = services - .rooms - .read_receipt - .readreceipts_since(lazy_loading_context.room_id, oldest.into_unsigned()); - - pin_mut!(receipts); - let witness: Witness = events - .stream() - .map(|(_, pdu)| pdu.sender.clone()) - .chain( - receipts - .ready_take_while(|(_, c, _)| *c <= newest.into_unsigned()) - .map(|(user_id, ..)| user_id.to_owned()), - ) - .collect() - .await; - - services - .rooms - .lazy_loading - .witness_retain(witness, lazy_loading_context) - .await -} - -async fn 
get_member_event( - services: &Services, - room_id: &RoomId, - user_id: &UserId, -) -> Option> { - services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) - .map_ok(PduEvent::into_state_event) - .await - .ok() -} - -#[inline] -pub(crate) async fn ignored_filter( - services: &Services, - item: PdusIterItem, - user_id: &UserId, -) -> Option { - let (_, ref pdu) = item; - - is_ignored_pdu(services, pdu, user_id) - .await - .eq(&false) - .then_some(item) -} - -#[inline] -pub(crate) async fn is_ignored_pdu( - services: &Services, - pdu: &PduEvent, - user_id: &UserId, -) -> bool { - // exclude Synapse's dummy events from bloating up response bodies. clients - // don't need to see this. - if pdu.kind.to_cow_str() == "org.matrix.dummy_event" { - return true; - } - - let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); - - let ignored_server = services - .moderation - .is_remote_server_ignored(pdu.sender().server_name()); - - if ignored_type - && (ignored_server - || (!services.config.send_messages_from_ignored_users_to_client - && services.users.user_is_ignored(&pdu.sender, user_id).await)) - { - return true; - } - - false -} - -#[inline] -pub(crate) async fn visibility_filter( - services: &Services, - item: PdusIterItem, - user_id: &UserId, -) -> Option { - let (_, pdu) = &item; - - services - .rooms - .state_accessor - .user_can_see_event(user_id, &pdu.room_id, &pdu.event_id) - .await - .then_some(item) -} - -#[inline] -pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { - let (_, pdu) = &item; - pdu.matches(filter).then_some(item) -} - -#[cfg_attr(debug_assertions, conduwuit::ctor)] -fn _is_sorted() { - debug_assert!( - IGNORED_MESSAGE_TYPES.is_sorted(), - "IGNORED_MESSAGE_TYPES must be sorted by the developer" - ); -} diff --git a/src/api/client/mod.rs b/src/api/client/mod.rs deleted file mode 100644 index be54e65f..00000000 --- a/src/api/client/mod.rs +++ 
/dev/null @@ -1,93 +0,0 @@ -pub(super) mod account; -pub(super) mod account_data; -pub(super) mod alias; -pub(super) mod appservice; -pub(super) mod backup; -pub(super) mod capabilities; -pub(super) mod context; -pub(super) mod device; -pub(super) mod directory; -pub(super) mod filter; -pub(super) mod keys; -pub(super) mod media; -pub(super) mod media_legacy; -pub(super) mod membership; -pub(super) mod message; -pub(super) mod openid; -pub(super) mod presence; -pub(super) mod profile; -pub(super) mod push; -pub(super) mod read_marker; -pub(super) mod redact; -pub(super) mod relations; -pub(super) mod report; -pub(super) mod room; -pub(super) mod search; -pub(super) mod send; -pub(super) mod session; -pub(super) mod space; -pub(super) mod state; -pub(super) mod sync; -pub(super) mod tag; -pub(super) mod thirdparty; -pub(super) mod threads; -pub(super) mod to_device; -pub(super) mod typing; -pub(super) mod unstable; -pub(super) mod unversioned; -pub(super) mod user_directory; -pub(super) mod voip; -pub(super) mod well_known; - -pub use account::full_user_deactivate; -pub(super) use account::*; -pub(super) use account_data::*; -pub(super) use alias::*; -pub(super) use appservice::*; -pub(super) use backup::*; -pub(super) use capabilities::*; -pub(super) use context::*; -pub(super) use device::*; -pub(super) use directory::*; -pub(super) use filter::*; -pub(super) use keys::*; -pub(super) use media::*; -pub(super) use media_legacy::*; -pub(super) use membership::*; -pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room}; -pub(super) use message::*; -pub(super) use openid::*; -pub(super) use presence::*; -pub(super) use profile::*; -pub use profile::{update_all_rooms, update_avatar_url, update_displayname}; -pub(super) use push::*; -pub(super) use read_marker::*; -pub(super) use redact::*; -pub(super) use relations::*; -pub(super) use report::*; -pub(super) use room::*; -pub(super) use search::*; -pub(super) use send::*; -pub(super) use session::*; 
-pub(super) use space::*; -pub(super) use state::*; -pub(super) use sync::*; -pub(super) use tag::*; -pub(super) use thirdparty::*; -pub(super) use threads::*; -pub(super) use to_device::*; -pub(super) use typing::*; -pub(super) use unstable::*; -pub(super) use unversioned::*; -pub(super) use user_directory::*; -pub(super) use voip::*; -pub(super) use well_known::*; - -/// generated device ID length -const DEVICE_ID_LENGTH: usize = 10; - -/// generated user access token length -const TOKEN_LENGTH: usize = 32; - -/// generated user session ID length -const SESSION_ID_LENGTH: usize = service::uiaa::SESSION_ID_LENGTH; diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs deleted file mode 100644 index 8d2de68d..00000000 --- a/src/api/client/openid.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::time::Duration; - -use axum::extract::State; -use conduwuit::{Error, Result, utils}; -use ruma::{ - api::client::{account, error::ErrorKind}, - authentication::TokenType, -}; - -use super::TOKEN_LENGTH; -use crate::Ruma; - -/// # `POST /_matrix/client/v3/user/{userId}/openid/request_token` -/// -/// Request an OpenID token to verify identity with third-party services. 
-/// -/// - The token generated is only valid for the OpenID API -pub(crate) async fn create_openid_token_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if sender_user != &body.user_id { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to request OpenID tokens on behalf of other users", - )); - } - - let access_token = utils::random_string(TOKEN_LENGTH); - - let expires_in = services - .users - .create_openid_token(&body.user_id, &access_token)?; - - Ok(account::request_openid_token::v3::Response { - access_token, - token_type: TokenType::Bearer, - matrix_server_name: services.server.name.clone(), - expires_in: Duration::from_secs(expires_in), - }) -} diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs deleted file mode 100644 index 548e5cce..00000000 --- a/src/api/client/presence.rs +++ /dev/null @@ -1,89 +0,0 @@ -use std::time::Duration; - -use axum::extract::State; -use conduwuit::{Err, Result}; -use ruma::api::client::presence::{get_presence, set_presence}; - -use crate::Ruma; - -/// # `PUT /_matrix/client/r0/presence/{userId}/status` -/// -/// Sets the presence state of the sender user. -pub(crate) async fn set_presence_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.config.allow_local_presence { - return Err!(Request(Forbidden("Presence is disabled on this server"))); - } - - if body.sender_user() != body.user_id && body.appservice_info.is_none() { - return Err!(Request(InvalidParam("Not allowed to set presence of other users"))); - } - - services - .presence - .set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone()) - .await?; - - Ok(set_presence::v3::Response {}) -} - -/// # `GET /_matrix/client/r0/presence/{userId}/status` -/// -/// Gets the presence state of the given user. 
-/// -/// - Only works if you share a room with the user -pub(crate) async fn get_presence_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.config.allow_local_presence { - return Err!(Request(Forbidden("Presence is disabled on this server",))); - } - - let mut presence_event = None; - let has_shared_rooms = services - .rooms - .state_cache - .user_sees_user(body.sender_user(), &body.user_id) - .await; - - if has_shared_rooms { - if let Ok(presence) = services.presence.get_presence(&body.user_id).await { - presence_event = Some(presence); - } - } - - match presence_event { - | Some(presence) => { - let status_msg = if presence - .content - .status_msg - .as_ref() - .is_some_and(String::is_empty) - { - None - } else { - presence.content.status_msg - }; - - let last_active_ago = match presence.content.currently_active { - | Some(true) => None, - | _ => presence - .content - .last_active_ago - .map(|millis| Duration::from_millis(millis.into())), - }; - - Ok(get_presence::v3::Response { - // TODO: Should ruma just use the presenceeventcontent type here? 
- status_msg, - currently_active: presence.content.currently_active, - last_active_ago, - presence: presence.content.presence, - }) - }, - | _ => Err!(Request(NotFound("Presence state for this user was not found"))), - } -} diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs deleted file mode 100644 index 3699b590..00000000 --- a/src/api/client/profile.rs +++ /dev/null @@ -1,426 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use conduwuit::{ - Err, Error, Result, - matrix::pdu::PduBuilder, - utils::{IterStream, stream::TryIgnore}, - warn, -}; -use conduwuit_service::Services; -use futures::{StreamExt, TryStreamExt, future::join3}; -use ruma::{ - OwnedMxcUri, OwnedRoomId, UserId, - api::{ - client::{ - error::ErrorKind, - profile::{ - get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, - }, - }, - federation, - }, - events::room::member::{MembershipState, RoomMemberEventContent}, - presence::PresenceState, -}; - -use crate::Ruma; - -/// # `PUT /_matrix/client/r0/profile/{userId}/displayname` -/// -/// Updates the displayname. 
-/// -/// - Also makes sure other users receive the update using presence EDUs -pub(crate) async fn set_displayname_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if *sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot update the profile of another user"))); - } - - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(&body.user_id) - .map(ToOwned::to_owned) - .collect() - .await; - - update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms) - .await; - - if services.config.allow_local_presence { - // Presence update - services - .presence - .ping_presence(&body.user_id, &PresenceState::Online) - .await?; - } - - Ok(set_display_name::v3::Response {}) -} - -/// # `GET /_matrix/client/v3/profile/{userId}/displayname` -/// -/// Returns the displayname of the user. -/// -/// - If user is on another server and we do not have a local copy already fetch -/// displayname over federation -pub(crate) async fn get_displayname_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.globals.user_is_local(&body.user_id) { - // Create and update our local copy of the user - if let Ok(response) = services - .sending - .send_federation_request( - body.user_id.server_name(), - federation::query::get_profile_information::v1::Request { - user_id: body.user_id.clone(), - field: None, // we want the full user's profile to update locally too - }, - ) - .await - { - if !services.users.exists(&body.user_id).await { - services.users.create(&body.user_id, None)?; - } - - services - .users - .set_displayname(&body.user_id, response.displayname.clone()); - services - .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()); - services - .users - .set_blurhash(&body.user_id, response.blurhash.clone()); - - return Ok(get_display_name::v3::Response { 
displayname: response.displayname }); - } - } - - if !services.users.exists(&body.user_id).await { - // Return 404 if this user doesn't exist and we couldn't fetch it over - // federation - return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); - } - - Ok(get_display_name::v3::Response { - displayname: services.users.displayname(&body.user_id).await.ok(), - }) -} - -/// # `PUT /_matrix/client/v3/profile/{userId}/avatar_url` -/// -/// Updates the `avatar_url` and `blurhash`. -/// -/// - Also makes sure other users receive the update using presence EDUs -pub(crate) async fn set_avatar_url_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if *sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot update the profile of another user"))); - } - - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(&body.user_id) - .map(ToOwned::to_owned) - .collect() - .await; - - update_avatar_url( - &services, - &body.user_id, - body.avatar_url.clone(), - body.blurhash.clone(), - &all_joined_rooms, - ) - .await; - - if services.config.allow_local_presence { - // Presence update - services - .presence - .ping_presence(&body.user_id, &PresenceState::Online) - .await - .ok(); - } - - Ok(set_avatar_url::v3::Response {}) -} - -/// # `GET /_matrix/client/v3/profile/{userId}/avatar_url` -/// -/// Returns the `avatar_url` and `blurhash` of the user. 
-/// -/// - If user is on another server and we do not have a local copy already fetch -/// `avatar_url` and blurhash over federation -pub(crate) async fn get_avatar_url_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.globals.user_is_local(&body.user_id) { - // Create and update our local copy of the user - if let Ok(response) = services - .sending - .send_federation_request( - body.user_id.server_name(), - federation::query::get_profile_information::v1::Request { - user_id: body.user_id.clone(), - field: None, // we want the full user's profile to update locally as well - }, - ) - .await - { - if !services.users.exists(&body.user_id).await { - services.users.create(&body.user_id, None)?; - } - - services - .users - .set_displayname(&body.user_id, response.displayname.clone()); - - services - .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()); - - services - .users - .set_blurhash(&body.user_id, response.blurhash.clone()); - - return Ok(get_avatar_url::v3::Response { - avatar_url: response.avatar_url, - blurhash: response.blurhash, - }); - } - } - - if !services.users.exists(&body.user_id).await { - // Return 404 if this user doesn't exist and we couldn't fetch it over - // federation - return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); - } - - Ok(get_avatar_url::v3::Response { - avatar_url: services.users.avatar_url(&body.user_id).await.ok(), - blurhash: services.users.blurhash(&body.user_id).await.ok(), - }) -} - -/// # `GET /_matrix/client/v3/profile/{userId}` -/// -/// Returns the displayname, avatar_url, blurhash, and tz of the user. -/// -/// - If user is on another server and we do not have a local copy already, -/// fetch profile over federation. 
-pub(crate) async fn get_profile_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.globals.user_is_local(&body.user_id) { - // Create and update our local copy of the user - if let Ok(response) = services - .sending - .send_federation_request( - body.user_id.server_name(), - federation::query::get_profile_information::v1::Request { - user_id: body.user_id.clone(), - field: None, - }, - ) - .await - { - if !services.users.exists(&body.user_id).await { - services.users.create(&body.user_id, None)?; - } - - services - .users - .set_displayname(&body.user_id, response.displayname.clone()); - - services - .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()); - - services - .users - .set_blurhash(&body.user_id, response.blurhash.clone()); - - services - .users - .set_timezone(&body.user_id, response.tz.clone()); - - for (profile_key, profile_key_value) in &response.custom_profile_fields { - services.users.set_profile_key( - &body.user_id, - profile_key, - Some(profile_key_value.clone()), - ); - } - - return Ok(get_profile::v3::Response { - displayname: response.displayname, - avatar_url: response.avatar_url, - blurhash: response.blurhash, - tz: response.tz, - custom_profile_fields: response.custom_profile_fields, - }); - } - } - - if !services.users.exists(&body.user_id).await { - // Return 404 if this user doesn't exist and we couldn't fetch it over - // federation - return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); - } - - let mut custom_profile_fields: BTreeMap = services - .users - .all_profile_keys(&body.user_id) - .collect() - .await; - - // services.users.timezone will collect the MSC4175 timezone key if it exists - custom_profile_fields.remove("us.cloke.msc4175.tz"); - custom_profile_fields.remove("m.tz"); - - Ok(get_profile::v3::Response { - avatar_url: services.users.avatar_url(&body.user_id).await.ok(), - blurhash: services.users.blurhash(&body.user_id).await.ok(), - displayname: 
services.users.displayname(&body.user_id).await.ok(), - tz: services.users.timezone(&body.user_id).await.ok(), - custom_profile_fields, - }) -} - -pub async fn update_displayname( - services: &Services, - user_id: &UserId, - displayname: Option, - all_joined_rooms: &[OwnedRoomId], -) { - let (current_avatar_url, current_blurhash, current_displayname) = join3( - services.users.avatar_url(user_id), - services.users.blurhash(user_id), - services.users.displayname(user_id), - ) - .await; - - let current_avatar_url = current_avatar_url.ok(); - let current_blurhash = current_blurhash.ok(); - let current_displayname = current_displayname.ok(); - - if displayname == current_displayname { - return; - } - - services.users.set_displayname(user_id, displayname.clone()); - - // Send a new join membership event into all joined rooms - let avatar_url = ¤t_avatar_url; - let blurhash = ¤t_blurhash; - let displayname = &displayname; - let all_joined_rooms: Vec<_> = all_joined_rooms - .iter() - .try_stream() - .and_then(|room_id: &OwnedRoomId| async move { - let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { - displayname: displayname.clone(), - membership: MembershipState::Join, - avatar_url: avatar_url.clone(), - blurhash: blurhash.clone(), - join_authorized_via_users_server: None, - reason: None, - is_direct: None, - third_party_invite: None, - }); - - Ok((pdu, room_id)) - }) - .ignore_err() - .collect() - .await; - - update_all_rooms(services, all_joined_rooms, user_id).await; -} - -pub async fn update_avatar_url( - services: &Services, - user_id: &UserId, - avatar_url: Option, - blurhash: Option, - all_joined_rooms: &[OwnedRoomId], -) { - let (current_avatar_url, current_blurhash, current_displayname) = join3( - services.users.avatar_url(user_id), - services.users.blurhash(user_id), - services.users.displayname(user_id), - ) - .await; - - let current_avatar_url = current_avatar_url.ok(); - let current_blurhash = current_blurhash.ok(); - let 
current_displayname = current_displayname.ok(); - - if current_avatar_url == avatar_url && current_blurhash == blurhash { - return; - } - - services.users.set_avatar_url(user_id, avatar_url.clone()); - services.users.set_blurhash(user_id, blurhash.clone()); - - // Send a new join membership event into all joined rooms - let avatar_url = &avatar_url; - let blurhash = &blurhash; - let displayname = ¤t_displayname; - let all_joined_rooms: Vec<_> = all_joined_rooms - .iter() - .try_stream() - .and_then(|room_id: &OwnedRoomId| async move { - let pdu = PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { - avatar_url: avatar_url.clone(), - blurhash: blurhash.clone(), - membership: MembershipState::Join, - displayname: displayname.clone(), - join_authorized_via_users_server: None, - reason: None, - is_direct: None, - third_party_invite: None, - }); - - Ok((pdu, room_id)) - }) - .ignore_err() - .collect() - .await; - - update_all_rooms(services, all_joined_rooms, user_id).await; -} - -pub async fn update_all_rooms( - services: &Services, - all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>, - user_id: &UserId, -) { - for (pdu_builder, room_id) in all_joined_rooms { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - if let Err(e) = services - .rooms - .timeline - .build_and_append_pdu(pdu_builder, user_id, room_id, &state_lock) - .await - { - warn!(%user_id, %room_id, "Failed to update/send new profile join membership update in room: {e}"); - } - } -} diff --git a/src/api/client/push.rs b/src/api/client/push.rs deleted file mode 100644 index 81020ffa..00000000 --- a/src/api/client/push.rs +++ /dev/null @@ -1,536 +0,0 @@ -use axum::extract::State; -use conduwuit::{Err, Error, Result, err}; -use conduwuit_service::Services; -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, - api::client::{ - error::ErrorKind, - push::{ - delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, - get_pushrule_enabled, get_pushrules_all, 
get_pushrules_global_scope, set_pusher, - set_pushrule, set_pushrule_actions, set_pushrule_enabled, - }, - }, - events::{ - GlobalAccountDataEventType, - push_rules::{PushRulesEvent, PushRulesEventContent}, - }, - push::{ - InsertPushRuleError, PredefinedContentRuleId, PredefinedOverrideRuleId, - RemovePushRuleError, Ruleset, - }, -}; - -use crate::Ruma; - -/// # `GET /_matrix/client/r0/pushrules/` -/// -/// Retrieves the push rules event for this user. -pub(crate) async fn get_pushrules_all_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - let Some(content_value) = services - .account_data - .get_global::(sender_user, GlobalAccountDataEventType::PushRules) - .await - .ok() - .and_then(|event| event.get("content").cloned()) - .filter(CanonicalJsonValue::is_object) - else { - // user somehow has non-existent push rule event. recreate it and return server - // default silently - return recreate_push_rules_and_return(&services, sender_user).await; - }; - - let account_data_content = - serde_json::from_value::(content_value.into()).map_err(|e| { - err!(Database(warn!("Invalid push rules account data event in database: {e}"))) - })?; - - let mut global_ruleset = account_data_content.global; - - // remove old deprecated mentions push rules as per MSC4210 - // and update the stored server default push rules - #[allow(deprecated)] - { - use ruma::push::RuleKind::*; - if global_ruleset - .get(Override, PredefinedOverrideRuleId::ContainsDisplayName.as_str()) - .is_some() - || global_ruleset - .get(Override, PredefinedOverrideRuleId::RoomNotif.as_str()) - .is_some() - || global_ruleset - .get(Content, PredefinedContentRuleId::ContainsUserName.as_str()) - .is_some() - { - global_ruleset - .remove(Override, PredefinedOverrideRuleId::ContainsDisplayName) - .ok(); - global_ruleset - .remove(Override, PredefinedOverrideRuleId::RoomNotif) - .ok(); - global_ruleset - .remove(Content, PredefinedContentRuleId::ContainsUserName) - 
.ok(); - - global_ruleset.update_with_server_default(Ruleset::server_default(sender_user)); - - services - .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { global: global_ruleset.clone() }, - }) - .expect("to json always works"), - ) - .await?; - } - }; - - Ok(get_pushrules_all::v3::Response { global: global_ruleset }) -} - -/// # `GET /_matrix/client/r0/pushrules/global/` -/// -/// Retrieves the push rules event for this user. -/// -/// This appears to be the exact same as `GET /_matrix/client/r0/pushrules/`. -pub(crate) async fn get_pushrules_global_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let Some(content_value) = services - .account_data - .get_global::(sender_user, GlobalAccountDataEventType::PushRules) - .await - .ok() - .and_then(|event| event.get("content").cloned()) - .filter(CanonicalJsonValue::is_object) - else { - // user somehow has non-existent push rule event. 
recreate it and return server - // default silently - services - .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { - global: Ruleset::server_default(sender_user), - }, - }) - .expect("to json always works"), - ) - .await?; - - return Ok(get_pushrules_global_scope::v3::Response { - global: Ruleset::server_default(sender_user), - }); - }; - - let account_data_content = - serde_json::from_value::(content_value.into()).map_err(|e| { - err!(Database(warn!("Invalid push rules account data event in database: {e}"))) - })?; - - let mut global_ruleset = account_data_content.global; - - // remove old deprecated mentions push rules as per MSC4210 - // and update the stored server default push rules - #[allow(deprecated)] - { - use ruma::push::RuleKind::*; - if global_ruleset - .get(Override, PredefinedOverrideRuleId::ContainsDisplayName.as_str()) - .is_some() - || global_ruleset - .get(Override, PredefinedOverrideRuleId::RoomNotif.as_str()) - .is_some() - || global_ruleset - .get(Content, PredefinedContentRuleId::ContainsUserName.as_str()) - .is_some() - { - global_ruleset - .remove(Override, PredefinedOverrideRuleId::ContainsDisplayName) - .ok(); - global_ruleset - .remove(Override, PredefinedOverrideRuleId::RoomNotif) - .ok(); - global_ruleset - .remove(Content, PredefinedContentRuleId::ContainsUserName) - .ok(); - - global_ruleset.update_with_server_default(Ruleset::server_default(sender_user)); - - services - .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { global: global_ruleset.clone() }, - }) - .expect("to json always works"), - ) - .await?; - } - }; - - Ok(get_pushrules_global_scope::v3::Response { global: global_ruleset }) -} - -/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` -/// -/// 
Retrieves a single specified push rule for this user. -pub(crate) async fn get_pushrule_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - // remove old deprecated mentions push rules as per MSC4210 - #[allow(deprecated)] - if body.rule_id.as_str() == PredefinedContentRuleId::ContainsUserName.as_str() - || body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str() - || body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str() - { - return Err!(Request(NotFound("Push rule not found."))); - } - - let event: PushRulesEvent = services - .account_data - .get_global(sender_user, GlobalAccountDataEventType::PushRules) - .await - .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - - let rule = event - .content - .global - .get(body.kind.clone(), &body.rule_id) - .map(Into::into); - - if let Some(rule) = rule { - Ok(get_pushrule::v3::Response { rule }) - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")) - } -} - -/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}` -/// -/// Creates a single specified push rule for this user. 
-pub(crate) async fn set_pushrule_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let body = body.body; - - let mut account_data: PushRulesEvent = services - .account_data - .get_global(sender_user, GlobalAccountDataEventType::PushRules) - .await - .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - - if let Err(error) = account_data.content.global.insert( - body.rule.clone(), - body.after.as_deref(), - body.before.as_deref(), - ) { - let err = match error { - | InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest( - ErrorKind::InvalidParam, - "Rule IDs starting with a dot are reserved for server-default rules.", - ), - | InsertPushRuleError::InvalidRuleId => Error::BadRequest( - ErrorKind::InvalidParam, - "Rule ID containing invalid characters.", - ), - | InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest( - ErrorKind::InvalidParam, - "Can't place a push rule relatively to a server-default rule.", - ), - | InsertPushRuleError::UnknownRuleId => Error::BadRequest( - ErrorKind::NotFound, - "The before or after rule could not be found.", - ), - | InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest( - ErrorKind::InvalidParam, - "The before rule has a higher priority than the after rule.", - ), - | _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), - }; - - return Err(err); - } - - services - .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - ) - .await?; - - Ok(set_pushrule::v3::Response {}) -} - -/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions` -/// -/// Gets the actions of a single specified push rule for this user. 
-pub(crate) async fn get_pushrule_actions_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - // remove old deprecated mentions push rules as per MSC4210 - #[allow(deprecated)] - if body.rule_id.as_str() == PredefinedContentRuleId::ContainsUserName.as_str() - || body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str() - || body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str() - { - return Err!(Request(NotFound("Push rule not found."))); - } - - let event: PushRulesEvent = services - .account_data - .get_global(sender_user, GlobalAccountDataEventType::PushRules) - .await - .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - - let actions = event - .content - .global - .get(body.kind.clone(), &body.rule_id) - .map(|rule| rule.actions().to_owned()) - .ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?; - - Ok(get_pushrule_actions::v3::Response { actions }) -} - -/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/actions` -/// -/// Sets the actions of a single specified push rule for this user. 
-pub(crate) async fn set_pushrule_actions_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let mut account_data: PushRulesEvent = services - .account_data - .get_global(sender_user, GlobalAccountDataEventType::PushRules) - .await - .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - - if account_data - .content - .global - .set_actions(body.kind.clone(), &body.rule_id, body.actions.clone()) - .is_err() - { - return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); - } - - services - .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - ) - .await?; - - Ok(set_pushrule_actions::v3::Response {}) -} - -/// # `GET /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled` -/// -/// Gets the enabled status of a single specified push rule for this user. 
-pub(crate) async fn get_pushrule_enabled_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - // remove old deprecated mentions push rules as per MSC4210 - #[allow(deprecated)] - if body.rule_id.as_str() == PredefinedContentRuleId::ContainsUserName.as_str() - || body.rule_id.as_str() == PredefinedOverrideRuleId::ContainsDisplayName.as_str() - || body.rule_id.as_str() == PredefinedOverrideRuleId::RoomNotif.as_str() - { - return Ok(get_pushrule_enabled::v3::Response { enabled: false }); - } - - let event: PushRulesEvent = services - .account_data - .get_global(sender_user, GlobalAccountDataEventType::PushRules) - .await - .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - - let enabled = event - .content - .global - .get(body.kind.clone(), &body.rule_id) - .map(ruma::push::AnyPushRuleRef::enabled) - .ok_or_else(|| err!(Request(NotFound("Push rule not found."))))?; - - Ok(get_pushrule_enabled::v3::Response { enabled }) -} - -/// # `PUT /_matrix/client/r0/pushrules/global/{kind}/{ruleId}/enabled` -/// -/// Sets the enabled status of a single specified push rule for this user. 
-pub(crate) async fn set_pushrule_enabled_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let mut account_data: PushRulesEvent = services - .account_data - .get_global(sender_user, GlobalAccountDataEventType::PushRules) - .await - .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - - if account_data - .content - .global - .set_enabled(body.kind.clone(), &body.rule_id, body.enabled) - .is_err() - { - return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); - } - - services - .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - ) - .await?; - - Ok(set_pushrule_enabled::v3::Response {}) -} - -/// # `DELETE /_matrix/client/r0/pushrules/global/{kind}/{ruleId}` -/// -/// Deletes a single specified push rule for this user. -pub(crate) async fn delete_pushrule_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let mut account_data: PushRulesEvent = services - .account_data - .get_global(sender_user, GlobalAccountDataEventType::PushRules) - .await - .map_err(|_| err!(Request(NotFound("PushRules event not found."))))?; - - if let Err(error) = account_data - .content - .global - .remove(body.kind.clone(), &body.rule_id) - { - let err = match error { - | RemovePushRuleError::ServerDefault => Error::BadRequest( - ErrorKind::InvalidParam, - "Cannot delete a server-default pushrule.", - ), - | RemovePushRuleError::NotFound => - Error::BadRequest(ErrorKind::NotFound, "Push rule not found."), - | _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), - }; - - return Err(err); - } - - services - .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - 
&serde_json::to_value(account_data).expect("to json value always works"), - ) - .await?; - - Ok(delete_pushrule::v3::Response {}) -} - -/// # `GET /_matrix/client/r0/pushers` -/// -/// Gets all currently active pushers for the sender user. -pub(crate) async fn get_pushers_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - Ok(get_pushers::v3::Response { - pushers: services.pusher.get_pushers(sender_user).await, - }) -} - -/// # `POST /_matrix/client/r0/pushers/set` -/// -/// Adds a pusher for the sender user. -/// -/// - TODO: Handle `append` -pub(crate) async fn set_pushers_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - services - .pusher - .set_pusher(sender_user, body.sender_device(), &body.action) - .await?; - - Ok(set_pusher::v3::Response::new()) -} - -/// user somehow has bad push rules, these must always exist per spec. 
-/// so recreate it and return server default silently -async fn recreate_push_rules_and_return( - services: &Services, - sender_user: &ruma::UserId, -) -> Result { - services - .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { - global: Ruleset::server_default(sender_user), - }, - }) - .expect("to json always works"), - ) - .await?; - - Ok(get_pushrules_all::v3::Response { - global: Ruleset::server_default(sender_user), - }) -} diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs deleted file mode 100644 index fbfc8fea..00000000 --- a/src/api/client/read_marker.rs +++ /dev/null @@ -1,209 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use conduwuit::{Err, PduCount, Result, err}; -use ruma::{ - MilliSecondsSinceUnixEpoch, - api::client::{read_marker::set_read_marker, receipt::create_receipt}, - events::{ - RoomAccountDataEventType, - receipt::{ReceiptThread, ReceiptType}, - }, -}; - -use crate::Ruma; - -/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` -/// -/// Sets different types of read markers. 
-/// -/// - Updates fully-read account data event to `fully_read` -/// - If `read_receipt` is set: Update private marker and public read receipt -/// EDU -pub(crate) async fn set_read_marker_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if let Some(event) = &body.fully_read { - let fully_read_event = ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { event_id: event.clone() }, - }; - - services - .account_data - .update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), - ) - .await?; - } - - if body.private_read_receipt.is_some() || body.read_receipt.is_some() { - services - .rooms - .user - .reset_notification_counts(sender_user, &body.room_id); - } - - // ping presence - if services.config.allow_local_presence { - services - .presence - .ping_presence(sender_user, &ruma::presence::PresenceState::Online) - .await?; - } - - if let Some(event) = &body.read_receipt { - let receipt_content = BTreeMap::from_iter([( - event.to_owned(), - BTreeMap::from_iter([( - ReceiptType::Read, - BTreeMap::from_iter([(sender_user.to_owned(), ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - thread: ReceiptThread::Unthreaded, - })]), - )]), - )]); - - services - .rooms - .read_receipt - .readreceipt_update( - sender_user, - &body.room_id, - &ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - ) - .await; - } - - if let Some(event) = &body.private_read_receipt { - let count = services - .rooms - .timeline - .get_pdu_count(event) - .await - .map_err(|_| err!(Request(NotFound("Event not found."))))?; - - let PduCount::Normal(count) = count else { - return Err!(Request(InvalidParam( - "Event is a backfilled PDU and cannot be marked as read." 
- ))); - }; - - services - .rooms - .read_receipt - .private_read_set(&body.room_id, sender_user, count); - } - - Ok(set_read_marker::v3::Response {}) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` -/// -/// Sets private read marker and public read receipt EDU. -pub(crate) async fn create_receipt_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if matches!( - &body.receipt_type, - create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate - ) { - services - .rooms - .user - .reset_notification_counts(sender_user, &body.room_id); - } - - // ping presence - if services.config.allow_local_presence { - services - .presence - .ping_presence(sender_user, &ruma::presence::PresenceState::Online) - .await?; - } - - match body.receipt_type { - | create_receipt::v3::ReceiptType::FullyRead => { - let fully_read_event = ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { - event_id: body.event_id.clone(), - }, - }; - services - .account_data - .update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), - ) - .await?; - }, - | create_receipt::v3::ReceiptType::Read => { - let receipt_content = BTreeMap::from_iter([( - body.event_id.clone(), - BTreeMap::from_iter([( - ReceiptType::Read, - BTreeMap::from_iter([( - sender_user.to_owned(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - thread: ReceiptThread::Unthreaded, - }, - )]), - )]), - )]); - - services - .rooms - .read_receipt - .readreceipt_update( - sender_user, - &body.room_id, - &ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - ) - .await; - }, - | create_receipt::v3::ReceiptType::ReadPrivate => { - let count = 
services - .rooms - .timeline - .get_pdu_count(&body.event_id) - .await - .map_err(|_| err!(Request(NotFound("Event not found."))))?; - - let PduCount::Normal(count) = count else { - return Err!(Request(InvalidParam( - "Event is a backfilled PDU and cannot be marked as read." - ))); - }; - - services - .rooms - .read_receipt - .private_read_set(&body.room_id, sender_user, count); - }, - | _ => { - return Err!(Request(InvalidParam(warn!( - "Received unknown read receipt type: {}", - &body.receipt_type - )))); - }, - } - - Ok(create_receipt::v3::Response {}) -} diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs deleted file mode 100644 index 8dbe47a6..00000000 --- a/src/api/client/redact.rs +++ /dev/null @@ -1,43 +0,0 @@ -use axum::extract::State; -use conduwuit::{Result, matrix::pdu::PduBuilder}; -use ruma::{ - api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, -}; - -use crate::Ruma; - -/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` -/// -/// Tries to send a redaction event into the room. 
-/// -/// - TODO: Handle txn id -pub(crate) async fn redact_event_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let body = body.body; - - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - let event_id = services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - redacts: Some(body.event_id.clone()), - ..PduBuilder::timeline(&RoomRedactionEventContent { - redacts: Some(body.event_id.clone()), - reason: body.reason.clone(), - }) - }, - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - Ok(redact_event::v3::Response { event_id }) -} diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs deleted file mode 100644 index b8c2dd4d..00000000 --- a/src/api/client/relations.rs +++ /dev/null @@ -1,188 +0,0 @@ -use axum::extract::State; -use conduwuit::{ - Result, at, - matrix::pdu::PduCount, - utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, -}; -use conduwuit_service::{Services, rooms::timeline::PdusIterItem}; -use futures::StreamExt; -use ruma::{ - EventId, RoomId, UInt, UserId, - api::{ - Direction, - client::relations::{ - get_relating_events, get_relating_events_with_rel_type, - get_relating_events_with_rel_type_and_event_type, - }, - }, - events::{TimelineEventType, relation::RelationType}, -}; - -use crate::Ruma; - -/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` -pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route( - State(services): State, - body: Ruma, -) -> Result { - paginate_relations_with_filter( - &services, - body.sender_user(), - &body.room_id, - &body.event_id, - body.event_type.clone().into(), - body.rel_type.clone().into(), - body.from.as_deref(), - body.to.as_deref(), - body.limit, - body.recurse, - body.dir, - ) - .await - .map(|res| 
get_relating_events_with_rel_type_and_event_type::v1::Response { - chunk: res.chunk, - next_batch: res.next_batch, - prev_batch: res.prev_batch, - recursion_depth: res.recursion_depth, - }) -} - -/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}` -pub(crate) async fn get_relating_events_with_rel_type_route( - State(services): State, - body: Ruma, -) -> Result { - paginate_relations_with_filter( - &services, - body.sender_user(), - &body.room_id, - &body.event_id, - None, - body.rel_type.clone().into(), - body.from.as_deref(), - body.to.as_deref(), - body.limit, - body.recurse, - body.dir, - ) - .await - .map(|res| get_relating_events_with_rel_type::v1::Response { - chunk: res.chunk, - next_batch: res.next_batch, - prev_batch: res.prev_batch, - recursion_depth: res.recursion_depth, - }) -} - -/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}` -pub(crate) async fn get_relating_events_route( - State(services): State, - body: Ruma, -) -> Result { - paginate_relations_with_filter( - &services, - body.sender_user(), - &body.room_id, - &body.event_id, - None, - None, - body.from.as_deref(), - body.to.as_deref(), - body.limit, - body.recurse, - body.dir, - ) - .await -} - -#[allow(clippy::too_many_arguments)] -async fn paginate_relations_with_filter( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - target: &EventId, - filter_event_type: Option, - filter_rel_type: Option, - from: Option<&str>, - to: Option<&str>, - limit: Option, - recurse: bool, - dir: Direction, -) -> Result { - let start: PduCount = from - .map(str::parse) - .transpose()? 
- .unwrap_or_else(|| match dir { - | Direction::Forward => PduCount::min(), - | Direction::Backward => PduCount::max(), - }); - - let to: Option = to.map(str::parse).flat_ok(); - - // Use limit or else 30, with maximum 100 - let limit: usize = limit - .map(TryInto::try_into) - .flat_ok() - .unwrap_or(30) - .min(100); - - // Spec (v1.10) recommends depth of at least 3 - let depth: u8 = if recurse { 3 } else { 1 }; - - let events: Vec = services - .rooms - .pdu_metadata - .get_relations(sender_user, room_id, target, start, limit, depth, dir) - .await - .into_iter() - .filter(|(_, pdu)| { - filter_event_type - .as_ref() - .is_none_or(|kind| *kind == pdu.kind) - }) - .filter(|(_, pdu)| { - filter_rel_type - .as_ref() - .is_none_or(|rel_type| pdu.relation_type_equal(rel_type)) - }) - .stream() - .ready_take_while(|(count, _)| Some(*count) != to) - .wide_filter_map(|item| visibility_filter(services, sender_user, item)) - .take(limit) - .collect() - .await; - - let next_batch = match dir { - | Direction::Forward => events.last(), - | Direction::Backward => events.first(), - } - .map(at!(0)) - .as_ref() - .map(ToString::to_string); - - Ok(get_relating_events::v1::Response { - next_batch, - prev_batch: from.map(Into::into), - recursion_depth: recurse.then_some(depth.into()), - chunk: events - .into_iter() - .map(at!(1)) - .map(|pdu| pdu.to_message_like_event()) - .collect(), - }) -} - -async fn visibility_filter( - services: &Services, - sender_user: &UserId, - item: PdusIterItem, -) -> Option { - let (_, pdu) = &item; - - services - .rooms - .state_accessor - .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) - .await - .then_some(item) -} diff --git a/src/api/client/report.rs b/src/api/client/report.rs deleted file mode 100644 index 4ee8ebe5..00000000 --- a/src/api/client/report.rs +++ /dev/null @@ -1,200 +0,0 @@ -use std::time::Duration; - -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, Error, Result, debug_info, info, 
matrix::pdu::PduEvent, utils::ReadyExt}; -use conduwuit_service::Services; -use rand::Rng; -use ruma::{ - EventId, RoomId, UserId, - api::client::{ - error::ErrorKind, - room::{report_content, report_room}, - }, - events::room::message, - int, -}; -use tokio::time::sleep; - -use crate::Ruma; - -/// # `POST /_matrix/client/v3/rooms/{roomId}/report` -/// -/// Reports an abusive room to homeserver admins -#[tracing::instrument(skip_all, fields(%client), name = "report_room")] -pub(crate) async fn report_room_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - // user authentication - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - info!( - "Received room report by user {sender_user} for room {} with reason: \"{}\"", - body.room_id, - body.reason.as_deref().unwrap_or("") - ); - - if body.reason.as_ref().is_some_and(|s| s.len() > 750) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Reason too long, should be 750 characters or fewer", - )); - } - - delay_response().await; - - if !services - .rooms - .state_cache - .server_in_room(&services.server.name, &body.room_id) - .await - { - return Err!(Request(NotFound( - "Room does not exist to us, no local users have joined at all" - ))); - } - - // send admin room message that we received the report with an @room ping for - // urgency - services - .admin - .send_message(message::RoomMessageEventContent::text_markdown(format!( - "@room Room report received from {} -\n\nRoom ID: {}\n\nReport Reason: {}", - sender_user.to_owned(), - body.room_id, - body.reason.as_deref().unwrap_or("") - ))) - .await - .ok(); - - Ok(report_room::v3::Response {}) -} - -/// # `POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}` -/// -/// Reports an inappropriate event to homeserver admins -#[tracing::instrument(skip_all, fields(%client), name = "report_event")] -pub(crate) async fn report_event_route( - State(services): State, - 
InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - // user authentication - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - info!( - "Received event report by user {sender_user} for room {} and event ID {}, with reason: \ - \"{}\"", - body.room_id, - body.event_id, - body.reason.as_deref().unwrap_or("") - ); - - delay_response().await; - - // check if we know about the reported event ID or if it's invalid - let Ok(pdu) = services.rooms.timeline.get_pdu(&body.event_id).await else { - return Err!(Request(NotFound("Event ID is not known to us or Event ID is invalid"))); - }; - - is_event_report_valid( - &services, - &pdu.event_id, - &body.room_id, - sender_user, - body.reason.as_ref(), - body.score, - &pdu, - ) - .await?; - - // send admin room message that we received the report with an @room ping for - // urgency - services - .admin - .send_message(message::RoomMessageEventContent::text_markdown(format!( - "@room Event report received from {} -\n\nEvent ID: {}\nRoom ID: {}\nSent By: \ - {}\n\nReport Score: {}\nReport Reason: {}", - sender_user.to_owned(), - pdu.event_id, - pdu.room_id, - pdu.sender, - body.score.unwrap_or_else(|| ruma::Int::from(0)), - body.reason.as_deref().unwrap_or("") - ))) - .await - .ok(); - - Ok(report_content::v3::Response {}) -} - -/// in the following order: -/// -/// check if the room ID from the URI matches the PDU's room ID -/// check if score is in valid range -/// check if report reasoning is less than or equal to 750 characters -/// check if reporting user is in the reporting room -async fn is_event_report_valid( - services: &Services, - event_id: &EventId, - room_id: &RoomId, - sender_user: &UserId, - reason: Option<&String>, - score: Option, - pdu: &PduEvent, -) -> Result<()> { - debug_info!( - "Checking if report from user {sender_user} for event {event_id} in room {room_id} is \ - valid" - ); - - if room_id != pdu.room_id { - return Err(Error::BadRequest( - 
ErrorKind::NotFound, - "Event ID does not belong to the reported room", - )); - } - - if score.is_some_and(|s| s > int!(0) || s < int!(-100)) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid score, must be within 0 to -100", - )); - } - - if reason.as_ref().is_some_and(|s| s.len() > 750) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Reason too long, should be 750 characters or fewer", - )); - } - - if !services - .rooms - .state_cache - .room_members(room_id) - .ready_any(|user_id| user_id == sender_user) - .await - { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "You are not in the room you are reporting.", - )); - } - - Ok(()) -} - -/// even though this is kinda security by obscurity, let's still make a small -/// random delay sending a response per spec suggestion regarding -/// enumerating for potential events existing in our server. -async fn delay_response() { - let time_to_wait = rand::thread_rng().gen_range(2..5); - debug_info!( - "Got successful /report request, waiting {time_to_wait} seconds before sending \ - successful response." - ); - sleep(Duration::from_secs(time_to_wait)).await; -} diff --git a/src/api/client/room/aliases.rs b/src/api/client/room/aliases.rs deleted file mode 100644 index 3f0016af..00000000 --- a/src/api/client/room/aliases.rs +++ /dev/null @@ -1,41 +0,0 @@ -use axum::extract::State; -use conduwuit::{Error, Result}; -use futures::StreamExt; -use ruma::api::client::{error::ErrorKind, room::aliases}; - -use crate::Ruma; - -/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases` -/// -/// Lists all aliases of the room. 
-/// -/// - Only users joined to the room are allowed to call this, or if -/// `history_visibility` is world readable in the room -pub(crate) async fn get_room_aliases_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if !services - .rooms - .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) - .await - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this room.", - )); - } - - Ok(aliases::v3::Response { - aliases: services - .rooms - .alias - .local_aliases_for_room(&body.room_id) - .map(ToOwned::to_owned) - .collect() - .await, - }) -} diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs deleted file mode 100644 index be3fd23b..00000000 --- a/src/api/client/room/create.rs +++ /dev/null @@ -1,646 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use conduwuit::{ - Err, Error, Result, debug_info, debug_warn, err, error, info, - matrix::{StateKey, pdu::PduBuilder}, - warn, -}; -use conduwuit_service::{Services, appservice::RegistrationInfo}; -use futures::FutureExt; -use ruma::{ - CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, - api::client::{ - error::ErrorKind, - room::{self, create_room}, - }, - events::{ - TimelineEventType, - room::{ - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - name::RoomNameEventContent, - power_levels::RoomPowerLevelsEventContent, - topic::RoomTopicEventContent, - }, - }, - int, - serde::{JsonObject, Raw}, -}; -use serde_json::{json, value::to_raw_value}; - -use crate::{Ruma, client::invite_helper}; - -/// # `POST 
/_matrix/client/v3/createRoom` -/// -/// Creates a new room. -/// -/// - Room ID is randomly generated -/// - Create alias if `room_alias_name` is set -/// - Send create event -/// - Join sender user -/// - Send power levels event -/// - Send canonical room alias -/// - Send join rules -/// - Send history visibility -/// - Send guest access -/// - Send events listed in initial state -/// - Send events implied by `name` and `topic` -/// - Send invite events -#[allow(clippy::large_stack_frames)] -pub(crate) async fn create_room_route( - State(services): State, - body: Ruma, -) -> Result { - use create_room::v3::RoomPreset; - - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if !services.globals.allow_room_creation() - && body.appservice_info.is_none() - && !services.users.is_admin(sender_user).await - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Room creation has been disabled.", - )); - } - - let room_id: OwnedRoomId = match &body.room_id { - | Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?, - | _ => RoomId::new(&services.server.name), - }; - - // check if room ID doesn't already exist instead of erroring on auth check - if services.rooms.short.get_shortroomid(&room_id).await.is_ok() { - return Err(Error::BadRequest( - ErrorKind::RoomInUse, - "Room with that custom room ID already exists", - )); - } - - if body.visibility == room::Visibility::Public - && services.server.config.lockdown_public_room_directory - && !services.users.is_admin(sender_user).await - && body.appservice_info.is_none() - { - info!( - "Non-admin user {sender_user} tried to publish {0} to the room directory while \ - \"lockdown_public_room_directory\" is enabled", - &room_id - ); - - if services.server.config.admin_room_notices { - services - .admin - .send_text(&format!( - "Non-admin user {sender_user} tried to publish {0} to the room directory \ - while \"lockdown_public_room_directory\" is enabled", - &room_id - )) - 
.await; - } - - return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed"))); - } - let _short_id = services - .rooms - .short - .get_or_create_shortroomid(&room_id) - .await; - let state_lock = services.rooms.state.mutex.lock(&room_id).await; - - let alias: Option = match body.room_alias_name.as_ref() { - | Some(alias) => - Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?), - | _ => None, - }; - - let room_version = match body.room_version.clone() { - | Some(room_version) => - if services.server.supported_room_version(&room_version) { - room_version - } else { - return Err(Error::BadRequest( - ErrorKind::UnsupportedRoomVersion, - "This server does not support that room version.", - )); - }, - | None => services.server.config.default_room_version.clone(), - }; - - let create_content = match &body.creation_content { - | Some(content) => { - use RoomVersionId::*; - - let mut content = content - .deserialize_as::() - .map_err(|e| { - error!("Failed to deserialise content as canonical JSON: {}", e); - Error::bad_database("Failed to deserialise content as canonical JSON.") - })?; - match room_version { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - content.insert( - "creator".into(), - json!(&sender_user).try_into().map_err(|e| { - info!("Invalid creation content: {e}"); - Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") - })?, - ); - }, - | _ => { - // V11+ removed the "creator" key - }, - } - content.insert( - "room_version".into(), - json!(room_version.as_str()).try_into().map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") - })?, - ); - content - }, - | None => { - use RoomVersionId::*; - - let content = match room_version { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => - RoomCreateEventContent::new_v1(sender_user.clone()), - | _ => RoomCreateEventContent::new_v11(), - }; - let mut content = serde_json::from_str::( - to_raw_value(&content) - 
.expect("we just created this as content was None") - .get(), - ) - .unwrap(); - content.insert( - "room_version".into(), - json!(room_version.as_str()) - .try_into() - .expect("we just created this as content was None"), - ); - content - }, - }; - - // 1. The room create event - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&create_content) - .expect("create event content serialization"), - state_key: Some(StateKey::new()), - ..Default::default() - }, - sender_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 2. Let the room creator join - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(sender_user.to_string(), &RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - is_direct: Some(body.is_direct), - ..RoomMemberEventContent::new(MembershipState::Join) - }), - sender_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 3. Power levels - - // Figure out preset. 
We need it for preset specific events - let preset = body.preset.clone().unwrap_or(match &body.visibility { - | room::Visibility::Public => RoomPreset::PublicChat, - | _ => RoomPreset::PrivateChat, // Room visibility should not be custom - }); - - let mut users = BTreeMap::from_iter([(sender_user.clone(), int!(100))]); - - if preset == RoomPreset::TrustedPrivateChat { - for invite in &body.invite { - if services.users.user_is_ignored(sender_user, invite).await { - continue; - } else if services.users.user_is_ignored(invite, sender_user).await { - // silently drop the invite to the recipient if they've been ignored by the - // sender, pretend it worked - continue; - } - - users.insert(invite.clone(), int!(100)); - } - } - - let power_levels_content = default_power_levels_content( - body.power_level_content_override.as_ref(), - &body.visibility, - users, - )?; - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&power_levels_content) - .expect("serialized power_levels event content"), - state_key: Some(StateKey::new()), - ..Default::default() - }, - sender_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 4. Canonical room alias - if let Some(room_alias_id) = &alias { - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomCanonicalAliasEventContent { - alias: Some(room_alias_id.to_owned()), - alt_aliases: vec![], - }), - sender_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - } - - // 5. 
Events set by preset - - // 5.1 Join Rules - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomJoinRulesEventContent::new(match preset { - | RoomPreset::PublicChat => JoinRule::Public, - // according to spec "invite" is the default - | _ => JoinRule::Invite, - }), - ), - sender_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 5.2 History Visibility - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared), - ), - sender_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 5.3 Guest Access - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomGuestAccessEventContent::new(match preset { - | RoomPreset::PublicChat => GuestAccess::Forbidden, - | _ => GuestAccess::CanJoin, - }), - ), - sender_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 6. Events listed in initial_state - for event in &body.initial_state { - let mut pdu_builder = event.deserialize_as::().map_err(|e| { - warn!("Invalid initial state event: {:?}", e); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.") - })?; - - debug_info!("Room creation initial state event: {event:?}"); - - // client/appservice workaround: if a user sends an initial_state event with a - // state event in there with the content of literally `{}` (not null or empty - // string), let's just skip it over and warn. 
- if pdu_builder.content.get().eq("{}") { - info!("skipping empty initial state event with content of `{{}}`: {event:?}"); - debug_warn!("content: {}", pdu_builder.content.get()); - continue; - } - - // Implicit state key defaults to "" - pdu_builder.state_key.get_or_insert_with(StateKey::new); - - // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == TimelineEventType::RoomEncryption - && !services.config.allow_encryption - { - continue; - } - - services - .rooms - .timeline - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) - .boxed() - .await?; - } - - // 7. Events implied by name and topic - if let Some(name) = &body.name { - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomNameEventContent::new(name.clone())), - sender_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - } - - if let Some(topic) = &body.topic { - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomTopicEventContent { topic: topic.clone() }), - sender_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - } - - // 8. 
Events implied by invite (and TODO: invite_3pid) - drop(state_lock); - for user_id in &body.invite { - if services.users.user_is_ignored(sender_user, user_id).await { - continue; - } else if services.users.user_is_ignored(user_id, sender_user).await { - // silently drop the invite to the recipient if they've been ignored by the - // sender, pretend it worked - continue; - } - - if let Err(e) = - invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct) - .boxed() - .await - { - warn!(%e, "Failed to send invite"); - } - } - - // Homeserver specific stuff - if let Some(alias) = alias { - services - .rooms - .alias - .set_alias(&alias, &room_id, sender_user)?; - } - - if body.visibility == room::Visibility::Public { - services.rooms.directory.set_public(&room_id); - - if services.server.config.admin_room_notices { - services - .admin - .send_text(&format!( - "{sender_user} made {} public to the room directory", - &room_id - )) - .await; - } - info!("{sender_user} made {0} public to the room directory", &room_id); - } - - info!("{sender_user} created a room with room ID {room_id}"); - - Ok(create_room::v3::Response::new(room_id)) -} - -/// creates the power_levels_content for the PDU builder -fn default_power_levels_content( - power_level_content_override: Option<&Raw>, - visibility: &room::Visibility, - users: BTreeMap, -) -> Result { - let mut power_levels_content = - serde_json::to_value(RoomPowerLevelsEventContent { users, ..Default::default() }) - .expect("event is valid, we just created it"); - - // secure proper defaults of sensitive/dangerous permissions that moderators - // (power level 50) should not have easy access to - power_levels_content["events"]["m.room.power_levels"] = - serde_json::to_value(100).expect("100 is valid Value"); - power_levels_content["events"]["m.room.server_acl"] = - serde_json::to_value(100).expect("100 is valid Value"); - power_levels_content["events"]["m.room.tombstone"] = - serde_json::to_value(100).expect("100 
is valid Value"); - power_levels_content["events"]["m.room.encryption"] = - serde_json::to_value(100).expect("100 is valid Value"); - power_levels_content["events"]["m.room.history_visibility"] = - serde_json::to_value(100).expect("100 is valid Value"); - - // always allow users to respond (not post new) to polls. this is primarily - // useful in read-only announcement rooms that post a public poll. - power_levels_content["events"]["org.matrix.msc3381.poll.response"] = - serde_json::to_value(0).expect("0 is valid Value"); - power_levels_content["events"]["m.poll.response"] = - serde_json::to_value(0).expect("0 is valid Value"); - - // synapse does this too. clients do not expose these permissions. it prevents - // default users from calling public rooms, for obvious reasons. - if *visibility == room::Visibility::Public { - power_levels_content["events"]["m.call.invite"] = - serde_json::to_value(50).expect("50 is valid Value"); - power_levels_content["events"]["m.call"] = - serde_json::to_value(50).expect("50 is valid Value"); - power_levels_content["events"]["m.call.member"] = - serde_json::to_value(50).expect("50 is valid Value"); - power_levels_content["events"]["org.matrix.msc3401.call"] = - serde_json::to_value(50).expect("50 is valid Value"); - power_levels_content["events"]["org.matrix.msc3401.call.member"] = - serde_json::to_value(50).expect("50 is valid Value"); - } - - if let Some(power_level_content_override) = power_level_content_override { - let json: JsonObject = serde_json::from_str(power_level_content_override.json().get()) - .map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") - })?; - - for (key, value) in json { - power_levels_content[key] = value; - } - } - - Ok(power_levels_content) -} - -/// if a room is being created with a room alias, run our checks -async fn room_alias_check( - services: &Services, - room_alias_name: &str, - appservice_info: Option<&RegistrationInfo>, -) -> Result { - // Basic 
checks on the room alias validity - if room_alias_name.contains(':') { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room alias contained `:` which is not allowed. Please note that this expects a \ - localpart, not the full room alias.", - )); - } else if room_alias_name.contains(char::is_whitespace) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room alias contained spaces which is not a valid room alias.", - )); - } - - // check if room alias is forbidden - if services - .globals - .forbidden_alias_names() - .is_match(room_alias_name) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Room alias name is forbidden.")); - } - - let server_name = services.globals.server_name(); - let full_room_alias = OwnedRoomAliasId::parse(format!("#{room_alias_name}:{server_name}")) - .map_err(|e| { - err!(Request(InvalidParam(debug_error!( - ?e, - ?room_alias_name, - "Failed to parse room alias.", - )))) - })?; - - if services - .rooms - .alias - .resolve_local_alias(&full_room_alias) - .await - .is_ok() - { - return Err(Error::BadRequest(ErrorKind::RoomInUse, "Room alias already exists.")); - } - - if let Some(info) = appservice_info { - if !info.aliases.is_match(full_room_alias.as_str()) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "Room alias is not in namespace.", - )); - } - } else if services - .appservice - .is_exclusive_alias(&full_room_alias) - .await - { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "Room alias reserved by appservice.", - )); - } - - debug_info!("Full room alias: {full_room_alias}"); - - Ok(full_room_alias) -} - -/// if a room is being created with a custom room ID, run our checks against it -fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result { - // apply forbidden room alias checks to custom room IDs too - if services - .globals - .forbidden_alias_names() - .is_match(custom_room_id) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Custom room ID is 
forbidden.")); - } - - let server_name = services.globals.server_name(); - let mut room_id = custom_room_id.to_owned(); - if custom_room_id.contains(':') { - if !custom_room_id.starts_with('!') { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Custom room ID contains an unexpected `:` which is not allowed.", - )); - } - } else if custom_room_id.starts_with('!') { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room ID is prefixed with !, but is not fully qualified. You likely did not want \ - this.", - )); - } else { - room_id = format!("!{custom_room_id}:{server_name}"); - } - OwnedRoomId::parse(room_id) - .map_err(Into::into) - .and_then(|full_room_id| { - if full_room_id - .server_name() - .expect("failed to extract server name from room ID") - != server_name - { - Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Custom room ID must be on this server.", - )) - } else { - Ok(full_room_id) - } - }) - .inspect(|full_room_id| { - debug_info!(?full_room_id, "Full custom room ID"); - }) - .inspect_err(|e| warn!(?e, ?custom_room_id, "Failed to create room with custom room ID",)) -} diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs deleted file mode 100644 index 2b115b5c..00000000 --- a/src/api/client/room/event.rs +++ /dev/null @@ -1,44 +0,0 @@ -use axum::extract::State; -use conduwuit::{Err, Event, Result, err}; -use futures::{FutureExt, TryFutureExt, future::try_join}; -use ruma::api::client::room::get_room_event; - -use crate::{Ruma, client::is_ignored_pdu}; - -/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` -/// -/// Gets a single event. 
-pub(crate) async fn get_room_event_route( - State(ref services): State, - ref body: Ruma, -) -> Result { - let event_id = &body.event_id; - let room_id = &body.room_id; - - let event = services - .rooms - .timeline - .get_pdu(event_id) - .map_err(|_| err!(Request(NotFound("Event {} not found.", event_id)))); - - let visible = services - .rooms - .state_accessor - .user_can_see_event(body.sender_user(), room_id, event_id) - .map(Ok); - - let (mut event, visible) = try_join(event, visible).await?; - - if !visible || is_ignored_pdu(services, &event, body.sender_user()).await { - return Err!(Request(Forbidden("You don't have permission to view this event."))); - } - - debug_assert!( - event.event_id() == event_id && event.room_id() == room_id, - "Fetched PDU must match requested" - ); - - event.add_age().ok(); - - Ok(get_room_event::v3::Response { event: event.into_room_event() }) -} diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs deleted file mode 100644 index ca63610b..00000000 --- a/src/api/client/room/initial_sync.rs +++ /dev/null @@ -1,74 +0,0 @@ -use axum::extract::State; -use conduwuit::{ - Err, PduEvent, Result, at, - utils::{BoolExt, stream::TryTools}, -}; -use futures::TryStreamExt; -use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; - -use crate::Ruma; - -const LIMIT_MAX: usize = 100; - -pub(crate) async fn room_initial_sync_route( - State(services): State, - body: Ruma, -) -> Result { - let room_id = &body.room_id; - - if !services - .rooms - .state_accessor - .user_can_see_state_events(body.sender_user(), room_id) - .await - { - return Err!(Request(Forbidden("No room preview available."))); - } - - let limit = LIMIT_MAX; - let events: Vec<_> = services - .rooms - .timeline - .pdus_rev(None, room_id, None) - .try_take(limit) - .try_collect() - .await?; - - let state: Vec<_> = services - .rooms - .state_accessor - .room_state_full_pdus(room_id) - .map_ok(PduEvent::into_state_event) - 
.try_collect() - .await?; - - let messages = PaginationChunk { - start: events.last().map(at!(0)).as_ref().map(ToString::to_string), - - end: events - .first() - .map(at!(0)) - .as_ref() - .map(ToString::to_string) - .unwrap_or_default(), - - chunk: events - .into_iter() - .map(at!(1)) - .map(PduEvent::into_room_event) - .collect(), - }; - - Ok(Response { - room_id: room_id.to_owned(), - account_data: None, - state: state.into(), - messages: messages.chunk.is_empty().or_some(messages), - visibility: services.rooms.directory.visibility(room_id).await.into(), - membership: services - .rooms - .state_cache - .user_membership(body.sender_user(), room_id) - .await, - }) -} diff --git a/src/api/client/room/mod.rs b/src/api/client/room/mod.rs deleted file mode 100644 index 86d68f7e..00000000 --- a/src/api/client/room/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -mod aliases; -mod create; -mod event; -mod initial_sync; -mod summary; -mod upgrade; - -pub(crate) use self::{ - aliases::get_room_aliases_route, - create::create_room_route, - event::get_room_event_route, - initial_sync::room_initial_sync_route, - summary::{get_room_summary, get_room_summary_legacy}, - upgrade::upgrade_room_route, -}; diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs deleted file mode 100644 index 67d2e2ad..00000000 --- a/src/api/client/room/summary.rs +++ /dev/null @@ -1,330 +0,0 @@ -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{ - Err, Result, debug_warn, trace, - utils::{IterStream, future::TryExtExt}, -}; -use futures::{ - FutureExt, StreamExt, - future::{OptionFuture, join3}, - stream::FuturesUnordered, -}; -use ruma::{ - OwnedServerName, RoomId, UserId, - api::{ - client::room::get_summary, - federation::space::{SpaceHierarchyParentSummary, get_hierarchy}, - }, - events::room::member::MembershipState, - space::SpaceRoomJoinRule::{self, *}, -}; -use service::Services; - -use crate::{Ruma, RumaResponse}; - -/// # `GET 
/_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` -/// -/// Returns a short description of the state of a room. -/// -/// This is the "wrong" endpoint that some implementations/clients may use -/// according to the MSC. Request and response bodies are the same as -/// `get_room_summary`. -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -pub(crate) async fn get_room_summary_legacy( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - get_room_summary(State(services), InsecureClientIp(client), body) - .boxed() - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` -/// -/// Returns a short description of the state of a room. -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -#[tracing::instrument(skip_all, fields(%client), name = "room_summary")] -pub(crate) async fn get_room_summary( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let (room_id, servers) = services - .rooms - .alias - .resolve_with_servers(&body.room_id_or_alias, Some(body.via.clone())) - .await?; - - if services.rooms.metadata.is_banned(&room_id).await { - return Err!(Request(Forbidden("This room is banned on this homeserver."))); - } - - room_summary_response(&services, &room_id, &servers, body.sender_user.as_deref()) - .boxed() - .await -} - -async fn room_summary_response( - services: &Services, - room_id: &RoomId, - servers: &[OwnedServerName], - sender_user: Option<&UserId>, -) -> Result { - if services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), room_id) - .await - { - return local_room_summary_response(services, room_id, sender_user) - .boxed() - .await; - } - - let room = - remote_room_summary_hierarchy_response(services, room_id, servers, sender_user).await?; - - 
Ok(get_summary::msc3266::Response { - room_id: room_id.to_owned(), - canonical_alias: room.canonical_alias, - avatar_url: room.avatar_url, - guest_can_join: room.guest_can_join, - name: room.name, - num_joined_members: room.num_joined_members, - topic: room.topic, - world_readable: room.world_readable, - join_rule: room.join_rule, - room_type: room.room_type, - room_version: room.room_version, - encryption: room.encryption, - allowed_room_ids: room.allowed_room_ids, - membership: sender_user.is_some().then_some(MembershipState::Leave), - }) -} - -async fn local_room_summary_response( - services: &Services, - room_id: &RoomId, - sender_user: Option<&UserId>, -) -> Result { - trace!(?sender_user, "Sending local room summary response for {room_id:?}"); - let join_rule = services.rooms.state_accessor.get_join_rules(room_id); - let world_readable = services.rooms.state_accessor.is_world_readable(room_id); - let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); - - let (join_rule, world_readable, guest_can_join) = - join3(join_rule, world_readable, guest_can_join).await; - trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}"); - - user_can_see_summary( - services, - room_id, - &join_rule.clone().into(), - guest_can_join, - world_readable, - join_rule.allowed_rooms(), - sender_user, - ) - .await?; - - let canonical_alias = services - .rooms - .state_accessor - .get_canonical_alias(room_id) - .ok(); - - let name = services.rooms.state_accessor.get_name(room_id).ok(); - - let topic = services.rooms.state_accessor.get_room_topic(room_id).ok(); - - let room_type = services.rooms.state_accessor.get_room_type(room_id).ok(); - - let avatar_url = services - .rooms - .state_accessor - .get_avatar(room_id) - .map(|res| res.into_option().unwrap_or_default().url); - - let room_version = services.rooms.state.get_room_version(room_id).ok(); - - let encryption = services - .rooms - .state_accessor - .get_room_encryption(room_id) - .ok(); - - let 
num_joined_members = services - .rooms - .state_cache - .room_joined_count(room_id) - .unwrap_or(0); - - let membership: OptionFuture<_> = sender_user - .map(|sender_user| { - services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .map_ok_or(MembershipState::Leave, |content| content.membership) - }) - .into(); - - let ( - canonical_alias, - name, - num_joined_members, - topic, - avatar_url, - room_type, - room_version, - encryption, - membership, - ) = futures::join!( - canonical_alias, - name, - num_joined_members, - topic, - avatar_url, - room_type, - room_version, - encryption, - membership, - ); - - Ok(get_summary::msc3266::Response { - room_id: room_id.to_owned(), - canonical_alias, - avatar_url, - guest_can_join, - name, - num_joined_members: num_joined_members.try_into().unwrap_or_default(), - topic, - world_readable, - room_type, - room_version, - encryption, - membership, - allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), - join_rule: join_rule.into(), - }) -} - -/// used by MSC3266 to fetch a room's info if we do not know about it -async fn remote_room_summary_hierarchy_response( - services: &Services, - room_id: &RoomId, - servers: &[OwnedServerName], - sender_user: Option<&UserId>, -) -> Result { - trace!(?sender_user, ?servers, "Sending remote room summary response for {room_id:?}"); - if !services.config.allow_federation { - return Err!(Request(Forbidden("Federation is disabled."))); - } - - if services.rooms.metadata.is_disabled(room_id).await { - return Err!(Request(Forbidden( - "Federaton of room {room_id} is currently disabled on this server." 
- ))); - } - - let request = get_hierarchy::v1::Request::new(room_id.to_owned()); - - let mut requests: FuturesUnordered<_> = servers - .iter() - .map(|server| { - services - .sending - .send_federation_request(server, request.clone()) - }) - .collect(); - - while let Some(Ok(response)) = requests.next().await { - trace!("{response:?}"); - let room = response.room.clone(); - if room.room_id != room_id { - debug_warn!( - "Room ID {} returned does not belong to the requested room ID {}", - room.room_id, - room_id - ); - continue; - } - - return user_can_see_summary( - services, - room_id, - &room.join_rule, - room.guest_can_join, - room.world_readable, - room.allowed_room_ids.iter().map(AsRef::as_ref), - sender_user, - ) - .await - .map(|()| room); - } - - Err!(Request(NotFound( - "Room is unknown to this server and was unable to fetch over federation with the \ - provided servers available" - ))) -} - -async fn user_can_see_summary<'a, I>( - services: &Services, - room_id: &RoomId, - join_rule: &SpaceRoomJoinRule, - guest_can_join: bool, - world_readable: bool, - allowed_room_ids: I, - sender_user: Option<&UserId>, -) -> Result -where - I: Iterator + Send, -{ - let is_public_room = matches!(join_rule, Public | Knock | KnockRestricted); - match sender_user { - | Some(sender_user) => { - let user_can_see_state_events = services - .rooms - .state_accessor - .user_can_see_state_events(sender_user, room_id); - let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false); - let user_in_allowed_restricted_room = allowed_room_ids - .stream() - .any(|room| services.rooms.state_cache.is_joined(sender_user, room)); - - let (user_can_see_state_events, is_guest, user_in_allowed_restricted_room) = - join3(user_can_see_state_events, is_guest, user_in_allowed_restricted_room) - .boxed() - .await; - - if user_can_see_state_events - || (is_guest && guest_can_join) - || is_public_room - || user_in_allowed_restricted_room - { - return Ok(()); - } - - 
Err!(Request(Forbidden( - "Room is not world readable, not publicly accessible/joinable, restricted room \ - conditions not met, and guest access is forbidden. Not allowed to see details \ - of this room." - ))) - }, - | None => { - if is_public_room || world_readable { - return Ok(()); - } - - Err!(Request(Forbidden( - "Room is not world readable or publicly accessible/joinable, authentication is \ - required" - ))) - }, - } -} diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs deleted file mode 100644 index 9ec0b3bb..00000000 --- a/src/api/client/room/upgrade.rs +++ /dev/null @@ -1,294 +0,0 @@ -use std::cmp::max; - -use axum::extract::State; -use conduwuit::{ - Error, Result, err, info, - matrix::{StateKey, pdu::PduBuilder}, -}; -use futures::StreamExt; -use ruma::{ - CanonicalJsonObject, RoomId, RoomVersionId, - api::client::{error::ErrorKind, room::upgrade_room}, - events::{ - StateEventType, TimelineEventType, - room::{ - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - tombstone::RoomTombstoneEventContent, - }, - }, - int, -}; -use serde_json::{json, value::to_raw_value}; - -use crate::Ruma; - -/// Recommended transferable state events list from the spec -const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 9] = &[ - StateEventType::RoomAvatar, - StateEventType::RoomEncryption, - StateEventType::RoomGuestAccess, - StateEventType::RoomHistoryVisibility, - StateEventType::RoomJoinRules, - StateEventType::RoomName, - StateEventType::RoomPowerLevels, - StateEventType::RoomServerAcl, - StateEventType::RoomTopic, -]; - -/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` -/// -/// Upgrades the room. 
-/// -/// - Creates a replacement room -/// - Sends a tombstone event into the current room -/// - Sender user joins the room -/// - Transfers some state events -/// - Moves local aliases -/// - Modifies old room power levels to prevent users from speaking -pub(crate) async fn upgrade_room_route( - State(services): State, - body: Ruma, -) -> Result { - debug_assert!( - TRANSFERABLE_STATE_EVENTS.is_sorted(), - "TRANSFERABLE_STATE_EVENTS is not sorted" - ); - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if !services.server.supported_room_version(&body.new_version) { - return Err(Error::BadRequest( - ErrorKind::UnsupportedRoomVersion, - "This server does not support that room version.", - )); - } - - // Create a replacement room - let replacement_room = RoomId::new(services.globals.server_name()); - - let _short_id = services - .rooms - .short - .get_or_create_shortroomid(&replacement_room) - .await; - - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - // Send a m.room.tombstone event to the old room to indicate that it is not - // intended to be used any further Fail if the sender does not have the required - // permissions - let tombstone_event_id = services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent { - body: "This room has been replaced".to_owned(), - replacement_room: replacement_room.clone(), - }), - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - // Change lock to replacement room - drop(state_lock); - let state_lock = services.rooms.state.mutex.lock(&replacement_room).await; - - // Get the old room creation event - let mut create_event_content: CanonicalJsonObject = services - .rooms - .state_accessor - .room_state_get_content(&body.room_id, &StateEventType::RoomCreate, "") - .await - .map_err(|_| err!(Database("Found room without m.room.create event.")))?; - - // Use the m.room.tombstone event as the predecessor - 
let predecessor = Some(ruma::events::room::create::PreviousRoom::new( - body.room_id.clone(), - Some(tombstone_event_id), - )); - - // Send a m.room.create event containing a predecessor field and the applicable - // room_version - { - use RoomVersionId::*; - match body.new_version { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - create_event_content.insert( - "creator".into(), - json!(&sender_user).try_into().map_err(|e| { - info!("Error forming creation event: {e}"); - Error::BadRequest(ErrorKind::BadJson, "Error forming creation event") - })?, - ); - }, - | _ => { - // "creator" key no longer exists in V11+ rooms - create_event_content.remove("creator"); - }, - } - } - - create_event_content.insert( - "room_version".into(), - json!(&body.new_version) - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, - ); - create_event_content.insert( - "predecessor".into(), - json!(predecessor) - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, - ); - - // Validate creation event content - if serde_json::from_str::( - to_raw_value(&create_event_content) - .expect("Error forming creation event") - .get(), - ) - .is_err() - { - return Err(Error::BadRequest(ErrorKind::BadJson, "Error forming creation event")); - } - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&create_event_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(StateKey::new()), - redacts: None, - timestamp: None, - }, - sender_user, - &replacement_room, - &state_lock, - ) - .await?; - - // Join the new room - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: 
services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - is_direct: None, - third_party_invite: None, - blurhash: services.users.blurhash(sender_user).await.ok(), - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.as_str().into()), - redacts: None, - timestamp: None, - }, - sender_user, - &replacement_room, - &state_lock, - ) - .await?; - - // Replicate transferable state events to the new room - for event_type in TRANSFERABLE_STATE_EVENTS { - let event_content = match services - .rooms - .state_accessor - .room_state_get(&body.room_id, event_type, "") - .await - { - | Ok(v) => v.content.clone(), - | Err(_) => continue, // Skipping missing events. - }; - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: event_type.to_string().into(), - content: event_content, - state_key: Some(StateKey::new()), - ..Default::default() - }, - sender_user, - &replacement_room, - &state_lock, - ) - .await?; - } - - // Moves any local aliases to the new room - let mut local_aliases = services - .rooms - .alias - .local_aliases_for_room(&body.room_id) - .boxed(); - - while let Some(alias) = local_aliases.next().await { - services - .rooms - .alias - .remove_alias(alias, sender_user) - .await?; - - services - .rooms - .alias - .set_alias(alias, &replacement_room, sender_user)?; - } - - // Get the old room power levels - let power_levels_event_content: RoomPowerLevelsEventContent = services - .rooms - .state_accessor - .room_state_get_content(&body.room_id, &StateEventType::RoomPowerLevels, "") - .await - .map_err(|_| err!(Database("Found room without m.room.power_levels event.")))?; - - // Setting events_default and invite to the greater of 50 and users_default + 1 - let new_level = max( - int!(50), - power_levels_event_content - .users_default - .checked_add(int!(1)) - .ok_or_else(|| 
{ - err!(Request(BadJson("users_default power levels event content is not valid"))) - })?, - ); - - // Modify the power levels in the old room to prevent sending of events and - // inviting new users - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(StateKey::new(), &RoomPowerLevelsEventContent { - events_default: new_level, - invite: new_level, - ..power_levels_event_content - }), - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - // Return the replacement room id - Ok(upgrade_room::v3::Response { replacement_room }) -} diff --git a/src/api/client/search.rs b/src/api/client/search.rs deleted file mode 100644 index d4dcde57..00000000 --- a/src/api/client/search.rs +++ /dev/null @@ -1,219 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use conduwuit::{ - Err, Result, at, is_true, - matrix::pdu::PduEvent, - result::FlatOk, - utils::{IterStream, stream::ReadyExt}, -}; -use conduwuit_service::{Services, rooms::search::RoomQuery}; -use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture}; -use ruma::{ - OwnedRoomId, RoomId, UInt, UserId, - api::client::search::search_events::{ - self, - v3::{Criteria, EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, - }, - events::AnyStateEvent, - serde::Raw, -}; -use search_events::v3::{Request, Response}; - -use crate::Ruma; - -type RoomStates = BTreeMap; -type RoomState = Vec>; - -const LIMIT_DEFAULT: usize = 10; -const LIMIT_MAX: usize = 100; -const BATCH_MAX: usize = 20; - -/// # `POST /_matrix/client/r0/search` -/// -/// Searches rooms for messages. 
-/// -/// - Only works if the user is currently joined to the room (TODO: Respect -/// history visibility) -pub(crate) async fn search_events_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - let next_batch = body.next_batch.as_deref(); - let room_events_result: OptionFuture<_> = body - .search_categories - .room_events - .as_ref() - .map(|criteria| category_room_events(&services, sender_user, next_batch, criteria)) - .into(); - - Ok(Response { - search_categories: ResultCategories { - room_events: room_events_result - .await - .unwrap_or_else(|| Ok(ResultRoomEvents::default()))?, - }, - }) -} - -#[allow(clippy::map_unwrap_or)] -async fn category_room_events( - services: &Services, - sender_user: &UserId, - next_batch: Option<&str>, - criteria: &Criteria, -) -> Result { - let filter = &criteria.filter; - - let limit: usize = filter - .limit - .map(TryInto::try_into) - .flat_ok() - .unwrap_or(LIMIT_DEFAULT) - .min(LIMIT_MAX); - - let next_batch: usize = next_batch - .map(str::parse) - .transpose()? 
- .unwrap_or(0) - .min(limit.saturating_mul(BATCH_MAX)); - - let rooms = filter - .rooms - .clone() - .map(IntoIterator::into_iter) - .map(IterStream::stream) - .map(StreamExt::boxed) - .unwrap_or_else(|| { - services - .rooms - .state_cache - .rooms_joined(sender_user) - .map(ToOwned::to_owned) - .boxed() - }); - - let results: Vec<_> = rooms - .filter_map(|room_id| async move { - check_room_visible(services, sender_user, &room_id, criteria) - .await - .is_ok() - .then_some(room_id) - }) - .filter_map(|room_id| async move { - let query = RoomQuery { - room_id: &room_id, - user_id: Some(sender_user), - criteria, - skip: next_batch, - limit, - }; - - let (count, results) = services.rooms.search.search_pdus(&query).await.ok()?; - - results - .collect::>() - .map(|results| (room_id.clone(), count, results)) - .map(Some) - .await - }) - .collect() - .await; - - let total: UInt = results - .iter() - .fold(0, |a: usize, (_, count, _)| a.saturating_add(*count)) - .try_into()?; - - let state: RoomStates = results - .iter() - .stream() - .ready_filter(|_| criteria.include_state.is_some_and(is_true!())) - .filter_map(|(room_id, ..)| async move { - procure_room_state(services, room_id) - .map_ok(|state| (room_id.clone(), state)) - .await - .ok() - }) - .collect() - .await; - - let results: Vec = results - .into_iter() - .map(at!(2)) - .flatten() - .stream() - .map(PduEvent::into_room_event) - .map(|result| SearchResult { - rank: None, - result: Some(result), - context: EventContextResult { - profile_info: BTreeMap::new(), //TODO - events_after: Vec::new(), //TODO - events_before: Vec::new(), //TODO - start: None, //TODO - end: None, //TODO - }, - }) - .collect() - .await; - - let highlights = criteria - .search_term - .split_terminator(|c: char| !c.is_alphanumeric()) - .map(str::to_lowercase) - .collect(); - - let next_batch = (results.len() >= limit) - .then_some(next_batch.saturating_add(results.len())) - .as_ref() - .map(ToString::to_string); - - Ok(ResultRoomEvents { - 
count: Some(total), - next_batch, - results, - state, - highlights, - groups: BTreeMap::new(), // TODO - }) -} - -async fn procure_room_state(services: &Services, room_id: &RoomId) -> Result { - let state = services - .rooms - .state_accessor - .room_state_full_pdus(room_id) - .map_ok(PduEvent::into_state_event) - .try_collect() - .await?; - - Ok(state) -} - -async fn check_room_visible( - services: &Services, - user_id: &UserId, - room_id: &RoomId, - search: &Criteria, -) -> Result { - let check_visible = search.filter.rooms.is_some(); - let check_state = check_visible && search.include_state.is_some_and(is_true!()); - - let is_joined = - !check_visible || services.rooms.state_cache.is_joined(user_id, room_id).await; - - let state_visible = !check_state - || services - .rooms - .state_accessor - .user_can_see_state_events(user_id, room_id) - .await; - - if !is_joined || !state_visible { - return Err!(Request(Forbidden("You don't have permission to view {room_id:?}"))); - } - - Ok(()) -} diff --git a/src/api/client/send.rs b/src/api/client/send.rs deleted file mode 100644 index f753fa65..00000000 --- a/src/api/client/send.rs +++ /dev/null @@ -1,95 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use conduwuit::{Err, Result, err, matrix::pdu::PduBuilder, utils}; -use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; -use serde_json::from_str; - -use crate::Ruma; - -/// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` -/// -/// Send a message event into the room. 
-/// -/// - Is a NOOP if the txn id was already used before and returns the same event -/// id again -/// - The only requirement for the content is that it has to be valid json -/// - Tries to send the event into the room, auth rules will determine if it is -/// allowed -pub(crate) async fn send_message_event_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - let sender_device = body.sender_device.as_deref(); - let appservice_info = body.appservice_info.as_ref(); - - // Forbid m.room.encrypted if encryption is disabled - if MessageLikeEventType::RoomEncrypted == body.event_type && !services.config.allow_encryption - { - return Err!(Request(Forbidden("Encryption has been disabled"))); - } - - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - if body.event_type == MessageLikeEventType::CallInvite - && services.rooms.directory.is_public_room(&body.room_id).await - { - return Err!(Request(Forbidden("Room call invites are not allowed in public rooms"))); - } - - // Check if this is a new transaction id - if let Ok(response) = services - .transaction_ids - .existing_txnid(sender_user, sender_device, &body.txn_id) - .await - { - // The client might have sent a txnid of the /sendToDevice endpoint - // This txnid has no response associated with it - if response.is_empty() { - return Err!(Request(InvalidParam( - "Tried to use txn id already used for an incompatible endpoint." 
- ))); - } - - return Ok(send_message_event::v3::Response { - event_id: utils::string_from_bytes(&response) - .map(TryInto::try_into) - .map_err(|e| err!(Database("Invalid event_id in txnid data: {e:?}")))??, - }); - } - - let mut unsigned = BTreeMap::new(); - unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); - - let content = from_str(body.body.body.json().get()) - .map_err(|e| err!(Request(BadJson("Invalid JSON body: {e}"))))?; - - let event_id = services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: body.event_type.clone().into(), - content, - unsigned: Some(unsigned), - timestamp: appservice_info.and(body.timestamp), - ..Default::default() - }, - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - services.transaction_ids.add_txnid( - sender_user, - sender_device, - &body.txn_id, - event_id.as_bytes(), - ); - - drop(state_lock); - - Ok(send_message_event::v3::Response { event_id }) -} diff --git a/src/api/client/session.rs b/src/api/client/session.rs deleted file mode 100644 index 2499a43d..00000000 --- a/src/api/client/session.rs +++ /dev/null @@ -1,377 +0,0 @@ -use std::time::Duration; - -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{ - Err, Error, Result, debug, err, info, utils, - utils::{ReadyExt, hash}, -}; -use conduwuit_service::uiaa::SESSION_ID_LENGTH; -use futures::StreamExt; -use ruma::{ - UserId, - api::client::{ - session::{ - get_login_token, - get_login_types::{ - self, - v3::{ApplicationServiceLoginType, PasswordLoginType, TokenLoginType}, - }, - login::{ - self, - v3::{DiscoveryInfo, HomeserverInfo}, - }, - logout, logout_all, - }, - uiaa, - }, -}; - -use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::Ruma; - -/// # `GET /_matrix/client/v3/login` -/// -/// Get the supported login types of this server. One of these should be used as -/// the `type` field when logging in. 
-#[tracing::instrument(skip_all, fields(%client), name = "login")] -pub(crate) async fn get_login_types_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - _body: Ruma, -) -> Result { - Ok(get_login_types::v3::Response::new(vec![ - get_login_types::v3::LoginType::Password(PasswordLoginType::default()), - get_login_types::v3::LoginType::ApplicationService(ApplicationServiceLoginType::default()), - get_login_types::v3::LoginType::Token(TokenLoginType { - get_login_token: services.server.config.login_via_existing_session, - }), - ])) -} - -/// # `POST /_matrix/client/v3/login` -/// -/// Authenticates the user and returns an access token it can use in subsequent -/// requests. -/// -/// - The user needs to authenticate using their password (or if enabled using a -/// json web token) -/// - If `device_id` is known: invalidates old access token of that device -/// - If `device_id` is unknown: creates a new device -/// - Returns access token that is associated with the user and device -/// -/// Note: You can use [`GET -/// /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see -/// supported login types. -#[tracing::instrument(skip_all, fields(%client), name = "login")] -pub(crate) async fn login_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let emergency_mode_enabled = services.config.emergency_password.is_some(); - - // Validate login method - // TODO: Other login methods - let user_id = match &body.login_info { - #[allow(deprecated)] - | login::v3::LoginInfo::Password(login::v3::Password { - identifier, - password, - user, - .. 
- }) => { - debug!("Got password login type"); - let user_id = - if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name(user_id, &services.config.server_name) - } else if let Some(user) = user { - UserId::parse_with_server_name(user, &services.config.server_name) - } else { - return Err!(Request(Unknown( - debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") - ))); - } - .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; - - let lowercased_user_id = UserId::parse_with_server_name( - user_id.localpart().to_lowercase(), - &services.config.server_name, - )?; - - if !services.globals.user_is_local(&user_id) - || !services.globals.user_is_local(&lowercased_user_id) - { - return Err!(Request(Unknown("User ID does not belong to this homeserver"))); - } - - // first try the username as-is - let hash = services - .users - .password_hash(&user_id) - .await - .inspect_err(|e| debug!("{e}")); - - match hash { - | Ok(hash) => { - if hash.is_empty() { - return Err!(Request(UserDeactivated("The user has been deactivated"))); - } - - hash::verify_password(password, &hash) - .inspect_err(|e| debug!("{e}")) - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; - - user_id - }, - | Err(_e) => { - let hash_lowercased_user_id = services - .users - .password_hash(&lowercased_user_id) - .await - .inspect_err(|e| debug!("{e}")) - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; - - if hash_lowercased_user_id.is_empty() { - return Err!(Request(UserDeactivated("The user has been deactivated"))); - } - - hash::verify_password(password, &hash_lowercased_user_id) - .inspect_err(|e| debug!("{e}")) - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; - - lowercased_user_id - }, - } - }, - | login::v3::LoginInfo::Token(login::v3::Token { token }) => { - debug!("Got token login type"); - 
if !services.server.config.login_via_existing_session { - return Err!(Request(Unknown("Token login is not enabled."))); - } - services.users.find_from_login_token(token).await? - }, - #[allow(deprecated)] - | login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { - identifier, - user, - }) => { - debug!("Got appservice login type"); - - let Some(ref info) = body.appservice_info else { - return Err!(Request(MissingToken("Missing appservice token."))); - }; - - let user_id = - if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name(user_id, &services.config.server_name) - } else if let Some(user) = user { - UserId::parse_with_server_name(user, &services.config.server_name) - } else { - return Err!(Request(Unknown( - debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") - ))); - } - .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; - - if !services.globals.user_is_local(&user_id) { - return Err!(Request(Unknown("User ID does not belong to this homeserver"))); - } - - if !info.is_user_match(&user_id) && !emergency_mode_enabled { - return Err!(Request(Exclusive("Username is not in an appservice namespace."))); - } - - user_id - }, - | _ => { - debug!("/login json_body: {:?}", &body.json_body); - return Err!(Request(Unknown( - debug_warn!(?body.login_info, "Invalid or unsupported login type") - ))); - }, - }; - - // Generate new device id if the user didn't specify one - let device_id = body - .device_id - .clone() - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); - - // Generate a new token for the device - let token = utils::random_string(TOKEN_LENGTH); - - // Determine if device_id was provided and exists in the db for this user - let device_exists = if body.device_id.is_some() { - services - .users - .all_device_ids(&user_id) - .ready_any(|v| v == device_id) - .await - } else { - false 
- }; - - if device_exists { - services - .users - .set_token(&user_id, &device_id, &token) - .await?; - } else { - services - .users - .create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - Some(client.to_string()), - ) - .await?; - } - - // send client well-known if specified so the client knows to reconfigure itself - let client_discovery_info: Option = services - .server - .config - .well_known - .client - .as_ref() - .map(|server| DiscoveryInfo::new(HomeserverInfo::new(server.to_string()))); - - info!("{user_id} logged in"); - - #[allow(deprecated)] - Ok(login::v3::Response { - user_id, - access_token: token, - device_id, - well_known: client_discovery_info, - expires_in: None, - home_server: Some(services.config.server_name.clone()), - refresh_token: None, - }) -} - -/// # `POST /_matrix/client/v1/login/get_token` -/// -/// Allows a logged-in user to get a short-lived token which can be used -/// to log in with the m.login.token flow. -/// -/// -#[tracing::instrument(skip_all, fields(%client), name = "login_token")] -pub(crate) async fn login_token_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - if !services.server.config.login_via_existing_session { - return Err!(Request(Forbidden("Login via an existing session is not enabled"))); - } - - let sender_user = body.sender_user(); - let sender_device = body.sender_device(); - - // This route SHOULD have UIA - // TODO: How do we make only UIA sessions that have not been used before valid? 
- - let mut uiaainfo = uiaa::UiaaInfo { - flows: vec![uiaa::AuthFlow { stages: vec![uiaa::AuthType::Password] }], - completed: Vec::new(), - params: Box::default(), - session: None, - auth_error: None, - }; - - match &body.auth { - | Some(auth) => { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; - - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - - // Success! - }, - | _ => match body.json_body.as_ref() { - | Some(json) => { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, json); - - return Err(Error::Uiaa(uiaainfo)); - }, - | _ => { - return Err!(Request(NotJson("No JSON body was sent when required."))); - }, - }, - } - - let login_token = utils::random_string(TOKEN_LENGTH); - let expires_in = services.users.create_login_token(sender_user, &login_token); - - Ok(get_login_token::v1::Response { - expires_in: Duration::from_millis(expires_in), - login_token, - }) -} - -/// # `POST /_matrix/client/v3/logout` -/// -/// Log out the current device. -/// -/// - Invalidates access token -/// - Deletes device metadata (device id, device display name, last seen ip, -/// last seen ts) -/// - Forgets to-device events -/// - Triggers device list updates -#[tracing::instrument(skip_all, fields(%client), name = "logout")] -pub(crate) async fn logout_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - - services - .users - .remove_device(sender_user, sender_device) - .await; - - Ok(logout::v3::Response::new()) -} - -/// # `POST /_matrix/client/r0/logout/all` -/// -/// Log out all devices of this user. 
-/// -/// - Invalidates all access tokens -/// - Deletes all device metadata (device id, device display name, last seen ip, -/// last seen ts) -/// - Forgets all to-device events -/// - Triggers device list updates -/// -/// Note: This is equivalent to calling [`GET -/// /_matrix/client/r0/logout`](fn.logout_route.html) from each device of this -/// user. -#[tracing::instrument(skip_all, fields(%client), name = "logout")] -pub(crate) async fn logout_all_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - services - .users - .all_device_ids(sender_user) - .for_each(|device_id| services.users.remove_device(sender_user, device_id)) - .await; - - Ok(logout_all::v3::Response::new()) -} diff --git a/src/api/client/space.rs b/src/api/client/space.rs deleted file mode 100644 index 92768926..00000000 --- a/src/api/client/space.rs +++ /dev/null @@ -1,195 +0,0 @@ -use std::{ - collections::{BTreeSet, VecDeque}, - str::FromStr, -}; - -use axum::extract::State; -use conduwuit::{ - Err, Result, - utils::{future::TryExtExt, stream::IterStream}, -}; -use conduwuit_service::{ - Services, - rooms::spaces::{ - PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk, - }, -}; -use futures::{StreamExt, TryFutureExt, future::OptionFuture}; -use ruma::{ - OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, -}; - -use crate::Ruma; - -/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy` -/// -/// Paginates over the space tree in a depth-first manner to locate child rooms -/// of a given space. 
-pub(crate) async fn get_hierarchy_route( - State(services): State, - body: Ruma, -) -> Result { - let limit = body - .limit - .unwrap_or_else(|| UInt::from(10_u32)) - .min(UInt::from(100_u32)); - - let max_depth = body - .max_depth - .unwrap_or_else(|| UInt::from(3_u32)) - .min(UInt::from(10_u32)); - - let key = body - .from - .as_ref() - .and_then(|s| PaginationToken::from_str(s).ok()); - - // Should prevent unexpeded behaviour in (bad) clients - if let Some(ref token) = key { - if token.suggested_only != body.suggested_only || token.max_depth != max_depth { - return Err!(Request(InvalidParam( - "suggested_only and max_depth cannot change on paginated requests" - ))); - } - } - - get_client_hierarchy( - &services, - body.sender_user(), - &body.room_id, - limit.try_into().unwrap_or(10), - max_depth.try_into().unwrap_or(usize::MAX), - body.suggested_only, - key.as_ref() - .into_iter() - .flat_map(|t| t.short_room_ids.iter()), - ) - .await -} - -async fn get_client_hierarchy<'a, ShortRoomIds>( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - limit: usize, - max_depth: usize, - suggested_only: bool, - short_room_ids: ShortRoomIds, -) -> Result -where - ShortRoomIds: Iterator + Clone + Send + Sync + 'a, -{ - type Via = Vec; - type Entry = (OwnedRoomId, Via); - type Rooms = VecDeque; - - let mut queue: Rooms = [( - room_id.to_owned(), - room_id - .server_name() - .map(ToOwned::to_owned) - .into_iter() - .collect(), - )] - .into(); - - let mut rooms = Vec::with_capacity(limit); - let mut parents = BTreeSet::new(); - while let Some((current_room, via)) = queue.pop_front() { - let summary = services - .rooms - .spaces - .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) - .await?; - - match (summary, current_room == room_id) { - | (None | Some(SummaryAccessibility::Inaccessible), false) => { - // Just ignore other unavailable rooms - }, - | (None, true) => { - return Err!(Request(Forbidden("The requested room was not 
found"))); - }, - | (Some(SummaryAccessibility::Inaccessible), true) => { - return Err!(Request(Forbidden("The requested room is inaccessible"))); - }, - | (Some(SummaryAccessibility::Accessible(summary)), _) => { - let populate = parents.len() >= short_room_ids.clone().count(); - - let mut children: Vec = get_parent_children_via(&summary, suggested_only) - .filter(|(room, _)| !parents.contains(room)) - .rev() - .map(|(key, val)| (key, val.collect())) - .collect(); - - if !populate { - children = children - .iter() - .rev() - .stream() - .skip_while(|(room, _)| { - services - .rooms - .short - .get_shortroomid(room) - .map_ok(|short| { - Some(&short) != short_room_ids.clone().nth(parents.len()) - }) - .unwrap_or_else(|_| false) - }) - .map(Clone::clone) - .collect::>() - .await - .into_iter() - .rev() - .collect(); - } - - if populate { - rooms.push(summary_to_chunk(summary.clone())); - } else if queue.is_empty() && children.is_empty() { - return Err!(Request(InvalidParam("Room IDs in token were not found."))); - } - - parents.insert(current_room.clone()); - if rooms.len() >= limit { - break; - } - - if parents.len() > max_depth { - continue; - } - - queue.extend(children); - }, - } - } - - let next_batch: OptionFuture<_> = queue - .pop_front() - .map(|(room, _)| async move { - parents.insert(room); - - let next_short_room_ids: Vec<_> = parents - .iter() - .stream() - .filter_map(|room_id| services.rooms.short.get_shortroomid(room_id).ok()) - .collect() - .await; - - (next_short_room_ids.iter().ne(short_room_ids) && !next_short_room_ids.is_empty()) - .then_some(PaginationToken { - short_room_ids: next_short_room_ids, - limit: limit.try_into().ok()?, - max_depth: max_depth.try_into().ok()?, - suggested_only, - }) - .as_ref() - .map(PaginationToken::to_string) - }) - .into(); - - Ok(get_hierarchy::v1::Response { - next_batch: next_batch.await.flatten(), - rooms, - }) -} diff --git a/src/api/client/state.rs b/src/api/client/state.rs deleted file mode 100644 index 
2ddc8f14..00000000 --- a/src/api/client/state.rs +++ /dev/null @@ -1,403 +0,0 @@ -use axum::extract::State; -use conduwuit::{ - Err, Result, err, - matrix::pdu::{PduBuilder, PduEvent}, - utils::BoolExt, -}; -use conduwuit_service::Services; -use futures::TryStreamExt; -use ruma::{ - OwnedEventId, RoomId, UserId, - api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, - events::{ - AnyStateEventContent, StateEventType, - room::{ - canonical_alias::RoomCanonicalAliasEventContent, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - server_acl::RoomServerAclEventContent, - }, - }, - serde::Raw, -}; - -use crate::{Ruma, RumaResponse}; - -/// # `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}/{stateKey}` -/// -/// Sends a state event into the room. -pub(crate) async fn send_state_event_for_key_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - Ok(send_state_event::v3::Response { - event_id: send_state_event_for_key_helper( - &services, - sender_user, - &body.room_id, - &body.event_type, - &body.body.body, - &body.state_key, - if body.appservice_info.is_some() { - body.timestamp - } else { - None - }, - ) - .await?, - }) -} - -/// # `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}` -/// -/// Sends a state event into the room. -pub(crate) async fn send_state_event_for_empty_key_route( - State(services): State, - body: Ruma, -) -> Result> { - send_state_event_for_key_route(State(services), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/client/v3/rooms/{roomid}/state` -/// -/// Get all state events for a room. 
-/// -/// - If not joined: Only works if current room history visibility is world -/// readable -pub(crate) async fn get_state_events_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if !services - .rooms - .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) - .await - { - return Err!(Request(Forbidden("You don't have permission to view the room state."))); - } - - Ok(get_state_events::v3::Response { - room_state: services - .rooms - .state_accessor - .room_state_full_pdus(&body.room_id) - .map_ok(PduEvent::into_state_event) - .try_collect() - .await?, - }) -} - -/// # `GET /_matrix/client/v3/rooms/{roomid}/state/{eventType}/{stateKey}` -/// -/// Get single state event of a room with the specified state key. -/// The optional query parameter `?format=event|content` allows returning the -/// full room state event or just the state event's content (default behaviour) -/// -/// - If not joined: Only works if current room history visibility is world -/// readable -pub(crate) async fn get_state_events_for_key_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if !services - .rooms - .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) - .await - { - return Err!(Request(NotFound(debug_warn!( - "You don't have permission to view the room state." 
- )))); - } - - let event = services - .rooms - .state_accessor - .room_state_get(&body.room_id, &body.event_type, &body.state_key) - .await - .map_err(|_| { - err!(Request(NotFound(debug_warn!( - room_id = ?body.room_id, - event_type = ?body.event_type, - "State event not found in room.", - )))) - })?; - - let event_format = body - .format - .as_ref() - .is_some_and(|f| f.to_lowercase().eq("event")); - - Ok(get_state_events_for_key::v3::Response { - content: event_format.or(|| event.get_content_as_value()), - event: event_format.then(|| event.into_state_event_value()), - }) -} - -/// # `GET /_matrix/client/v3/rooms/{roomid}/state/{eventType}` -/// -/// Get single state event of a room. -/// The optional query parameter `?format=event|content` allows returning the -/// full room state event or just the state event's content (default behaviour) -/// -/// - If not joined: Only works if current room history visibility is world -/// readable -pub(crate) async fn get_state_events_for_empty_key_route( - State(services): State, - body: Ruma, -) -> Result> { - get_state_events_for_key_route(State(services), body) - .await - .map(RumaResponse) -} - -async fn send_state_event_for_key_helper( - services: &Services, - sender: &UserId, - room_id: &RoomId, - event_type: &StateEventType, - json: &Raw, - state_key: &str, - timestamp: Option, -) -> Result { - allowed_to_send_state_event(services, room_id, event_type, state_key, json).await?; - let state_lock = services.rooms.state.mutex.lock(room_id).await; - let event_id = services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: event_type.to_string().into(), - content: serde_json::from_str(json.json().get())?, - state_key: Some(state_key.into()), - timestamp, - ..Default::default() - }, - sender, - room_id, - &state_lock, - ) - .await?; - - Ok(event_id) -} - -async fn allowed_to_send_state_event( - services: &Services, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - json: &Raw, 
-) -> Result { - match event_type { - | StateEventType::RoomCreate => { - return Err!(Request(BadJson(debug_warn!( - ?room_id, - "You cannot update m.room.create after a room has been created." - )))); - }, - | StateEventType::RoomServerAcl => { - // prevents common ACL paw-guns as ACL management is difficult and prone to - // irreversible mistakes - match json.deserialize_as::() { - | Ok(acl_content) => { - if acl_content.allow_is_empty() { - return Err!(Request(BadJson(debug_warn!( - ?room_id, - "Sending an ACL event with an empty allow key will permanently \ - brick the room for non-conduwuit's as this equates to no servers \ - being allowed to participate in this room." - )))); - } - - if acl_content.deny_contains("*") && acl_content.allow_contains("*") { - return Err!(Request(BadJson(debug_warn!( - ?room_id, - "Sending an ACL event with a deny and allow key value of \"*\" will \ - permanently brick the room for non-conduwuit's as this equates to \ - no servers being allowed to participate in this room." - )))); - } - - if acl_content.deny_contains("*") - && !acl_content.is_allowed(services.globals.server_name()) - && !acl_content.allow_contains(services.globals.server_name().as_str()) - { - return Err!(Request(BadJson(debug_warn!( - ?room_id, - "Sending an ACL event with a deny key value of \"*\" and without \ - your own server name in the allow key will result in you being \ - unable to participate in this room." - )))); - } - - if !acl_content.allow_contains("*") - && !acl_content.is_allowed(services.globals.server_name()) - && !acl_content.allow_contains(services.globals.server_name().as_str()) - { - return Err!(Request(BadJson(debug_warn!( - ?room_id, - "Sending an ACL event for an allow key without \"*\" and without \ - your own server name in the allow key will result in you being \ - unable to participate in this room." 
- )))); - } - }, - | Err(e) => { - return Err!(Request(BadJson(debug_warn!( - "Room server ACL event is invalid: {e}" - )))); - }, - } - }, - | StateEventType::RoomEncryption => - // Forbid m.room.encryption if encryption is disabled - if !services.config.allow_encryption { - return Err!(Request(Forbidden("Encryption is disabled on this homeserver."))); - }, - | StateEventType::RoomJoinRules => { - // admin room is a sensitive room, it should not ever be made public - if let Ok(admin_room_id) = services.admin.get_admin_room().await { - if admin_room_id == room_id { - match json.deserialize_as::() { - | Ok(join_rule) => - if join_rule.join_rule == JoinRule::Public { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made public" - ))); - }, - | Err(e) => { - return Err!(Request(BadJson(debug_warn!( - "Room join rules event is invalid: {e}" - )))); - }, - } - } - } - }, - | StateEventType::RoomHistoryVisibility => { - // admin room is a sensitive room, it should not ever be made world readable - if let Ok(admin_room_id) = services.admin.get_admin_room().await { - match json.deserialize_as::() { - | Ok(visibility_content) => { - if admin_room_id == room_id - && visibility_content.history_visibility - == HistoryVisibility::WorldReadable - { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made world \ - readable (public room history)." 
- ))); - } - }, - | Err(e) => { - return Err!(Request(BadJson(debug_warn!( - "Room history visibility event is invalid: {e}" - )))); - }, - } - } - }, - | StateEventType::RoomCanonicalAlias => { - match json.deserialize_as::() { - | Ok(canonical_alias_content) => { - let mut aliases = canonical_alias_content.alt_aliases.clone(); - - if let Some(alias) = canonical_alias_content.alias { - aliases.push(alias); - } - - for alias in aliases { - let (alias_room_id, _servers) = services - .rooms - .alias - .resolve_alias(&alias, None) - .await - .map_err(|e| { - err!(Request(Unknown("Failed resolving alias \"{alias}\": {e}"))) - })?; - - if alias_room_id != room_id { - return Err!(Request(BadAlias( - "Room alias {alias} does not belong to room {room_id}" - ))); - } - } - }, - | Err(e) => { - return Err!(Request(InvalidParam(debug_warn!( - "Room canonical alias event is invalid: {e}" - )))); - }, - } - }, - | StateEventType::RoomMember => match json.deserialize_as::() { - | Ok(membership_content) => { - let Ok(state_key) = UserId::parse(state_key) else { - return Err!(Request(BadJson( - "Membership event has invalid or non-existent state key" - ))); - }; - - if let Some(authorising_user) = - membership_content.join_authorized_via_users_server - { - if membership_content.membership != MembershipState::Join { - return Err!(Request(BadJson( - "join_authorised_via_users_server is only for member joins" - ))); - } - - if services - .rooms - .state_cache - .is_joined(state_key, room_id) - .await - { - return Err!(Request(InvalidParam( - "{state_key} is already joined, an authorising user is not required." 
- ))); - } - - if !services.globals.user_is_local(&authorising_user) { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} does not belong to this \ - homeserver" - ))); - } - - if !services - .rooms - .state_cache - .is_joined(&authorising_user, room_id) - .await - { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} is not in the room, they \ - cannot authorise the join." - ))); - } - } - }, - | Err(e) => { - return Err!(Request(BadJson( - "Membership content must have a valid JSON body with at least a valid \ - membership state: {e}" - ))); - }, - }, - | _ => (), - } - - Ok(()) -} diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs deleted file mode 100644 index 40370160..00000000 --- a/src/api/client/sync/mod.rs +++ /dev/null @@ -1,85 +0,0 @@ -mod v3; -mod v4; -mod v5; - -use conduwuit::{ - Error, PduCount, Result, - matrix::pdu::PduEvent, - utils::stream::{BroadbandExt, ReadyExt, TryIgnore}, -}; -use conduwuit_service::Services; -use futures::{StreamExt, pin_mut}; -use ruma::{ - RoomId, UserId, - events::TimelineEventType::{ - self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker, - }, -}; - -pub(crate) use self::{ - v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, -}; - -pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = - &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; - -async fn load_timeline( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - roomsincecount: PduCount, - next_batch: Option, - limit: usize, -) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { - let last_timeline_count = services - .rooms - .timeline - .last_timeline_count(Some(sender_user), room_id) - .await?; - - if last_timeline_count <= roomsincecount { - return Ok((Vec::new(), false)); - } - - let non_timeline_pdus = services - .rooms - .timeline - .pdus_rev(Some(sender_user), room_id, None) - .ignore_err() - 
.ready_skip_while(|&(pducount, _)| pducount > next_batch.unwrap_or_else(PduCount::max)) - .ready_take_while(|&(pducount, _)| pducount > roomsincecount); - - // Take the last events for the timeline - pin_mut!(non_timeline_pdus); - let timeline_pdus: Vec<_> = non_timeline_pdus.by_ref().take(limit).collect().await; - - let timeline_pdus: Vec<_> = timeline_pdus.into_iter().rev().collect(); - - // They /sync response doesn't always return all messages, so we say the output - // is limited unless there are events in non_timeline_pdus - let limited = non_timeline_pdus.next().await.is_some(); - - Ok((timeline_pdus, limited)) -} - -async fn share_encrypted_room( - services: &Services, - sender_user: &UserId, - user_id: &UserId, - ignore_room: Option<&RoomId>, -) -> bool { - services - .rooms - .state_cache - .get_shared_rooms(sender_user, user_id) - .ready_filter(|&room_id| Some(room_id) != ignore_room) - .map(ToOwned::to_owned) - .broad_any(|other_room_id| async move { - services - .rooms - .state_accessor - .is_encrypted_room(&other_room_id) - .await - }) - .await -} diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs deleted file mode 100644 index 8eac6b66..00000000 --- a/src/api/client/sync/v3.rs +++ /dev/null @@ -1,1244 +0,0 @@ -use std::{ - cmp::{self}, - collections::{BTreeMap, HashMap, HashSet}, - time::Duration, -}; - -use axum::extract::State; -use conduwuit::{ - Result, at, err, error, extract_variant, is_equal_to, - matrix::{ - Event, - pdu::{EventHash, PduCount, PduEvent}, - }, - pair_of, ref_at, - result::FlatOk, - utils::{ - self, BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, - future::{OptionStream, ReadyEqExt}, - math::ruma_from_u64, - stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, - }, - warn, -}; -use conduwuit_service::{ - Services, - rooms::{ - lazy_loading, - lazy_loading::{Options, Witness}, - short::ShortStateHash, - }, -}; -use futures::{ - FutureExt, StreamExt, TryFutureExt, TryStreamExt, - 
future::{OptionFuture, join, join3, join4, join5, try_join, try_join4}, - pin_mut, -}; -use ruma::{ - DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, - api::client::{ - filter::FilterDefinition, - sync::sync_events::{ - self, DeviceLists, UnreadNotificationsCount, - v3::{ - Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, - KnockState, KnockedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, - State as RoomState, Timeline, ToDevice, - }, - }, - uiaa::UiaaResponse, - }, - events::{ - AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, - TimelineEventType::*, - presence::{PresenceEvent, PresenceEventContent}, - room::member::{MembershipState, RoomMemberEventContent}, - }, - serde::Raw, - uint, -}; -use service::rooms::short::{ShortEventId, ShortStateKey}; - -use super::{load_timeline, share_encrypted_room}; -use crate::{Ruma, RumaResponse, client::ignored_filter}; - -#[derive(Default)] -struct StateChanges { - heroes: Option>, - joined_member_count: Option, - invited_member_count: Option, - state_events: Vec, - device_list_updates: HashSet, - left_encrypted_users: HashSet, -} - -type PresenceUpdates = HashMap; - -/// # `GET /_matrix/client/r0/sync` -/// -/// Synchronize the client's state with the latest state on the server. -/// -/// - This endpoint takes a `since` parameter which should be the `next_batch` -/// value from a previous request for incremental syncs. 
-/// -/// Calling this endpoint without a `since` parameter returns: -/// - Some of the most recent events of each timeline -/// - Notification counts for each room -/// - Joined and invited member counts, heroes -/// - All state events -/// -/// Calling this endpoint with a `since` parameter from a previous `next_batch` -/// returns: For joined rooms: -/// - Some of the most recent events of each timeline that happened after since -/// - If user joined the room after since: All state events (unless lazy loading -/// is activated) and all device list updates in that room -/// - If the user was already in the room: A list of all events that are in the -/// state now, but were not in the state at `since` -/// - If the state we send contains a member event: Joined and invited member -/// counts, heroes -/// - Device list updates that happened after `since` -/// - If there are events in the timeline we send or the user send updated his -/// read mark: Notification counts -/// - EDUs that are active now (read receipts, typing updates, presence) -/// - TODO: Allow multiple sync streams to support Pantalaimon -/// -/// For invited rooms: -/// - If the user was invited after `since`: A subset of the state of the room -/// at the point of the invite -/// -/// For left rooms: -/// - If the user left after `since`: `prev_batch` token, empty state (TODO: -/// subset of the state at the point of the leave) -#[tracing::instrument( - name = "sync", - level = "debug", - skip_all, - fields( - since = %body.body.since.as_deref().unwrap_or_default(), - ) -)] -pub(crate) async fn sync_events_route( - State(services): State, - body: Ruma, -) -> Result> { - let (sender_user, sender_device) = body.sender(); - - // Presence update - if services.config.allow_local_presence { - services - .presence - .ping_presence(sender_user, &body.body.set_presence) - .await?; - } - - // Setup watchers, so if there's no response, we can wait for them - let watcher = services.sync.watch(sender_user, 
sender_device); - - let response = build_sync_events(&services, &body).await?; - if body.body.full_state - || !(response.rooms.is_empty() - && response.presence.is_empty() - && response.account_data.is_empty() - && response.device_lists.is_empty() - && response.to_device.is_empty()) - { - return Ok(response); - } - - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let default = Duration::from_secs(30); - let duration = cmp::min(body.body.timeout.unwrap_or(default), default); - _ = tokio::time::timeout(duration, watcher).await; - - // Retry returning data - build_sync_events(&services, &body).await -} - -pub(crate) async fn build_sync_events( - services: &Services, - body: &Ruma, -) -> Result> { - let (sender_user, sender_device) = body.sender(); - - let next_batch = services.globals.current_count()?; - let since = body - .body - .since - .as_ref() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - - let full_state = body.body.full_state; - let filter = match body.body.filter.as_ref() { - | None => FilterDefinition::default(), - | Some(Filter::FilterDefinition(filter)) => filter.clone(), - | Some(Filter::FilterId(filter_id)) => services - .users - .get_filter(sender_user, filter_id) - .await - .unwrap_or_default(), - }; - - let joined_rooms = services - .rooms - .state_cache - .rooms_joined(sender_user) - .map(ToOwned::to_owned) - .broad_filter_map(|room_id| { - load_joined_room( - services, - sender_user, - sender_device, - room_id.clone(), - since, - next_batch, - full_state, - &filter, - ) - .map_ok(move |(joined_room, dlu, jeu)| (room_id, joined_room, dlu, jeu)) - .ok() - }) - .ready_fold( - (BTreeMap::new(), HashSet::new(), HashSet::new()), - |(mut joined_rooms, mut device_list_updates, mut left_encrypted_users), - (room_id, joined_room, dlu, leu)| { - device_list_updates.extend(dlu); - left_encrypted_users.extend(leu); - if !joined_room.is_empty() { - joined_rooms.insert(room_id, joined_room); - } - - 
(joined_rooms, device_list_updates, left_encrypted_users) - }, - ); - - let left_rooms = services - .rooms - .state_cache - .rooms_left(sender_user) - .broad_filter_map(|(room_id, _)| { - handle_left_room( - services, - since, - room_id.clone(), - sender_user, - next_batch, - full_state, - filter.room.include_leave, - &filter, - ) - .map_ok(move |left_room| (room_id, left_room)) - .ok() - }) - .ready_filter_map(|(room_id, left_room)| left_room.map(|left_room| (room_id, left_room))) - .collect(); - - let invited_rooms = services - .rooms - .state_cache - .rooms_invited(sender_user) - .fold_default(|mut invited_rooms: BTreeMap<_, _>, (room_id, invite_state)| async move { - let invite_count = services - .rooms - .state_cache - .get_invite_count(&room_id, sender_user) - .await - .ok(); - - // Invited before last sync - if Some(since) >= invite_count { - return invited_rooms; - } - - let invited_room = InvitedRoom { - invite_state: InviteState { events: invite_state }, - }; - - invited_rooms.insert(room_id, invited_room); - invited_rooms - }); - - let knocked_rooms = services - .rooms - .state_cache - .rooms_knocked(sender_user) - .fold_default(|mut knocked_rooms: BTreeMap<_, _>, (room_id, knock_state)| async move { - let knock_count = services - .rooms - .state_cache - .get_knock_count(&room_id, sender_user) - .await - .ok(); - - // Knocked before last sync - if Some(since) >= knock_count { - return knocked_rooms; - } - - let knocked_room = KnockedRoom { - knock_state: KnockState { events: knock_state }, - }; - - knocked_rooms.insert(room_id, knocked_room); - knocked_rooms - }); - - let presence_updates: OptionFuture<_> = services - .config - .allow_local_presence - .then(|| process_presence_updates(services, since, sender_user)) - .into(); - - let account_data = services - .account_data - .changes_since(None, sender_user, since, Some(next_batch)) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) - .collect(); - - // Look for device list 
updates of this account - let keys_changed = services - .users - .keys_changed(sender_user, since, Some(next_batch)) - .map(ToOwned::to_owned) - .collect::>(); - - let to_device_events = services - .users - .get_to_device_events(sender_user, sender_device, Some(since), Some(next_batch)) - .collect::>(); - - let device_one_time_keys_count = services - .users - .count_one_time_keys(sender_user, sender_device); - - // Remove all to-device events the device received *last time* - let remove_to_device_events = - services - .users - .remove_to_device_events(sender_user, sender_device, since); - - let rooms = join4(joined_rooms, left_rooms, invited_rooms, knocked_rooms); - let ephemeral = join3(remove_to_device_events, to_device_events, presence_updates); - let top = join5(account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) - .boxed() - .await; - - let (account_data, ephemeral, device_one_time_keys_count, keys_changed, rooms) = top; - let ((), to_device_events, presence_updates) = ephemeral; - let (joined_rooms, left_rooms, invited_rooms, knocked_rooms) = rooms; - let (joined_rooms, mut device_list_updates, left_encrypted_users) = joined_rooms; - device_list_updates.extend(keys_changed); - - // If the user doesn't share an encrypted room with the target anymore, we need - // to tell them - let device_list_left: HashSet<_> = left_encrypted_users - .into_iter() - .stream() - .broad_filter_map(|user_id| async move { - share_encrypted_room(services, sender_user, &user_id, None) - .await - .eq(&false) - .then_some(user_id) - }) - .collect() - .await; - - let response = sync_events::v3::Response { - account_data: GlobalAccountData { events: account_data }, - device_lists: DeviceLists { - changed: device_list_updates.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, - device_one_time_keys_count, - // Fallback keys are not yet supported - device_unused_fallback_key_types: None, - next_batch: next_batch.to_string(), - presence: 
Presence { - events: presence_updates - .into_iter() - .flat_map(IntoIterator::into_iter) - .map(|(sender, content)| PresenceEvent { content, sender }) - .map(|ref event| Raw::new(event)) - .filter_map(Result::ok) - .collect(), - }, - rooms: Rooms { - leave: left_rooms, - join: joined_rooms, - invite: invited_rooms, - knock: knocked_rooms, - }, - to_device: ToDevice { events: to_device_events }, - }; - - Ok(response) -} - -#[tracing::instrument(name = "presence", level = "debug", skip_all)] -async fn process_presence_updates( - services: &Services, - since: u64, - syncing_user: &UserId, -) -> PresenceUpdates { - services - .presence - .presence_since(since) - .filter(|(user_id, ..)| { - services - .rooms - .state_cache - .user_sees_user(syncing_user, user_id) - }) - .filter_map(|(user_id, _, presence_bytes)| { - services - .presence - .from_json_bytes_to_event(presence_bytes, user_id) - .map_ok(move |event| (user_id, event)) - .ok() - }) - .map(|(user_id, event)| (user_id.to_owned(), event.content)) - .collect() - .await -} - -#[tracing::instrument( - name = "left", - level = "debug", - skip_all, - fields( - room_id = %room_id, - full = %full_state, - ), -)] -#[allow(clippy::too_many_arguments)] -async fn handle_left_room( - services: &Services, - since: u64, - ref room_id: OwnedRoomId, - sender_user: &UserId, - next_batch: u64, - full_state: bool, - include_leave: bool, - filter: &FilterDefinition, -) -> Result> { - let left_count = services - .rooms - .state_cache - .get_left_count(room_id, sender_user) - .await - .ok(); - - // Left before last sync - if Some(since) >= left_count { - return Ok(None); - } - - let is_not_found = services.rooms.metadata.exists(room_id).eq(&false); - - let is_disabled = services.rooms.metadata.is_disabled(room_id); - - let is_banned = services.rooms.metadata.is_banned(room_id); - - pin_mut!(is_not_found, is_disabled, is_banned); - if is_not_found.or(is_disabled).or(is_banned).await { - // This is just a rejected invite, not a room we 
know - // Insert a leave event anyways for the client - let event = PduEvent { - event_id: EventId::new(services.globals.server_name()), - sender: sender_user.to_owned(), - origin: None, - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - kind: RoomMember, - content: serde_json::from_str(r#"{"membership":"leave"}"#) - .expect("this is valid JSON"), - state_key: Some(sender_user.as_str().into()), - unsigned: None, - // The following keys are dropped on conversion - room_id: room_id.clone(), - prev_events: vec![], - depth: uint!(1), - auth_events: vec![], - redacts: None, - hashes: EventHash { sha256: String::new() }, - signatures: None, - }; - - return Ok(Some(LeftRoom { - account_data: RoomAccountData { events: Vec::new() }, - timeline: Timeline { - limited: false, - prev_batch: Some(next_batch.to_string()), - events: Vec::new(), - }, - state: RoomState { - events: vec![event.into_sync_state_event()], - }, - })); - } - - let mut left_state_events = Vec::new(); - - let since_shortstatehash = services.rooms.user.get_token_shortstatehash(room_id, since); - - let since_state_ids: HashMap<_, OwnedEventId> = since_shortstatehash - .map_ok(|since_shortstatehash| { - services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .map(Ok) - }) - .try_flatten_stream() - .try_collect() - .await - .unwrap_or_default(); - - let Ok(left_event_id): Result = services - .rooms - .state_accessor - .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str()) - .await - else { - warn!("Left {room_id} but no left state event"); - return Ok(None); - }; - - let Ok(left_shortstatehash) = services - .rooms - .state_accessor - .pdu_shortstatehash(&left_event_id) - .await - else { - warn!(event_id = %left_event_id, "Leave event has no state in {room_id}"); - return Ok(None); - }; - - let mut left_state_ids: HashMap<_, _> = services - .rooms - .state_accessor - .state_full_ids(left_shortstatehash) 
- .collect() - .await; - - let leave_shortstatekey = services - .rooms - .short - .get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str()) - .await; - - left_state_ids.insert(leave_shortstatekey, left_event_id); - - for (shortstatekey, event_id) in left_state_ids { - if full_state || since_state_ids.get(&shortstatekey) != Some(&event_id) { - let (event_type, state_key) = services - .rooms - .short - .get_statekey_from_short(shortstatekey) - .await?; - - if filter.room.state.lazy_load_options.is_enabled() - && event_type == StateEventType::RoomMember - && !full_state - && state_key - .as_str() - .try_into() - .is_ok_and(|user_id: &UserId| sender_user != user_id) - { - continue; - } - - let Ok(pdu) = services.rooms.timeline.get_pdu(&event_id).await else { - error!("Pdu in state not found: {event_id}"); - continue; - }; - - if !include_leave && pdu.sender == sender_user { - continue; - } - - left_state_events.push(pdu.into_sync_state_event()); - } - } - - Ok(Some(LeftRoom { - account_data: RoomAccountData { events: Vec::new() }, - timeline: Timeline { - // TODO: support left timeline events so we dont need to set limited to true - limited: true, - prev_batch: Some(next_batch.to_string()), - events: Vec::new(), // and so we dont need to set this to empty vec - }, - state: RoomState { events: left_state_events }, - })) -} - -#[tracing::instrument( - name = "joined", - level = "debug", - skip_all, - fields( - room_id = ?room_id, - ), -)] -#[allow(clippy::too_many_arguments)] -async fn load_joined_room( - services: &Services, - sender_user: &UserId, - sender_device: &DeviceId, - ref room_id: OwnedRoomId, - since: u64, - next_batch: u64, - full_state: bool, - filter: &FilterDefinition, -) -> Result<(JoinedRoom, HashSet, HashSet)> { - let sincecount = PduCount::Normal(since); - let next_batchcount = PduCount::Normal(next_batch); - - let current_shortstatehash = services - .rooms - .state - .get_room_shortstatehash(room_id) - .map_err(|_| 
err!(Database(error!("Room {room_id} has no state")))); - - let since_shortstatehash = services - .rooms - .user - .get_token_shortstatehash(room_id, since) - .ok() - .map(Ok); - - let timeline = load_timeline( - services, - sender_user, - room_id, - sincecount, - Some(next_batchcount), - 10_usize, - ); - - let receipt_events = services - .rooms - .read_receipt - .readreceipts_since(room_id, since) - .filter_map(|(read_user, _, edu)| async move { - services - .users - .user_is_ignored(read_user, sender_user) - .await - .or_some((read_user.to_owned(), edu)) - }) - .collect::>>() - .map(Ok); - - let (current_shortstatehash, since_shortstatehash, timeline, receipt_events) = - try_join4(current_shortstatehash, since_shortstatehash, timeline, receipt_events) - .boxed() - .await?; - - let (timeline_pdus, limited) = timeline; - let initial = since_shortstatehash.is_none(); - let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() - || filter.room.timeline.lazy_load_options.is_enabled(); - - let lazy_loading_context = &lazy_loading::Context { - user_id: sender_user, - device_id: sender_device, - room_id, - token: Some(since), - options: Some(&filter.room.state.lazy_load_options), - }; - - // Reset lazy loading because this is an initial sync - let lazy_load_reset: OptionFuture<_> = initial - .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) - .into(); - - lazy_load_reset.await; - let witness: OptionFuture<_> = lazy_loading_enabled - .then(|| { - let witness: Witness = timeline_pdus - .iter() - .map(ref_at!(1)) - .map(Event::sender) - .map(Into::into) - .chain(receipt_events.keys().map(Into::into)) - .collect(); - - services - .rooms - .lazy_loading - .witness_retain(witness, lazy_loading_context) - }) - .into(); - - let last_notification_read: OptionFuture<_> = timeline_pdus - .is_empty() - .then(|| { - services - .rooms - .user - .last_notification_read(sender_user, room_id) - }) - .into(); - - let since_sender_member: OptionFuture<_> = 
since_shortstatehash - .map(|short| { - services - .rooms - .state_accessor - .state_get_content(short, &StateEventType::RoomMember, sender_user.as_str()) - .ok() - }) - .into(); - - let (last_notification_read, since_sender_member, witness) = - join3(last_notification_read, since_sender_member, witness).await; - - let joined_since_last_sync = - since_sender_member - .flatten() - .is_none_or(|content: RoomMemberEventContent| { - content.membership != MembershipState::Join - }); - - let StateChanges { - heroes, - joined_member_count, - invited_member_count, - mut state_events, - mut device_list_updates, - left_encrypted_users, - } = calculate_state_changes( - services, - sender_user, - room_id, - full_state, - filter, - since_shortstatehash, - current_shortstatehash, - joined_since_last_sync, - witness.as_ref(), - ) - .boxed() - .await?; - - let is_sender_membership = |pdu: &PduEvent| { - pdu.kind == StateEventType::RoomMember.into() - && pdu - .state_key - .as_deref() - .is_some_and(is_equal_to!(sender_user.as_str())) - }; - - let joined_sender_member: Option<_> = (joined_since_last_sync && timeline_pdus.is_empty()) - .then(|| { - state_events - .iter() - .position(is_sender_membership) - .map(|pos| state_events.swap_remove(pos)) - }) - .flatten(); - - let prev_batch = timeline_pdus.first().map(at!(0)).or_else(|| { - joined_sender_member - .is_some() - .then_some(since) - .map(Into::into) - }); - - let room_events = timeline_pdus - .into_iter() - .stream() - .wide_filter_map(|item| ignored_filter(services, item, sender_user)) - .map(at!(1)) - .chain(joined_sender_member.into_iter().stream()) - .map(|pdu| pdu.to_sync_room_event()) - .collect::>(); - - let account_data_events = services - .account_data - .changes_since(Some(room_id), sender_user, since, Some(next_batch)) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect(); - - // Look for device list updates in this room - let device_updates = services - .users - 
.room_keys_changed(room_id, since, Some(next_batch)) - .map(|(user_id, _)| user_id) - .map(ToOwned::to_owned) - .collect::>(); - - let send_notification_counts = last_notification_read.is_none_or(|count| count > since); - - let notification_count: OptionFuture<_> = send_notification_counts - .then(|| { - services - .rooms - .user - .notification_count(sender_user, room_id) - .map(TryInto::try_into) - .unwrap_or(uint!(0)) - }) - .into(); - - let highlight_count: OptionFuture<_> = send_notification_counts - .then(|| { - services - .rooms - .user - .highlight_count(sender_user, room_id) - .map(TryInto::try_into) - .unwrap_or(uint!(0)) - }) - .into(); - - let typing_events = services - .rooms - .typing - .last_typing_update(room_id) - .and_then(|count| async move { - if count <= since { - return Ok(Vec::>::new()); - } - - let typings = services - .rooms - .typing - .typings_all(room_id, sender_user) - .await?; - - Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) - }) - .unwrap_or(Vec::new()); - - let unread_notifications = join(notification_count, highlight_count); - let events = join3(room_events, account_data_events, typing_events); - let (unread_notifications, events, device_updates) = - join3(unread_notifications, events, device_updates) - .boxed() - .await; - - let (room_events, account_data_events, typing_events) = events; - let (notification_count, highlight_count) = unread_notifications; - - device_list_updates.extend(device_updates); - - let last_privateread_update = services - .rooms - .read_receipt - .last_privateread_update(sender_user, room_id) - .await > since; - - let private_read_event = if last_privateread_update { - services - .rooms - .read_receipt - .private_read_get(room_id, sender_user) - .await - .ok() - } else { - None - }; - - let edus: Vec> = receipt_events - .into_values() - .chain(typing_events.into_iter()) - .chain(private_read_event.into_iter()) - .collect(); - - // Save the state after this sync so we can send the 
correct state diff next - // sync - services - .rooms - .user - .associate_token_shortstatehash(room_id, next_batch, current_shortstatehash) - .await; - - let joined_room = JoinedRoom { - account_data: RoomAccountData { events: account_data_events }, - summary: RoomSummary { - joined_member_count: joined_member_count.map(ruma_from_u64), - invited_member_count: invited_member_count.map(ruma_from_u64), - heroes: heroes - .into_iter() - .flatten() - .map(TryInto::try_into) - .filter_map(Result::ok) - .collect(), - }, - unread_notifications: UnreadNotificationsCount { highlight_count, notification_count }, - timeline: Timeline { - limited: limited || joined_since_last_sync, - prev_batch: prev_batch.as_ref().map(ToString::to_string), - events: room_events, - }, - state: RoomState { - events: state_events - .into_iter() - .map(PduEvent::into_sync_state_event) - .collect(), - }, - ephemeral: Ephemeral { events: edus }, - unread_thread_notifications: BTreeMap::new(), - }; - - Ok((joined_room, device_list_updates, left_encrypted_users)) -} - -#[tracing::instrument( - name = "state", - level = "trace", - skip_all, - fields( - full = %full_state, - cs = %current_shortstatehash, - ss = ?since_shortstatehash, - ) -)] -#[allow(clippy::too_many_arguments)] -async fn calculate_state_changes( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - full_state: bool, - filter: &FilterDefinition, - since_shortstatehash: Option, - current_shortstatehash: ShortStateHash, - joined_since_last_sync: bool, - witness: Option<&Witness>, -) -> Result { - if since_shortstatehash.is_none() { - calculate_state_initial( - services, - sender_user, - room_id, - full_state, - filter, - current_shortstatehash, - witness, - ) - .await - } else { - calculate_state_incremental( - services, - sender_user, - room_id, - full_state, - filter, - since_shortstatehash, - current_shortstatehash, - joined_since_last_sync, - witness, - ) - .await - } -} - -#[tracing::instrument(name = "initial", level 
= "trace", skip_all)] -#[allow(clippy::too_many_arguments)] -async fn calculate_state_initial( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - full_state: bool, - _filter: &FilterDefinition, - current_shortstatehash: ShortStateHash, - witness: Option<&Witness>, -) -> Result { - let (shortstatekeys, event_ids): (Vec<_>, Vec<_>) = services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .unzip() - .await; - - let state_events = services - .rooms - .short - .multi_get_statekey_from_short(shortstatekeys.into_iter().stream()) - .zip(event_ids.into_iter().stream()) - .ready_filter_map(|item| Some((item.0.ok()?, item.1))) - .ready_filter_map(|((event_type, state_key), event_id)| { - let lazy = !full_state - && event_type == StateEventType::RoomMember - && state_key.as_str().try_into().is_ok_and(|user_id: &UserId| { - sender_user != user_id - && witness.is_some_and(|witness| !witness.contains(user_id)) - }); - - lazy.or_some(event_id) - }) - .broad_filter_map(|event_id: OwnedEventId| async move { - services.rooms.timeline.get_pdu(&event_id).await.ok() - }) - .collect() - .map(Ok); - - let counts = calculate_counts(services, room_id, sender_user); - let ((joined_member_count, invited_member_count, heroes), state_events) = - try_join(counts, state_events).boxed().await?; - - // The state_events above should contain all timeline_users, let's mark them as - // lazy loaded. 
- - Ok(StateChanges { - heroes, - joined_member_count, - invited_member_count, - state_events, - ..Default::default() - }) -} - -#[tracing::instrument(name = "incremental", level = "trace", skip_all)] -#[allow(clippy::too_many_arguments)] -async fn calculate_state_incremental<'a>( - services: &Services, - sender_user: &'a UserId, - room_id: &RoomId, - full_state: bool, - _filter: &FilterDefinition, - since_shortstatehash: Option, - current_shortstatehash: ShortStateHash, - joined_since_last_sync: bool, - witness: Option<&'a Witness>, -) -> Result { - let since_shortstatehash = since_shortstatehash.unwrap_or(current_shortstatehash); - - let state_changed = since_shortstatehash != current_shortstatehash; - - let encrypted_room = services - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok() - .await; - - let state_get_shorteventid = |user_id: &'a UserId| { - services - .rooms - .state_accessor - .state_get_shortid( - current_shortstatehash, - &StateEventType::RoomMember, - user_id.as_str(), - ) - .ok() - }; - - let lazy_state_ids: OptionFuture<_> = witness - .filter(|_| !full_state && !encrypted_room) - .map(|witness| { - StreamExt::into_future( - witness - .iter() - .stream() - .broad_filter_map(|user_id| state_get_shorteventid(user_id)), - ) - }) - .into(); - - let state_diff_ids: OptionFuture<_> = (!full_state && state_changed) - .then(|| { - StreamExt::into_future( - services - .rooms - .state_accessor - .state_added((since_shortstatehash, current_shortstatehash)) - .boxed(), - ) - }) - .into(); - - let current_state_ids: OptionFuture<_> = full_state - .then(|| { - StreamExt::into_future( - services - .rooms - .state_accessor - .state_full_shortids(current_shortstatehash) - .expect_ok() - .boxed(), - ) - }) - .into(); - - let state_events = current_state_ids - .stream() - .chain(state_diff_ids.stream()) - .broad_filter_map(|(shortstatekey, shorteventid)| async move { - if witness.is_none() || 
encrypted_room { - return Some(shorteventid); - } - - lazy_filter(services, sender_user, shortstatekey, shorteventid).await - }) - .chain(lazy_state_ids.stream()) - .broad_filter_map(|shorteventid| { - services - .rooms - .short - .get_eventid_from_short(shorteventid) - .ok() - }) - .broad_filter_map(|event_id: OwnedEventId| async move { - services.rooms.timeline.get_pdu(&event_id).await.ok() - }) - .collect::>() - .await; - - let (device_list_updates, left_encrypted_users) = state_events - .iter() - .stream() - .ready_filter(|_| encrypted_room) - .ready_filter(|state_event| state_event.kind == RoomMember) - .ready_filter_map(|state_event| { - let content: RoomMemberEventContent = state_event.get_content().ok()?; - let user_id: OwnedUserId = state_event.state_key.as_ref()?.parse().ok()?; - - Some((content, user_id)) - }) - .fold_default(|(mut dlu, mut leu): pair_of!(HashSet<_>), (content, user_id)| async move { - use MembershipState::*; - - let shares_encrypted_room = - |user_id| share_encrypted_room(services, sender_user, user_id, Some(room_id)); - - match content.membership { - | Leave => leu.insert(user_id), - | Join if joined_since_last_sync || !shares_encrypted_room(&user_id).await => - dlu.insert(user_id), - | _ => false, - }; - - (dlu, leu) - }) - .await; - - let send_member_count = state_events.iter().any(|event| event.kind == RoomMember); - - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - calculate_counts(services, room_id, sender_user).await? 
- } else { - (None, None, None) - }; - - Ok(StateChanges { - heroes, - joined_member_count, - invited_member_count, - state_events, - device_list_updates, - left_encrypted_users, - }) -} - -async fn lazy_filter( - services: &Services, - sender_user: &UserId, - shortstatekey: ShortStateKey, - shorteventid: ShortEventId, -) -> Option { - let (event_type, state_key) = services - .rooms - .short - .get_statekey_from_short(shortstatekey) - .await - .ok()?; - - (event_type != StateEventType::RoomMember || state_key == sender_user.as_str()) - .then_some(shorteventid) -} - -async fn calculate_counts( - services: &Services, - room_id: &RoomId, - sender_user: &UserId, -) -> Result<(Option, Option, Option>)> { - let joined_member_count = services - .rooms - .state_cache - .room_joined_count(room_id) - .unwrap_or(0); - - let invited_member_count = services - .rooms - .state_cache - .room_invited_count(room_id) - .unwrap_or(0); - - let (joined_member_count, invited_member_count) = - join(joined_member_count, invited_member_count).await; - - let small_room = joined_member_count.saturating_add(invited_member_count) <= 5; - - let heroes: OptionFuture<_> = small_room - .then(|| calculate_heroes(services, room_id, sender_user)) - .into(); - - Ok((Some(joined_member_count), Some(invited_member_count), heroes.await)) -} - -async fn calculate_heroes( - services: &Services, - room_id: &RoomId, - sender_user: &UserId, -) -> Vec { - services - .rooms - .timeline - .all_pdus(sender_user, room_id) - .ready_filter(|(_, pdu)| pdu.kind == RoomMember) - .fold_default(|heroes: Vec<_>, (_, pdu)| { - fold_hero(heroes, services, room_id, sender_user, pdu) - }) - .await -} - -async fn fold_hero( - mut heroes: Vec, - services: &Services, - room_id: &RoomId, - sender_user: &UserId, - pdu: PduEvent, -) -> Vec { - let Some(user_id): Option<&UserId> = - pdu.state_key.as_deref().map(TryInto::try_into).flat_ok() - else { - return heroes; - }; - - if user_id == sender_user { - return heroes; - } - - let 
Ok(content): Result = pdu.get_content() else { - return heroes; - }; - - // The membership was and still is invite or join - if !matches!(content.membership, MembershipState::Join | MembershipState::Invite) { - return heroes; - } - - if heroes.iter().any(is_equal_to!(user_id)) { - return heroes; - } - - let (is_invited, is_joined) = join( - services.rooms.state_cache.is_invited(user_id, room_id), - services.rooms.state_cache.is_joined(user_id, room_id), - ) - .await; - - if !is_joined && is_invited { - return heroes; - } - - heroes.push(user_id.to_owned()); - heroes -} diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs deleted file mode 100644 index f153b2da..00000000 --- a/src/api/client/sync/v4.rs +++ /dev/null @@ -1,838 +0,0 @@ -use std::{ - cmp::{self, Ordering}, - collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - time::Duration, -}; - -use axum::extract::State; -use conduwuit::{ - Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant, - matrix::TypeStateKey, - utils::{ - BoolExt, IterStream, ReadyExt, TryFutureExtExt, - math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, - }, - warn, -}; -use conduwuit_service::{ - Services, - rooms::read_receipt::pack_receipts, - sync::{into_db_key, into_snake_key}, -}; -use futures::{FutureExt, StreamExt, TryFutureExt}; -use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, - api::client::sync::sync_events::{ - self, DeviceLists, UnreadNotificationsCount, - v4::{SlidingOp, SlidingSyncRoomHero}, - }, - directory::RoomTypeFilter, - events::{ - AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, - TimelineEventType::*, - room::member::{MembershipState, RoomMemberEventContent}, - }, - serde::Raw, - uint, -}; - -use super::{load_timeline, share_encrypted_room}; -use crate::{ - Ruma, - client::{DEFAULT_BUMP_TYPES, ignored_filter}, -}; - -type TodoRooms = BTreeMap, usize, u64)>; -const SINGLE_CONNECTION_SYNC: &str = 
"single_connection_sync"; - -/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` -/// -/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`) -pub(crate) async fn sync_events_v4_route( - State(services): State, - body: Ruma, -) -> Result { - debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - let mut body = body.body; - - // Setup watchers, so if there's no response, we can wait for them - let watcher = services.sync.watch(sender_user, sender_device); - - let next_batch = services.globals.next_count()?; - - let conn_id = body - .conn_id - .clone() - .unwrap_or_else(|| SINGLE_CONNECTION_SYNC.to_owned()); - - let globalsince = body - .pos - .as_ref() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - - let db_key = into_db_key(sender_user, sender_device, conn_id.clone()); - if globalsince != 0 && !services.sync.remembered(&db_key) { - debug!("Restarting sync stream because it was gone from the database"); - return Err!(Request(UnknownPos("Connection data lost since last time"))); - } - - if globalsince == 0 { - services.sync.forget_sync_request_connection(&db_key); - } - - // Get sticky parameters from cache - let snake_key = into_snake_key(sender_user, sender_device, conn_id.clone()); - let known_rooms = services - .sync - .update_sync_request_with_cache(&snake_key, &mut body); - - let all_joined_rooms: Vec<_> = services - .rooms - .state_cache - .rooms_joined(sender_user) - .map(ToOwned::to_owned) - .collect() - .await; - - let all_invited_rooms: Vec<_> = services - .rooms - .state_cache - .rooms_invited(sender_user) - .map(|r| r.0) - .collect() - .await; - - let all_knocked_rooms: Vec<_> = services - .rooms - .state_cache - .rooms_knocked(sender_user) - .map(|r| r.0) - .collect() - .await; - - let all_invited_rooms: Vec<&RoomId> = 
all_invited_rooms.iter().map(AsRef::as_ref).collect(); - let all_knocked_rooms: Vec<&RoomId> = all_knocked_rooms.iter().map(AsRef::as_ref).collect(); - - let all_rooms: Vec<&RoomId> = all_joined_rooms - .iter() - .map(AsRef::as_ref) - .chain(all_invited_rooms.iter().map(AsRef::as_ref)) - .chain(all_knocked_rooms.iter().map(AsRef::as_ref)) - .collect(); - - let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect(); - let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect(); - - if body.extensions.to_device.enabled.unwrap_or(false) { - services - .users - .remove_to_device_events(sender_user, sender_device, globalsince) - .await; - } - - let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in - let mut device_list_changes = HashSet::new(); - let mut device_list_left = HashSet::new(); - - let mut receipts = sync_events::v4::Receipts { rooms: BTreeMap::new() }; - - let mut account_data = sync_events::v4::AccountData { - global: Vec::new(), - rooms: BTreeMap::new(), - }; - if body.extensions.account_data.enabled.unwrap_or(false) { - account_data.global = services - .account_data - .changes_since(None, sender_user, globalsince, Some(next_batch)) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) - .collect() - .await; - - if let Some(rooms) = body.extensions.account_data.rooms { - for room in rooms { - account_data.rooms.insert( - room.clone(), - services - .account_data - .changes_since(Some(&room), sender_user, globalsince, Some(next_batch)) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect() - .await, - ); - } - } - } - - if body.extensions.e2ee.enabled.unwrap_or(false) { - // Look for device list updates of this account - device_list_changes.extend( - services - .users - .keys_changed(sender_user, globalsince, None) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - for room_id in &all_joined_rooms { - let 
room_id: &&RoomId = room_id; - let Ok(current_shortstatehash) = - services.rooms.state.get_room_shortstatehash(room_id).await - else { - error!("Room {room_id} has no state"); - continue; - }; - - let since_shortstatehash = services - .rooms - .user - .get_token_shortstatehash(room_id, globalsince) - .await - .ok(); - - let encrypted_room = services - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .await - .is_ok(); - - if let Some(since_shortstatehash) = since_shortstatehash { - // Skip if there are only timeline changes - if since_shortstatehash == current_shortstatehash { - continue; - } - - let since_encryption = services - .rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") - .await; - - let since_sender_member: Option = services - .rooms - .state_accessor - .state_get_content( - since_shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - ) - .ok() - .await; - - let joined_since_last_sync = since_sender_member - .as_ref() - .is_none_or(|member| member.membership != MembershipState::Join); - - let new_encrypted_room = encrypted_room && since_encryption.is_err(); - - if encrypted_room { - let current_state_ids: HashMap<_, OwnedEventId> = services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .collect() - .await; - - let since_state_ids: HashMap<_, _> = services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .collect() - .await; - - for (key, id) in current_state_ids { - if since_state_ids.get(&key) != Some(&id) { - let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { - error!("Pdu in state not found: {id}"); - continue; - }; - if pdu.kind == RoomMember { - if let Some(Ok(user_id)) = - pdu.state_key.as_deref().map(UserId::parse) - { - if user_id == sender_user { - continue; - } - - let content: RoomMemberEventContent = pdu.get_content()?; - match content.membership { - | 
MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room( - &services, - sender_user, - user_id, - Some(room_id), - ) - .await - { - device_list_changes.insert(user_id.to_owned()); - } - }, - | MembershipState::Leave => { - // Write down users that have left encrypted rooms we - // are in - left_encrypted_users.insert(user_id.to_owned()); - }, - | _ => {}, - } - } - } - } - } - if joined_since_last_sync || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_changes.extend( - services - .rooms - .state_cache - .room_members(room_id) - // Don't send key updates from the sender to the sender - .ready_filter(|&user_id| sender_user != user_id) - // Only send keys if the sender doesn't share an encrypted room with the target - // already - .filter_map(|user_id| { - share_encrypted_room(&services, sender_user, user_id, Some(room_id)) - .map(|res| res.or_some(user_id.to_owned())) - }) - .collect::>() - .await, - ); - } - } - } - // Look for device list updates in this room - device_list_changes.extend( - services - .users - .room_keys_changed(room_id, globalsince, None) - .map(|(user_id, _)| user_id) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - } - - for user_id in left_encrypted_users { - let dont_share_encrypted_room = - !share_encrypted_room(&services, sender_user, &user_id, None).await; - - // If the user doesn't share an encrypted room with the target anymore, we need - // to tell them - if dont_share_encrypted_room { - device_list_left.insert(user_id); - } - } - } - - let mut lists = BTreeMap::new(); - let mut todo_rooms: TodoRooms = BTreeMap::new(); // and required state - - for (list_id, list) in &body.lists { - let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { - | Some(true) => &all_invited_rooms, - | Some(false) => &all_joined_rooms, - | None => &all_rooms, - }; - - let active_rooms = match list.filters.clone().map(|f| 
f.not_room_types) { - | Some(filter) if filter.is_empty() => active_rooms.clone(), - | Some(value) => filter_rooms(&services, active_rooms, &value, true).await, - | None => active_rooms.clone(), - }; - - let active_rooms = match list.filters.clone().map(|f| f.room_types) { - | Some(filter) if filter.is_empty() => active_rooms.clone(), - | Some(value) => filter_rooms(&services, &active_rooms, &value, false).await, - | None => active_rooms, - }; - - let mut new_known_rooms: BTreeSet = BTreeSet::new(); - - let ranges = list.ranges.clone(); - lists.insert(list_id.clone(), sync_events::v4::SyncList { - ops: ranges - .into_iter() - .map(|mut r| { - r.0 = r.0.clamp( - uint!(0), - UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX), - ); - r.1 = r.1.clamp( - r.0, - UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX), - ); - - let room_ids = if !active_rooms.is_empty() { - active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec() - } else { - Vec::new() - }; - - new_known_rooms.extend(room_ids.clone().into_iter().map(ToOwned::to_owned)); - for room_id in &room_ids { - let todo_room = todo_rooms.entry((*room_id).to_owned()).or_insert(( - BTreeSet::new(), - 0_usize, - u64::MAX, - )); - - let limit: usize = list - .room_details - .timeline_limit - .map(u64::from) - .map_or(10, usize_from_u64_truncated) - .min(100); - - todo_room.0.extend( - list.room_details - .required_state - .iter() - .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), - ); - - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get(list_id.as_str()) - .and_then(|k| k.get(*room_id)) - .copied() - .unwrap_or(0), - ); - } - sync_events::v4::SyncOp { - op: SlidingOp::Sync, - range: Some(r), - index: None, - room_ids: room_ids.into_iter().map(ToOwned::to_owned).collect(), - room_id: None, - } - }) - .collect(), - count: ruma_from_usize(active_rooms.len()), - }); - - if let 
Some(conn_id) = &body.conn_id { - let db_key = into_db_key(sender_user, sender_device, conn_id); - services.sync.update_sync_known_rooms( - &db_key, - list_id.clone(), - new_known_rooms, - globalsince, - ); - } - } - - let mut known_subscription_rooms = BTreeSet::new(); - for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await - || services.rooms.metadata.is_disabled(room_id).await - || services.rooms.metadata.is_banned(room_id).await - { - continue; - } - let todo_room = - todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0_usize, u64::MAX)); - - let limit: usize = room - .timeline_limit - .map(u64::from) - .map_or(10, usize_from_u64_truncated) - .min(100); - - todo_room.0.extend( - room.required_state - .iter() - .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), - ); - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get("subscriptions") - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - known_subscription_rooms.insert(room_id.clone()); - } - - for r in body.unsubscribe_rooms { - known_subscription_rooms.remove(&r); - body.room_subscriptions.remove(&r); - } - - if let Some(conn_id) = &body.conn_id { - let db_key = into_db_key(sender_user, sender_device, conn_id); - services.sync.update_sync_known_rooms( - &db_key, - "subscriptions".to_owned(), - known_subscription_rooms, - globalsince, - ); - } - - if let Some(conn_id) = body.conn_id.clone() { - let db_key = into_db_key(sender_user, sender_device, conn_id); - services - .sync - .update_sync_subscriptions(&db_key, body.room_subscriptions); - } - - let mut rooms = BTreeMap::new(); - for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms { - let roomsincecount = PduCount::Normal(*roomsince); - - let mut timestamp: Option<_> = None; - let mut invite_state = None; - let (timeline_pdus, limited); - let new_room_id: &RoomId = 
(*room_id).as_ref(); - if all_invited_rooms.contains(&new_room_id) { - // TODO: figure out a timestamp we can use for remote invites - invite_state = services - .rooms - .state_cache - .invite_state(sender_user, room_id) - .await - .ok(); - - (timeline_pdus, limited) = (Vec::new(), true); - } else { - (timeline_pdus, limited) = match load_timeline( - &services, - sender_user, - room_id, - roomsincecount, - None, - *timeline_limit, - ) - .await - { - | Ok(value) => value, - | Err(err) => { - warn!("Encountered missing timeline in {}, error {}", room_id, err); - continue; - }, - }; - } - - account_data.rooms.insert( - room_id.to_owned(), - services - .account_data - .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect() - .await, - ); - - let last_privateread_update = services - .rooms - .read_receipt - .last_privateread_update(sender_user, room_id) - .await > *roomsince; - - let private_read_event = if last_privateread_update { - services - .rooms - .read_receipt - .private_read_get(room_id, sender_user) - .await - .ok() - } else { - None - }; - - let mut vector: Vec> = services - .rooms - .read_receipt - .readreceipts_since(room_id, *roomsince) - .filter_map(|(read_user, _ts, v)| async move { - services - .users - .user_is_ignored(read_user, sender_user) - .await - .or_some(v) - }) - .collect() - .await; - - if let Some(private_read_event) = private_read_event { - vector.push(private_read_event); - } - - let receipt_size = vector.len(); - receipts - .rooms - .insert(room_id.clone(), pack_receipts(Box::new(vector.into_iter()))); - - if roomsince != &0 - && timeline_pdus.is_empty() - && account_data.rooms.get(room_id).is_some_and(Vec::is_empty) - && receipt_size == 0 - { - continue; - } - - let prev_batch = timeline_pdus - .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - | PduCount::Backfilled(_) => { - error!("timeline in 
backfill state?!"); - "0".to_owned() - }, - | PduCount::Normal(c) => c.to_string(), - })) - })? - .or_else(|| { - if roomsince != &0 { - Some(roomsince.to_string()) - } else { - None - } - }); - - let room_events: Vec<_> = timeline_pdus - .iter() - .stream() - .filter_map(|item| ignored_filter(&services, item.clone(), sender_user)) - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect() - .await; - - for (_, pdu) in timeline_pdus { - let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts); - if DEFAULT_BUMP_TYPES.binary_search(&pdu.kind).is_ok() - && timestamp.is_none_or(|time| time <= ts) - { - timestamp = Some(ts); - } - } - - let required_state = required_state_request - .iter() - .stream() - .filter_map(|state| async move { - services - .rooms - .state_accessor - .room_state_get(room_id, &state.0, &state.1) - .await - .map(PduEvent::into_sync_state_event) - .ok() - }) - .collect() - .await; - - // Heroes - let heroes: Vec<_> = services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|&member| member != sender_user) - .filter_map(|user_id| { - services - .rooms - .state_accessor - .get_member(room_id, user_id) - .map_ok(|memberevent| SlidingSyncRoomHero { - user_id: user_id.into(), - name: memberevent.displayname, - avatar: memberevent.avatar_url, - }) - .ok() - }) - .take(5) - .collect() - .await; - - let name = match heroes.len().cmp(&(1_usize)) { - | Ordering::Greater => { - let firsts = heroes[1..] 
- .iter() - .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) - .collect::>() - .join(", "); - - let last = heroes[0] - .name - .clone() - .unwrap_or_else(|| heroes[0].user_id.to_string()); - - Some(format!("{firsts} and {last}")) - }, - | Ordering::Equal => Some( - heroes[0] - .name - .clone() - .unwrap_or_else(|| heroes[0].user_id.to_string()), - ), - | Ordering::Less => None, - }; - - let heroes_avatar = if heroes.len() == 1 { - heroes[0].avatar.clone() - } else { - None - }; - - rooms.insert(room_id.clone(), sync_events::v4::SlidingSyncRoom { - name: services - .rooms - .state_accessor - .get_name(room_id) - .await - .ok() - .or(name), - avatar: match heroes_avatar { - | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), - | _ => match services.rooms.state_accessor.get_avatar(room_id).await { - | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), - | ruma::JsOption::Null => ruma::JsOption::Null, - | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - }, - }, - initial: Some(roomsince == &0), - is_dm: None, - invite_state, - unread_notifications: UnreadNotificationsCount { - highlight_count: Some( - services - .rooms - .user - .highlight_count(sender_user, room_id) - .await - .try_into() - .expect("notification count can't go that high"), - ), - notification_count: Some( - services - .rooms - .user - .notification_count(sender_user, room_id) - .await - .try_into() - .expect("notification count can't go that high"), - ), - }, - timeline: room_events, - required_state, - prev_batch, - limited, - joined_count: Some( - services - .rooms - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(0) - .try_into() - .unwrap_or_else(|_| uint!(0)), - ), - invited_count: Some( - services - .rooms - .state_cache - .room_invited_count(room_id) - .await - .unwrap_or(0) - .try_into() - .unwrap_or_else(|_| uint!(0)), - ), - num_live: None, // Count events in timeline greater than global sync counter - timestamp, - 
heroes: Some(heroes), - }); - } - - if rooms.iter().all(|(id, r)| { - r.timeline.is_empty() && r.required_state.is_empty() && !receipts.rooms.contains_key(id) - }) { - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let default = Duration::from_secs(30); - let duration = cmp::min(body.timeout.unwrap_or(default), default); - _ = tokio::time::timeout(duration, watcher).await; - } - - Ok(sync_events::v4::Response { - initial: globalsince == 0, - txn_id: body.txn_id.clone(), - pos: next_batch.to_string(), - lists, - rooms, - extensions: sync_events::v4::Extensions { - to_device: if body.extensions.to_device.enabled.unwrap_or(false) { - Some(sync_events::v4::ToDevice { - events: services - .users - .get_to_device_events( - sender_user, - sender_device, - Some(globalsince), - Some(next_batch), - ) - .collect() - .await, - next_batch: next_batch.to_string(), - }) - } else { - None - }, - e2ee: sync_events::v4::E2EE { - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, - device_one_time_keys_count: services - .users - .count_one_time_keys(sender_user, sender_device) - .await, - // Fallback keys are not yet supported - device_unused_fallback_key_types: None, - }, - account_data, - receipts, - typing: sync_events::v4::Typing { rooms: BTreeMap::new() }, - }, - delta_token: None, - }) -} - -async fn filter_rooms<'a>( - services: &Services, - rooms: &[&'a RoomId], - filter: &[RoomTypeFilter], - negate: bool, -) -> Vec<&'a RoomId> { - rooms - .iter() - .stream() - .filter_map(|r| async move { - let room_type = services.rooms.state_accessor.get_room_type(r).await; - - if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { - return None; - } - - let room_type_filter = RoomTypeFilter::from(room_type.ok()); - - let include = if negate { - !filter.contains(&room_type_filter) - } else { - filter.is_empty() || filter.contains(&room_type_filter) - }; - - 
include.then_some(r) - }) - .collect() - .await -} diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs deleted file mode 100644 index f3fc0f44..00000000 --- a/src/api/client/sync/v5.rs +++ /dev/null @@ -1,953 +0,0 @@ -use std::{ - cmp::{self, Ordering}, - collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - ops::Deref, - time::Duration, -}; - -use axum::extract::State; -use conduwuit::{ - Err, Error, Result, error, extract_variant, is_equal_to, - matrix::{ - TypeStateKey, - pdu::{PduCount, PduEvent}, - }, - trace, - utils::{ - BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, - future::ReadyEqExt, - math::{ruma_from_usize, usize_from_ruma}, - }, - warn, -}; -use conduwuit_service::{Services, rooms::read_receipt::pack_receipts, sync::into_snake_key}; -use futures::{ - FutureExt, Stream, StreamExt, TryFutureExt, - future::{OptionFuture, join3, try_join4}, - pin_mut, -}; -use ruma::{ - DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, - api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, - directory::RoomTypeFilter, - events::{ - AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, - room::member::{MembershipState, RoomMemberEventContent}, - }, - serde::Raw, - uint, -}; - -use super::share_encrypted_room; -use crate::{ - Ruma, - client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline}, -}; - -type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); -type TodoRooms = BTreeMap, usize, u64)>; -type KnownRooms = BTreeMap>; - -/// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync` -/// ([MSC4186]) -/// -/// A simplified version of sliding sync ([MSC3575]). -/// -/// Get all new events in a sliding window of rooms since the last sync or a -/// given point in time. 
-/// -/// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 -/// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186 -pub(crate) async fn sync_events_v5_route( - State(ref services): State, - body: Ruma, -) -> Result { - debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - let mut body = body.body; - - // Setup watchers, so if there's no response, we can wait for them - let watcher = services.sync.watch(sender_user, sender_device); - - let next_batch = services.globals.next_count()?; - - let conn_id = body.conn_id.clone(); - - let globalsince = body - .pos - .as_ref() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - - let snake_key = into_snake_key(sender_user, sender_device, conn_id); - - if globalsince != 0 && !services.sync.snake_connection_cached(&snake_key) { - return Err!(Request(UnknownPos( - "Connection data unknown to server; restarting sync stream." 
- ))); - } - - // Client / User requested an initial sync - if globalsince == 0 { - services.sync.forget_snake_sync_connection(&snake_key); - } - - // Get sticky parameters from cache - let known_rooms = services - .sync - .update_snake_sync_request_with_cache(&snake_key, &mut body); - - let all_joined_rooms = services - .rooms - .state_cache - .rooms_joined(sender_user) - .map(ToOwned::to_owned) - .collect::>(); - - let all_invited_rooms = services - .rooms - .state_cache - .rooms_invited(sender_user) - .map(|r| r.0) - .collect::>(); - - let all_knocked_rooms = services - .rooms - .state_cache - .rooms_knocked(sender_user) - .map(|r| r.0) - .collect::>(); - - let (all_joined_rooms, all_invited_rooms, all_knocked_rooms) = - join3(all_joined_rooms, all_invited_rooms, all_knocked_rooms).await; - - let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref); - let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref); - let all_knocked_rooms = all_knocked_rooms.iter().map(AsRef::as_ref); - let all_rooms = all_joined_rooms - .clone() - .chain(all_invited_rooms.clone()) - .chain(all_knocked_rooms.clone()); - - let pos = next_batch.clone().to_string(); - - let mut todo_rooms: TodoRooms = BTreeMap::new(); - - let sync_info: SyncInfo<'_> = (sender_user, sender_device, globalsince, &body); - - let account_data = collect_account_data(services, sync_info).map(Ok); - - let e2ee = collect_e2ee(services, sync_info, all_joined_rooms.clone()); - - let to_device = collect_to_device(services, sync_info, next_batch).map(Ok); - - let receipts = collect_receipts(services).map(Ok); - - let (account_data, e2ee, to_device, receipts) = - try_join4(account_data, e2ee, to_device, receipts).await?; - - let extensions = sync_events::v5::response::Extensions { - account_data, - e2ee, - to_device, - receipts, - typing: sync_events::v5::response::Typing::default(), - }; - - let mut response = sync_events::v5::Response { - txn_id: body.txn_id.clone(), - pos, - lists: 
BTreeMap::new(), - rooms: BTreeMap::new(), - extensions, - }; - - handle_lists( - services, - sync_info, - all_invited_rooms.clone(), - all_joined_rooms.clone(), - all_rooms, - &mut todo_rooms, - &known_rooms, - &mut response, - ) - .await; - - fetch_subscriptions(services, sync_info, &known_rooms, &mut todo_rooms).await; - - response.rooms = process_rooms( - services, - sender_user, - next_batch, - all_invited_rooms.clone(), - &todo_rooms, - &mut response, - &body, - ) - .await?; - - if response.rooms.iter().all(|(id, r)| { - r.timeline.is_empty() - && r.required_state.is_empty() - && !response.extensions.receipts.rooms.contains_key(id) - }) && response - .extensions - .to_device - .clone() - .is_none_or(|to| to.events.is_empty()) - { - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let default = Duration::from_secs(30); - let duration = cmp::min(body.timeout.unwrap_or(default), default); - _ = tokio::time::timeout(duration, watcher).await; - } - - trace!( - rooms = ?response.rooms.len(), - account_data = ?response.extensions.account_data.rooms.len(), - receipts = ?response.extensions.receipts.rooms.len(), - "responding to request with" - ); - Ok(response) -} - -async fn fetch_subscriptions( - services: &Services, - (sender_user, sender_device, globalsince, body): SyncInfo<'_>, - known_rooms: &KnownRooms, - todo_rooms: &mut TodoRooms, -) { - let mut known_subscription_rooms = BTreeSet::new(); - for (room_id, room) in &body.room_subscriptions { - let not_exists = services.rooms.metadata.exists(room_id).eq(&false); - - let is_disabled = services.rooms.metadata.is_disabled(room_id); - - let is_banned = services.rooms.metadata.is_banned(room_id); - - pin_mut!(not_exists, is_disabled, is_banned); - if not_exists.or(is_disabled).or(is_banned).await { - continue; - } - - let todo_room = - todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0_usize, u64::MAX)); - - let limit: UInt = room.timeline_limit; - - 
todo_room.0.extend( - room.required_state - .iter() - .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), - ); - todo_room.1 = todo_room.1.max(usize_from_ruma(limit)); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get("subscriptions") - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - known_subscription_rooms.insert(room_id.clone()); - } - // where this went (protomsc says it was removed) - //for r in body.unsubscribe_rooms { - // known_subscription_rooms.remove(&r); - // body.room_subscriptions.remove(&r); - //} - - if let Some(conn_id) = body.conn_id.clone() { - let snake_key = into_snake_key(sender_user, sender_device, conn_id); - services.sync.update_snake_sync_known_rooms( - &snake_key, - "subscriptions".to_owned(), - known_subscription_rooms, - globalsince, - ); - } -} - -#[allow(clippy::too_many_arguments)] -async fn handle_lists<'a, Rooms, AllRooms>( - services: &Services, - (sender_user, sender_device, globalsince, body): SyncInfo<'_>, - all_invited_rooms: Rooms, - all_joined_rooms: Rooms, - all_rooms: AllRooms, - todo_rooms: &'a mut TodoRooms, - known_rooms: &'a KnownRooms, - response: &'_ mut sync_events::v5::Response, -) -> KnownRooms -where - Rooms: Iterator + Clone + Send + 'a, - AllRooms: Iterator + Clone + Send + 'a, -{ - for (list_id, list) in &body.lists { - let active_rooms: Vec<_> = match list.filters.as_ref().and_then(|f| f.is_invite) { - | None => all_rooms.clone().collect(), - | Some(true) => all_invited_rooms.clone().collect(), - | Some(false) => all_joined_rooms.clone().collect(), - }; - - let active_rooms = match list.filters.as_ref().map(|f| &f.not_room_types) { - | None => active_rooms, - | Some(filter) if filter.is_empty() => active_rooms, - | Some(value) => - filter_rooms( - services, - value, - &true, - active_rooms.iter().stream().map(Deref::deref), - ) - .collect() - .await, - }; - - let mut new_known_rooms: BTreeSet = BTreeSet::new(); - - let ranges = 
list.ranges.clone(); - - for mut range in ranges { - range.0 = uint!(0); - range.1 = range - .1 - .clamp(range.0, UInt::try_from(active_rooms.len()).unwrap_or(UInt::MAX)); - - let room_ids = - active_rooms[usize_from_ruma(range.0)..usize_from_ruma(range.1)].to_vec(); - - let new_rooms: BTreeSet = - room_ids.clone().into_iter().map(From::from).collect(); - - new_known_rooms.extend(new_rooms); - //new_known_rooms.extend(room_ids..cloned()); - for room_id in room_ids { - let todo_room = todo_rooms.entry(room_id.to_owned()).or_insert(( - BTreeSet::new(), - 0_usize, - u64::MAX, - )); - - let limit: usize = usize_from_ruma(list.room_details.timeline_limit).min(100); - - todo_room.0.extend( - list.room_details - .required_state - .iter() - .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), - ); - - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get(list_id.as_str()) - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - } - } - response - .lists - .insert(list_id.clone(), sync_events::v5::response::List { - count: ruma_from_usize(active_rooms.len()), - }); - - if let Some(conn_id) = body.conn_id.clone() { - let snake_key = into_snake_key(sender_user, sender_device, conn_id); - services.sync.update_snake_sync_known_rooms( - &snake_key, - list_id.clone(), - new_known_rooms, - globalsince, - ); - } - } - - BTreeMap::default() -} - -async fn process_rooms<'a, Rooms>( - services: &Services, - sender_user: &UserId, - next_batch: u64, - all_invited_rooms: Rooms, - todo_rooms: &TodoRooms, - response: &mut sync_events::v5::Response, - body: &sync_events::v5::Request, -) -> Result> -where - Rooms: Iterator + Clone + Send + 'a, -{ - let mut rooms = BTreeMap::new(); - for (room_id, (required_state_request, timeline_limit, roomsince)) in todo_rooms { - let roomsincecount = PduCount::Normal(*roomsince); - - let mut timestamp: Option<_> = None; - let mut invite_state = None; - let 
(timeline_pdus, limited); - let new_room_id: &RoomId = (*room_id).as_ref(); - if all_invited_rooms.clone().any(is_equal_to!(new_room_id)) { - // TODO: figure out a timestamp we can use for remote invites - invite_state = services - .rooms - .state_cache - .invite_state(sender_user, room_id) - .await - .ok(); - - (timeline_pdus, limited) = (Vec::new(), true); - } else { - (timeline_pdus, limited) = match load_timeline( - services, - sender_user, - room_id, - roomsincecount, - Some(PduCount::from(next_batch)), - *timeline_limit, - ) - .await - { - | Ok(value) => value, - | Err(err) => { - warn!("Encountered missing timeline in {}, error {}", room_id, err); - continue; - }, - }; - } - - if body.extensions.account_data.enabled == Some(true) { - response.extensions.account_data.rooms.insert( - room_id.to_owned(), - services - .account_data - .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect() - .await, - ); - } - - let last_privateread_update = services - .rooms - .read_receipt - .last_privateread_update(sender_user, room_id) - .await; - - let private_read_event: OptionFuture<_> = (last_privateread_update > *roomsince) - .then(|| { - services - .rooms - .read_receipt - .private_read_get(room_id, sender_user) - .ok() - }) - .into(); - - let mut receipts: Vec> = services - .rooms - .read_receipt - .readreceipts_since(room_id, *roomsince) - .filter_map(|(read_user, _ts, v)| async move { - services - .users - .user_is_ignored(read_user, sender_user) - .await - .or_some(v) - }) - .collect() - .await; - - if let Some(private_read_event) = private_read_event.await.flatten() { - receipts.push(private_read_event); - } - - let receipt_size = receipts.len(); - - if receipt_size > 0 { - response - .extensions - .receipts - .rooms - .insert(room_id.clone(), pack_receipts(Box::new(receipts.into_iter()))); - } - - if roomsince != &0 - && timeline_pdus.is_empty() - && response - 
.extensions - .account_data - .rooms - .get(room_id) - .is_none_or(Vec::is_empty) - && receipt_size == 0 - { - continue; - } - - let prev_batch = timeline_pdus - .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - | PduCount::Backfilled(_) => { - error!("timeline in backfill state?!"); - "0".to_owned() - }, - | PduCount::Normal(c) => c.to_string(), - })) - })? - .or_else(|| { - if roomsince != &0 { - Some(roomsince.to_string()) - } else { - None - } - }); - - let room_events: Vec<_> = timeline_pdus - .iter() - .stream() - .filter_map(|item| ignored_filter(services, item.clone(), sender_user)) - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect() - .await; - - for (_, pdu) in timeline_pdus { - let ts = pdu.origin_server_ts; - if DEFAULT_BUMP_TYPES.binary_search(&pdu.kind).is_ok() - && timestamp.is_none_or(|time| time <= ts) - { - timestamp = Some(ts); - } - } - - let required_state = required_state_request - .iter() - .stream() - .filter_map(|state| async move { - services - .rooms - .state_accessor - .room_state_get(room_id, &state.0, &state.1) - .await - .map(PduEvent::into_sync_state_event) - .ok() - }) - .collect() - .await; - - // Heroes - let heroes: Vec<_> = services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|member| *member != sender_user) - .filter_map(|user_id| { - services - .rooms - .state_accessor - .get_member(room_id, user_id) - .map_ok(|memberevent| sync_events::v5::response::Hero { - user_id: user_id.into(), - name: memberevent.displayname, - avatar: memberevent.avatar_url, - }) - .ok() - }) - .take(5) - .collect() - .await; - - let name = match heroes.len().cmp(&(1_usize)) { - | Ordering::Greater => { - let firsts = heroes[1..] 
- .iter() - .map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string())) - .collect::>() - .join(", "); - - let last = heroes[0] - .name - .clone() - .unwrap_or_else(|| heroes[0].user_id.to_string()); - - Some(format!("{firsts} and {last}")) - }, - | Ordering::Equal => Some( - heroes[0] - .name - .clone() - .unwrap_or_else(|| heroes[0].user_id.to_string()), - ), - | Ordering::Less => None, - }; - - let heroes_avatar = if heroes.len() == 1 { - heroes[0].avatar.clone() - } else { - None - }; - - rooms.insert(room_id.clone(), sync_events::v5::response::Room { - name: services - .rooms - .state_accessor - .get_name(room_id) - .await - .ok() - .or(name), - avatar: match heroes_avatar { - | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), - | _ => match services.rooms.state_accessor.get_avatar(room_id).await { - | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), - | ruma::JsOption::Null => ruma::JsOption::Null, - | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - }, - }, - initial: Some(roomsince == &0), - is_dm: None, - invite_state, - unread_notifications: UnreadNotificationsCount { - highlight_count: Some( - services - .rooms - .user - .highlight_count(sender_user, room_id) - .await - .try_into() - .expect("notification count can't go that high"), - ), - notification_count: Some( - services - .rooms - .user - .notification_count(sender_user, room_id) - .await - .try_into() - .expect("notification count can't go that high"), - ), - }, - timeline: room_events, - required_state, - prev_batch, - limited, - joined_count: Some( - services - .rooms - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(0) - .try_into() - .unwrap_or_else(|_| uint!(0)), - ), - invited_count: Some( - services - .rooms - .state_cache - .room_invited_count(room_id) - .await - .unwrap_or(0) - .try_into() - .unwrap_or_else(|_| uint!(0)), - ), - num_live: None, // Count events in timeline greater than global sync counter - bump_stamp: 
timestamp, - heroes: Some(heroes), - }); - } - Ok(rooms) -} -async fn collect_account_data( - services: &Services, - (sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request), -) -> sync_events::v5::response::AccountData { - let mut account_data = sync_events::v5::response::AccountData { - global: Vec::new(), - rooms: BTreeMap::new(), - }; - - if !body.extensions.account_data.enabled.unwrap_or(false) { - return sync_events::v5::response::AccountData::default(); - } - - account_data.global = services - .account_data - .changes_since(None, sender_user, globalsince, None) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) - .collect() - .await; - - if let Some(rooms) = &body.extensions.account_data.rooms { - for room in rooms { - account_data.rooms.insert( - room.clone(), - services - .account_data - .changes_since(Some(room), sender_user, globalsince, None) - .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) - .collect() - .await, - ); - } - } - - account_data -} - -async fn collect_e2ee<'a, Rooms>( - services: &Services, - (sender_user, sender_device, globalsince, body): ( - &UserId, - &DeviceId, - u64, - &sync_events::v5::Request, - ), - all_joined_rooms: Rooms, -) -> Result -where - Rooms: Iterator + Send + 'a, -{ - if !body.extensions.e2ee.enabled.unwrap_or(false) { - return Ok(sync_events::v5::response::E2EE::default()); - } - let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in - let mut device_list_changes = HashSet::new(); - let mut device_list_left = HashSet::new(); - // Look for device list updates of this account - device_list_changes.extend( - services - .users - .keys_changed(sender_user, globalsince, None) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - for room_id in all_joined_rooms { - let Ok(current_shortstatehash) = - services.rooms.state.get_room_shortstatehash(room_id).await - else { - error!("Room 
{room_id} has no state"); - continue; - }; - - let since_shortstatehash = services - .rooms - .user - .get_token_shortstatehash(room_id, globalsince) - .await - .ok(); - - let encrypted_room = services - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .await - .is_ok(); - - if let Some(since_shortstatehash) = since_shortstatehash { - // Skip if there are only timeline changes - if since_shortstatehash == current_shortstatehash { - continue; - } - - let since_encryption = services - .rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") - .await; - - let since_sender_member: Option = services - .rooms - .state_accessor - .state_get_content( - since_shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - ) - .ok() - .await; - - let joined_since_last_sync = since_sender_member - .as_ref() - .is_none_or(|member| member.membership != MembershipState::Join); - - let new_encrypted_room = encrypted_room && since_encryption.is_err(); - - if encrypted_room { - let current_state_ids: HashMap<_, OwnedEventId> = services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .collect() - .await; - - let since_state_ids: HashMap<_, _> = services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .collect() - .await; - - for (key, id) in current_state_ids { - if since_state_ids.get(&key) != Some(&id) { - let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else { - error!("Pdu in state not found: {id}"); - continue; - }; - if pdu.kind == TimelineEventType::RoomMember { - if let Some(Ok(user_id)) = pdu.state_key.as_deref().map(UserId::parse) - { - if user_id == sender_user { - continue; - } - - let content: RoomMemberEventContent = pdu.get_content()?; - match content.membership { - | MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room( - services, - sender_user, - user_id, - Some(room_id), - 
) - .await - { - device_list_changes.insert(user_id.to_owned()); - } - }, - | MembershipState::Leave => { - // Write down users that have left encrypted rooms we - // are in - left_encrypted_users.insert(user_id.to_owned()); - }, - | _ => {}, - } - } - } - } - } - if joined_since_last_sync || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_changes.extend( - services - .rooms - .state_cache - .room_members(room_id) - // Don't send key updates from the sender to the sender - .ready_filter(|user_id| sender_user != *user_id) - // Only send keys if the sender doesn't share an encrypted room with the target - // already - .filter_map(|user_id| { - share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .map(|res| res.or_some(user_id.to_owned())) - }) - .collect::>() - .await, - ); - } - } - } - // Look for device list updates in this room - device_list_changes.extend( - services - .users - .room_keys_changed(room_id, globalsince, None) - .map(|(user_id, _)| user_id) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - } - - for user_id in left_encrypted_users { - let dont_share_encrypted_room = - !share_encrypted_room(services, sender_user, &user_id, None).await; - - // If the user doesn't share an encrypted room with the target anymore, we need - // to tell them - if dont_share_encrypted_room { - device_list_left.insert(user_id); - } - } - - Ok(sync_events::v5::response::E2EE { - device_unused_fallback_key_types: None, - - device_one_time_keys_count: services - .users - .count_one_time_keys(sender_user, sender_device) - .await, - - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, - }) -} - -async fn collect_to_device( - services: &Services, - (sender_user, sender_device, globalsince, body): SyncInfo<'_>, - next_batch: u64, -) -> Option { - if !body.extensions.to_device.enabled.unwrap_or(false) { - return 
None; - } - - services - .users - .remove_to_device_events(sender_user, sender_device, globalsince) - .await; - - Some(sync_events::v5::response::ToDevice { - next_batch: next_batch.to_string(), - events: services - .users - .get_to_device_events(sender_user, sender_device, None, Some(next_batch)) - .collect() - .await, - }) -} - -async fn collect_receipts(_services: &Services) -> sync_events::v5::response::Receipts { - sync_events::v5::response::Receipts { rooms: BTreeMap::new() } - // TODO: get explicitly requested read receipts -} - -fn filter_rooms<'a, Rooms>( - services: &'a Services, - filter: &'a [RoomTypeFilter], - negate: &'a bool, - rooms: Rooms, -) -> impl Stream + Send + 'a -where - Rooms: Stream + Send + 'a, -{ - rooms.filter_map(async |room_id| { - let room_type = services.rooms.state_accessor.get_room_type(room_id).await; - - if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { - return None; - } - - let room_type_filter = RoomTypeFilter::from(room_type.ok()); - - let include = if *negate { - !filter.contains(&room_type_filter) - } else { - filter.is_empty() || filter.contains(&room_type_filter) - }; - - include.then_some(room_id) - }) -} diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs deleted file mode 100644 index caafe10d..00000000 --- a/src/api/client/tag.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use conduwuit::Result; -use ruma::{ - api::client::tag::{create_tag, delete_tag, get_tags}, - events::{ - RoomAccountDataEventType, - tag::{TagEvent, TagEventContent}, - }, -}; - -use crate::Ruma; - -/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` -/// -/// Adds a tag to the room. -/// -/// - Inserts the tag into the tag event of the room account data. 
-pub(crate) async fn update_tag_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let mut tags_event = services - .account_data - .get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag) - .await - .unwrap_or(TagEvent { - content: TagEventContent { tags: BTreeMap::new() }, - }); - - tags_event - .content - .tags - .insert(body.tag.clone().into(), body.tag_info.clone()); - - services - .account_data - .update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), - ) - .await?; - - Ok(create_tag::v3::Response {}) -} - -/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` -/// -/// Deletes a tag from the room. -/// -/// - Removes the tag from the tag event of the room account data. -pub(crate) async fn delete_tag_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let mut tags_event = services - .account_data - .get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag) - .await - .unwrap_or(TagEvent { - content: TagEventContent { tags: BTreeMap::new() }, - }); - - tags_event.content.tags.remove(&body.tag.clone().into()); - - services - .account_data - .update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), - ) - .await?; - - Ok(delete_tag::v3::Response {}) -} - -/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` -/// -/// Returns tags on the room. -/// -/// - Gets the tag event of the room account data. 
-pub(crate) async fn get_tags_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let tags_event = services - .account_data - .get_room(&body.room_id, sender_user, RoomAccountDataEventType::Tag) - .await - .unwrap_or(TagEvent { - content: TagEventContent { tags: BTreeMap::new() }, - }); - - Ok(get_tags::v3::Response { tags: tags_event.content.tags }) -} diff --git a/src/api/client/thirdparty.rs b/src/api/client/thirdparty.rs deleted file mode 100644 index 0713a882..00000000 --- a/src/api/client/thirdparty.rs +++ /dev/null @@ -1,26 +0,0 @@ -use std::collections::BTreeMap; - -use conduwuit::Result; -use ruma::api::client::thirdparty::get_protocols; - -use crate::{Ruma, RumaResponse}; - -/// # `GET /_matrix/client/r0/thirdparty/protocols` -/// -/// TODO: Fetches all metadata about protocols supported by the homeserver. -pub(crate) async fn get_protocols_route( - _body: Ruma, -) -> Result { - // TODO - Ok(get_protocols::v3::Response { protocols: BTreeMap::new() }) -} - -/// # `GET /_matrix/client/unstable/thirdparty/protocols` -/// -/// Same as `get_protocols_route`, except for some reason Element Android legacy -/// calls this -pub(crate) async fn get_protocols_route_unstable( - body: Ruma, -) -> Result> { - get_protocols_route(body).await.map(RumaResponse) -} diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs deleted file mode 100644 index 5b838bef..00000000 --- a/src/api/client/threads.rs +++ /dev/null @@ -1,62 +0,0 @@ -use axum::extract::State; -use conduwuit::{ - Result, at, - matrix::pdu::{PduCount, PduEvent}, -}; -use futures::StreamExt; -use ruma::{api::client::threads::get_threads, uint}; - -use crate::Ruma; - -/// # `GET /_matrix/client/r0/rooms/{roomId}/threads` -pub(crate) async fn get_threads_route( - State(services): State, - ref body: Ruma, -) -> Result { - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .unwrap_or_else(|| 
uint!(10)) - .try_into() - .unwrap_or(10) - .min(100); - - let from: PduCount = body - .from - .as_deref() - .map(str::parse) - .transpose()? - .unwrap_or_else(PduCount::max); - - let threads: Vec<(PduCount, PduEvent)> = services - .rooms - .threads - .threads_until(body.sender_user(), &body.room_id, from, &body.include) - .await? - .take(limit) - .filter_map(|(count, pdu)| async move { - services - .rooms - .state_accessor - .user_can_see_event(body.sender_user(), &body.room_id, &pdu.event_id) - .await - .then_some((count, pdu)) - }) - .collect() - .await; - - Ok(get_threads::v1::Response { - next_batch: threads - .last() - .filter(|_| threads.len() >= limit) - .map(at!(0)) - .as_ref() - .map(ToString::to_string), - - chunk: threads - .into_iter() - .map(at!(1)) - .map(PduEvent::into_room_event) - .collect(), - }) -} diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs deleted file mode 100644 index 8ad9dc99..00000000 --- a/src/api/client/to_device.rs +++ /dev/null @@ -1,111 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use conduwuit::{Error, Result}; -use conduwuit_service::sending::EduBuf; -use futures::StreamExt; -use ruma::{ - api::{ - client::{error::ErrorKind, to_device::send_event_to_device}, - federation::{self, transactions::edu::DirectDeviceContent}, - }, - to_device::DeviceIdOrAllDevices, -}; - -use crate::Ruma; - -/// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` -/// -/// Send a to-device event to a set of client devices. 
-pub(crate) async fn send_event_to_device_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_deref(); - - // Check if this is a new transaction id - if services - .transaction_ids - .existing_txnid(sender_user, sender_device, &body.txn_id) - .await - .is_ok() - { - return Ok(send_event_to_device::v3::Response {}); - } - - for (target_user_id, map) in &body.messages { - for (target_device_id_maybe, event) in map { - if !services.globals.user_is_local(target_user_id) { - let mut map = BTreeMap::new(); - map.insert(target_device_id_maybe.clone(), event.clone()); - let mut messages = BTreeMap::new(); - messages.insert(target_user_id.clone(), map); - let count = services.globals.next_count()?; - - let mut buf = EduBuf::new(); - serde_json::to_writer( - &mut buf, - &federation::transactions::edu::Edu::DirectToDevice(DirectDeviceContent { - sender: sender_user.clone(), - ev_type: body.event_type.clone(), - message_id: count.to_string().into(), - messages, - }), - ) - .expect("DirectToDevice EDU can be serialized"); - - services - .sending - .send_edu_server(target_user_id.server_name(), buf)?; - - continue; - } - - let event_type = &body.event_type.to_string(); - - let event = event - .deserialize_as() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?; - - match target_device_id_maybe { - | DeviceIdOrAllDevices::DeviceId(target_device_id) => { - services - .users - .add_to_device_event( - sender_user, - target_user_id, - target_device_id, - event_type, - event, - ) - .await; - }, - - | DeviceIdOrAllDevices::AllDevices => { - let (event_type, event) = (&event_type, &event); - services - .users - .all_device_ids(target_user_id) - .for_each(|target_device_id| { - services.users.add_to_device_event( - sender_user, - target_user_id, - target_device_id, - event_type, - event.clone(), - ) - }) - .await; - }, - } - } - 
} - - // Save transaction id with empty data - services - .transaction_ids - .add_txnid(sender_user, sender_device, &body.txn_id, &[]); - - Ok(send_event_to_device::v3::Response {}) -} diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs deleted file mode 100644 index 1d8d02fd..00000000 --- a/src/api/client/typing.rs +++ /dev/null @@ -1,75 +0,0 @@ -use axum::extract::State; -use conduwuit::{Err, Result, utils, utils::math::Tried}; -use ruma::api::client::typing::create_typing_event; - -use crate::Ruma; - -/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` -/// -/// Sets the typing state of the sender user. -pub(crate) async fn create_typing_event_route( - State(services): State, - body: Ruma, -) -> Result { - use create_typing_event::v3::Typing; - let sender_user = body.sender_user(); - - if sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot update typing status of other users."))); - } - - if !services - .rooms - .state_cache - .is_joined(sender_user, &body.room_id) - .await - { - return Err!(Request(Forbidden("You are not in this room."))); - } - - match body.state { - | Typing::Yes(duration) => { - let duration = utils::clamp( - duration.as_millis().try_into().unwrap_or(u64::MAX), - services - .server - .config - .typing_client_timeout_min_s - .try_mul(1000)?, - services - .server - .config - .typing_client_timeout_max_s - .try_mul(1000)?, - ); - services - .rooms - .typing - .typing_add( - sender_user, - &body.room_id, - utils::millis_since_unix_epoch() - .checked_add(duration) - .expect("user typing timeout should not get this high"), - ) - .await?; - }, - | _ => { - services - .rooms - .typing - .typing_remove(sender_user, &body.room_id) - .await?; - }, - } - - // ping presence - if services.config.allow_local_presence { - services - .presence - .ping_presence(&body.user_id, &ruma::presence::PresenceState::Online) - .await?; - } - - Ok(create_typing_event::v3::Response {}) -} diff 
--git a/src/api/client/unstable.rs b/src/api/client/unstable.rs deleted file mode 100644 index e21eaf21..00000000 --- a/src/api/client/unstable.rs +++ /dev/null @@ -1,420 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, Error, Result}; -use futures::StreamExt; -use ruma::{ - OwnedRoomId, - api::{ - client::{ - error::ErrorKind, - membership::mutual_rooms, - profile::{ - delete_profile_key, delete_timezone_key, get_profile_key, get_timezone_key, - set_profile_key, set_timezone_key, - }, - }, - federation, - }, - presence::PresenceState, -}; - -use super::{update_avatar_url, update_displayname}; -use crate::Ruma; - -/// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms` -/// -/// Gets all the rooms the sender shares with the specified user. -/// -/// TODO: Implement pagination, currently this just returns everything -/// -/// An implementation of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) -#[tracing::instrument(skip_all, fields(%client), name = "mutual_rooms")] -pub(crate) async fn get_mutual_rooms_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if sender_user == body.user_id { - return Err!(Request(Unknown("You cannot request rooms in common with yourself."))); - } - - if !services.users.exists(&body.user_id).await { - return Ok(mutual_rooms::unstable::Response { joined: vec![], next_batch_token: None }); - } - - let mutual_rooms: Vec = services - .rooms - .state_cache - .get_shared_rooms(sender_user, &body.user_id) - .map(ToOwned::to_owned) - .collect() - .await; - - Ok(mutual_rooms::unstable::Response { - joined: mutual_rooms, - next_batch_token: None, - }) -} - -/// # `DELETE /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz` -/// -/// Deletes the `tz` (timezone) of a user, as per MSC4133 and MSC4175. 
-/// -/// - Also makes sure other users receive the update using presence EDUs -pub(crate) async fn delete_timezone_key_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if *sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot update the profile of another user"))); - } - - services.users.set_timezone(&body.user_id, None); - - if services.config.allow_local_presence { - // Presence update - services - .presence - .ping_presence(&body.user_id, &PresenceState::Online) - .await?; - } - - Ok(delete_timezone_key::unstable::Response {}) -} - -/// # `PUT /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz` -/// -/// Updates the `tz` (timezone) of a user, as per MSC4133 and MSC4175. -/// -/// - Also makes sure other users receive the update using presence EDUs -pub(crate) async fn set_timezone_key_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if *sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot update the profile of another user"))); - } - - services.users.set_timezone(&body.user_id, body.tz.clone()); - - if services.config.allow_local_presence { - // Presence update - services - .presence - .ping_presence(&body.user_id, &PresenceState::Online) - .await?; - } - - Ok(set_timezone_key::unstable::Response {}) -} - -/// # `PUT /_matrix/client/unstable/uk.tcpip.msc4133/profile/{user_id}/{field}` -/// -/// Updates the profile key-value field of a user, as per MSC4133. -/// -/// This also handles the avatar_url and displayname being updated. 
-pub(crate) async fn set_profile_key_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if *sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot update the profile of another user"))); - } - - if body.kv_pair.is_empty() { - return Err!(Request(BadJson( - "The key-value pair JSON body is empty. Use DELETE to delete a key" - ))); - } - - if body.kv_pair.len() > 1 { - // TODO: support PATCH or "recursively" adding keys in some sort - return Err!(Request(BadJson( - "This endpoint can only take one key-value pair at a time" - ))); - } - - let Some(profile_key_value) = body.kv_pair.get(&body.key_name) else { - return Err!(Request(BadJson( - "The key does not match the URL field key, or JSON body is empty (use DELETE)" - ))); - }; - - if body - .kv_pair - .keys() - .any(|key| key.starts_with("u.") && !profile_key_value.is_string()) - { - return Err!(Request(BadJson("u.* profile key fields must be strings"))); - } - - if body.kv_pair.keys().any(|key| key.len() > 128) { - return Err!(Request(BadJson("Key names cannot be longer than 128 bytes"))); - } - - if body.key_name == "displayname" { - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(&body.user_id) - .map(Into::into) - .collect() - .await; - - update_displayname( - &services, - &body.user_id, - Some(profile_key_value.to_string()), - &all_joined_rooms, - ) - .await; - } else if body.key_name == "avatar_url" { - let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string()); - - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(&body.user_id) - .map(Into::into) - .collect() - .await; - - update_avatar_url(&services, &body.user_id, Some(mxc), None, &all_joined_rooms).await; - } else { - services.users.set_profile_key( - &body.user_id, - &body.key_name, - Some(profile_key_value.clone()), - ); - } - - if 
services.config.allow_local_presence { - // Presence update - services - .presence - .ping_presence(&body.user_id, &PresenceState::Online) - .await?; - } - - Ok(set_profile_key::unstable::Response {}) -} - -/// # `DELETE /_matrix/client/unstable/uk.tcpip.msc4133/profile/{user_id}/{field}` -/// -/// Deletes the profile key-value field of a user, as per MSC4133. -/// -/// This also handles the avatar_url and displayname being updated. -pub(crate) async fn delete_profile_key_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if *sender_user != body.user_id && body.appservice_info.is_none() { - return Err!(Request(Forbidden("You cannot update the profile of another user"))); - } - - if body.kv_pair.len() > 1 { - // TODO: support PATCH or "recursively" adding keys in some sort - return Err!(Request(BadJson( - "This endpoint can only take one key-value pair at a time" - ))); - } - - if body.key_name == "displayname" { - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(&body.user_id) - .map(Into::into) - .collect() - .await; - - update_displayname(&services, &body.user_id, None, &all_joined_rooms).await; - } else if body.key_name == "avatar_url" { - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(&body.user_id) - .map(Into::into) - .collect() - .await; - - update_avatar_url(&services, &body.user_id, None, None, &all_joined_rooms).await; - } else { - services - .users - .set_profile_key(&body.user_id, &body.key_name, None); - } - - if services.config.allow_local_presence { - // Presence update - services - .presence - .ping_presence(&body.user_id, &PresenceState::Online) - .await?; - } - - Ok(delete_profile_key::unstable::Response {}) -} - -/// # `GET /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz` -/// -/// Returns the `timezone` of the user as per MSC4133 and MSC4175. 
-/// -/// - If user is on another server and we do not have a local copy already fetch -/// `timezone` over federation -pub(crate) async fn get_timezone_key_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.globals.user_is_local(&body.user_id) { - // Create and update our local copy of the user - if let Ok(response) = services - .sending - .send_federation_request( - body.user_id.server_name(), - federation::query::get_profile_information::v1::Request { - user_id: body.user_id.clone(), - field: None, // we want the full user's profile to update locally as well - }, - ) - .await - { - if !services.users.exists(&body.user_id).await { - services.users.create(&body.user_id, None)?; - } - - services - .users - .set_displayname(&body.user_id, response.displayname.clone()); - - services - .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()); - - services - .users - .set_blurhash(&body.user_id, response.blurhash.clone()); - - services - .users - .set_timezone(&body.user_id, response.tz.clone()); - - return Ok(get_timezone_key::unstable::Response { tz: response.tz }); - } - } - - if !services.users.exists(&body.user_id).await { - // Return 404 if this user doesn't exist and we couldn't fetch it over - // federation - return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); - } - - Ok(get_timezone_key::unstable::Response { - tz: services.users.timezone(&body.user_id).await.ok(), - }) -} - -/// # `GET /_matrix/client/unstable/uk.tcpip.msc4133/profile/{userId}/{field}}` -/// -/// Gets the profile key-value field of a user, as per MSC4133. 
-/// -/// - If user is on another server and we do not have a local copy already fetch -/// `timezone` over federation -pub(crate) async fn get_profile_key_route( - State(services): State, - body: Ruma, -) -> Result { - let mut profile_key_value: BTreeMap = BTreeMap::new(); - - if !services.globals.user_is_local(&body.user_id) { - // Create and update our local copy of the user - if let Ok(response) = services - .sending - .send_federation_request( - body.user_id.server_name(), - federation::query::get_profile_information::v1::Request { - user_id: body.user_id.clone(), - field: None, // we want the full user's profile to update locally as well - }, - ) - .await - { - if !services.users.exists(&body.user_id).await { - services.users.create(&body.user_id, None)?; - } - - services - .users - .set_displayname(&body.user_id, response.displayname.clone()); - - services - .users - .set_avatar_url(&body.user_id, response.avatar_url.clone()); - - services - .users - .set_blurhash(&body.user_id, response.blurhash.clone()); - - services - .users - .set_timezone(&body.user_id, response.tz.clone()); - - match response.custom_profile_fields.get(&body.key_name) { - | Some(value) => { - profile_key_value.insert(body.key_name.clone(), value.clone()); - services.users.set_profile_key( - &body.user_id, - &body.key_name, - Some(value.clone()), - ); - }, - | _ => { - return Err!(Request(NotFound("The requested profile key does not exist."))); - }, - } - - if profile_key_value.is_empty() { - return Err!(Request(NotFound("The requested profile key does not exist."))); - } - - return Ok(get_profile_key::unstable::Response { value: profile_key_value }); - } - } - - if !services.users.exists(&body.user_id).await { - // Return 404 if this user doesn't exist and we couldn't fetch it over - // federation - return Err!(Request(NotFound("Profile was not found."))); - } - - match services - .users - .profile_key(&body.user_id, &body.key_name) - .await - { - | Ok(value) => { - 
profile_key_value.insert(body.key_name.clone(), value); - }, - | _ => { - return Err!(Request(NotFound("The requested profile key does not exist."))); - }, - } - - if profile_key_value.is_empty() { - return Err!(Request(NotFound("The requested profile key does not exist."))); - } - - Ok(get_profile_key::unstable::Response { value: profile_key_value }) -} diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs deleted file mode 100644 index 232d5b28..00000000 --- a/src/api/client/unversioned.rs +++ /dev/null @@ -1,87 +0,0 @@ -use std::collections::BTreeMap; - -use axum::{Json, extract::State, response::IntoResponse}; -use conduwuit::Result; -use futures::StreamExt; -use ruma::api::client::discovery::get_supported_versions; - -use crate::Ruma; - -/// # `GET /_matrix/client/versions` -/// -/// Get the versions of the specification and unstable features supported by -/// this server. -/// -/// - Versions take the form MAJOR.MINOR.PATCH -/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value -/// - Unstable features are namespaced and may include version information in -/// their name -/// -/// Note: Unstable features are used while developing new features. 
Clients -/// should avoid using unstable features in their stable releases -pub(crate) async fn get_supported_versions_route( - _body: Ruma, -) -> Result { - let resp = get_supported_versions::Response { - versions: vec![ - "r0.0.1".to_owned(), - "r0.1.0".to_owned(), - "r0.2.0".to_owned(), - "r0.3.0".to_owned(), - "r0.4.0".to_owned(), - "r0.5.0".to_owned(), - "r0.6.0".to_owned(), - "r0.6.1".to_owned(), - "v1.1".to_owned(), - "v1.2".to_owned(), - "v1.3".to_owned(), - "v1.4".to_owned(), - "v1.5".to_owned(), - "v1.11".to_owned(), - ], - unstable_features: BTreeMap::from_iter([ - ("org.matrix.e2e_cross_signing".to_owned(), true), - ("org.matrix.msc2285.stable".to_owned(), true), /* private read receipts (https://github.com/matrix-org/matrix-spec-proposals/pull/2285) */ - ("uk.half-shot.msc2666.query_mutual_rooms".to_owned(), true), /* query mutual rooms (https://github.com/matrix-org/matrix-spec-proposals/pull/2666) */ - ("org.matrix.msc2836".to_owned(), true), /* threading/threads (https://github.com/matrix-org/matrix-spec-proposals/pull/2836) */ - ("org.matrix.msc2946".to_owned(), true), /* spaces/hierarchy summaries (https://github.com/matrix-org/matrix-spec-proposals/pull/2946) */ - ("org.matrix.msc3026.busy_presence".to_owned(), true), /* busy presence status (https://github.com/matrix-org/matrix-spec-proposals/pull/3026) */ - ("org.matrix.msc3827".to_owned(), true), /* filtering of /publicRooms by room type (https://github.com/matrix-org/matrix-spec-proposals/pull/3827) */ - ("org.matrix.msc3952_intentional_mentions".to_owned(), true), /* intentional mentions (https://github.com/matrix-org/matrix-spec-proposals/pull/3952) */ - ("org.matrix.msc3575".to_owned(), true), /* sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/3575/files#r1588877046) */ - ("org.matrix.msc3916.stable".to_owned(), true), /* authenticated media (https://github.com/matrix-org/matrix-spec-proposals/pull/3916) */ - ("org.matrix.msc4180".to_owned(), true), /* stable flag 
for 3916 (https://github.com/matrix-org/matrix-spec-proposals/pull/4180) */ - ("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */ - ("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */ - ("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */ - ]), - }; - - Ok(resp) -} - -/// # `GET /_conduwuit/server_version` -/// -/// Conduwuit-specific API to get the server version, results akin to -/// `/_matrix/federation/v1/version` -pub(crate) async fn conduwuit_server_version() -> Result { - Ok(Json(serde_json::json!({ - "name": conduwuit::version::name(), - "version": conduwuit::version::version(), - }))) -} - -/// # `GET /_conduwuit/local_user_count` -/// -/// conduwuit-specific API to return the amount of users registered on this -/// homeserver. Endpoint is disabled if federation is disabled for privacy. 
This -/// only includes active users (not deactivated, no guests, etc) -pub(crate) async fn conduwuit_local_user_count( - State(services): State, -) -> Result { - let user_count = services.users.list_local_users().count().await; - - Ok(Json(serde_json::json!({ - "count": user_count - }))) -} diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs deleted file mode 100644 index 748fc049..00000000 --- a/src/api/client/user_directory.rs +++ /dev/null @@ -1,88 +0,0 @@ -use axum::extract::State; -use conduwuit::{ - Result, - utils::{ - future::BoolExt, - stream::{BroadbandExt, ReadyExt}, - }, -}; -use futures::{FutureExt, StreamExt, pin_mut}; -use ruma::{ - api::client::user_directory::search_users::{self}, - events::room::join_rules::JoinRule, -}; - -use crate::Ruma; - -// conduwuit can handle a lot more results than synapse -const LIMIT_MAX: usize = 500; -const LIMIT_DEFAULT: usize = 10; - -/// # `POST /_matrix/client/r0/user_directory/search` -/// -/// Searches all known users for a match. -/// -/// - Hides any local users that aren't in any public rooms (i.e. 
those that -/// have the join rule set to public) and don't share a room with the sender -pub(crate) async fn search_users_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - let limit = usize::try_from(body.limit) - .map_or(LIMIT_DEFAULT, usize::from) - .min(LIMIT_MAX); - - let search_term = body.search_term.to_lowercase(); - let mut users = services - .users - .stream() - .ready_filter(|user_id| user_id.as_str().to_lowercase().contains(&search_term)) - .map(ToOwned::to_owned) - .broad_filter_map(async |user_id| { - let display_name = services.users.displayname(&user_id).await.ok(); - - let display_name_matches = display_name - .as_deref() - .map(str::to_lowercase) - .is_some_and(|display_name| display_name.contains(&search_term)); - - if !display_name_matches { - return None; - } - - let user_in_public_room = services - .rooms - .state_cache - .rooms_joined(&user_id) - .map(ToOwned::to_owned) - .broad_any(async |room_id| { - services - .rooms - .state_accessor - .get_join_rules(&room_id) - .map(|rule| matches!(rule, JoinRule::Public)) - .await - }); - - let user_sees_user = services - .rooms - .state_cache - .user_sees_user(sender_user, &user_id); - - pin_mut!(user_in_public_room, user_sees_user); - user_in_public_room - .or(user_sees_user) - .await - .then_some(search_users::v3::User { - user_id: user_id.clone(), - display_name, - avatar_url: services.users.avatar_url(&user_id).await.ok(), - }) - }); - - let results = users.by_ref().take(limit).collect().await; - let limited = users.next().await.is_some(); - - Ok(search_users::v3::Response { results, limited }) -} diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs deleted file mode 100644 index 91991d24..00000000 --- a/src/api/client/voip.rs +++ /dev/null @@ -1,68 +0,0 @@ -use std::time::{Duration, SystemTime}; - -use axum::extract::State; -use base64::{Engine as _, engine::general_purpose}; -use conduwuit::{Err, Result, utils}; -use hmac::{Hmac, 
Mac}; -use ruma::{SecondsSinceUnixEpoch, UserId, api::client::voip::get_turn_server_info}; -use sha1::Sha1; - -use crate::Ruma; - -const RANDOM_USER_ID_LENGTH: usize = 10; - -type HmacSha1 = Hmac; - -/// # `GET /_matrix/client/r0/voip/turnServer` -/// -/// TODO: Returns information about the recommended turn server. -pub(crate) async fn turn_server_route( - State(services): State, - body: Ruma, -) -> Result { - // MSC4166: return M_NOT_FOUND 404 if no TURN URIs are specified in any way - if services.server.config.turn_uris.is_empty() { - return Err!(Request(NotFound("Not Found"))); - } - - let turn_secret = services.globals.turn_secret.clone(); - - let (username, password) = if !turn_secret.is_empty() { - let expiry = SecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(services.globals.turn_ttl())) - .expect("TURN TTL should not get this high"), - ) - .expect("time is valid"); - - let user = body.sender_user.unwrap_or_else(|| { - UserId::parse_with_server_name( - utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), - &services.server.name, - ) - .unwrap() - }); - - let username: String = format!("{}:{}", expiry.get(), user); - - let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes()) - .expect("HMAC can take key of any size"); - mac.update(username.as_bytes()); - - let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes()); - - (username, password) - } else { - ( - services.globals.turn_username().clone(), - services.globals.turn_password().clone(), - ) - }; - - Ok(get_turn_server_info::v3::Response { - username, - password, - uris: services.globals.turn_uris().to_vec(), - ttl: Duration::from_secs(services.globals.turn_ttl()), - }) -} diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs deleted file mode 100644 index eedab981..00000000 --- a/src/api/client/well_known.rs +++ /dev/null @@ -1,99 +0,0 @@ -use axum::{Json, extract::State, response::IntoResponse}; -use 
conduwuit::{Error, Result}; -use ruma::api::client::{ - discovery::{ - discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, - discover_support::{self, Contact}, - }, - error::ErrorKind, -}; - -use crate::Ruma; - -/// # `GET /.well-known/matrix/client` -/// -/// Returns the .well-known URL if it is configured, otherwise returns 404. -pub(crate) async fn well_known_client( - State(services): State, - _body: Ruma, -) -> Result { - let client_url = match services.server.config.well_known.client.as_ref() { - | Some(url) => url.to_string(), - | None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), - }; - - Ok(discover_homeserver::Response { - homeserver: HomeserverInfo { base_url: client_url.clone() }, - identity_server: None, - sliding_sync_proxy: Some(SlidingSyncProxyInfo { url: client_url }), - tile_server: None, - }) -} - -/// # `GET /.well-known/matrix/support` -/// -/// Server support contact and support page of a homeserver's domain. -pub(crate) async fn well_known_support( - State(services): State, - _body: Ruma, -) -> Result { - let support_page = services - .server - .config - .well_known - .support_page - .as_ref() - .map(ToString::to_string); - - let role = services.server.config.well_known.support_role.clone(); - - // support page or role must be either defined for this to be valid - if support_page.is_none() && role.is_none() { - return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); - } - - let email_address = services.server.config.well_known.support_email.clone(); - let matrix_id = services.server.config.well_known.support_mxid.clone(); - - // if a role is specified, an email address or matrix id is required - if role.is_some() && (email_address.is_none() && matrix_id.is_none()) { - return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); - } - - // TOOD: support defining multiple contacts in the config - let mut contacts: Vec = vec![]; - - if let Some(role) = role { - let contact = Contact { role, 
email_address, matrix_id }; - - contacts.push(contact); - } - - // support page or role+contacts must be either defined for this to be valid - if contacts.is_empty() && support_page.is_none() { - return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); - } - - Ok(discover_support::Response { contacts, support_page }) -} - -/// # `GET /client/server.json` -/// -/// Endpoint provided by sliding sync proxy used by some clients such as Element -/// Web as a non-standard health check. -pub(crate) async fn syncv3_client_server_json( - State(services): State, -) -> Result { - let server_url = match services.server.config.well_known.client.as_ref() { - | Some(url) => url.to_string(), - | None => match services.server.config.well_known.server.as_ref() { - | Some(url) => url.to_string(), - | None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), - }, - }; - - Ok(Json(serde_json::json!({ - "server": server_url, - "version": conduwuit::version(), - }))) -} diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs new file mode 100644 index 00000000..f74cdaa7 --- /dev/null +++ b/src/api/client_server/account.rs @@ -0,0 +1,585 @@ +use register::RegistrationKind; +use ruma::{ + api::client::{ + account::{ + change_password, deactivate, get_3pids, get_username_availability, + register::{self, LoginType}, + request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, + ThirdPartyIdRemovalStatus, + }, + error::ErrorKind, + uiaa::{AuthFlow, AuthType, UiaaInfo}, + }, + events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType}, + push, UserId, +}; +use tracing::{error, info, warn}; + +use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; +use crate::{ + api::client_server::{self, join_room_by_id_helper}, + service, services, utils, Error, Result, Ruma, +}; + +const RANDOM_USER_ID_LENGTH: usize = 10; + +/// # `GET /_matrix/client/v3/register/available` +/// +/// Checks if a 
username is valid and available on this server. +/// +/// Conditions for returning true: +/// - The user id is not historical +/// - The server name of the user id matches this server +/// - No user or appservice on this server already claimed this username +/// +/// Note: This will not reserve the username, so the username might become +/// invalid when trying to register +pub async fn get_register_available_route( + body: Ruma, +) -> Result { + // Validate user id + let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), services().globals.server_name()) + .ok() + .filter(|user_id| !user_id.is_historical() && user_id.server_name() == services().globals.server_name()) + .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + + // Check if username is creative enough + if services().users.exists(&user_id)? { + return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); + } + + if services() + .globals + .forbidden_usernames() + .is_match(user_id.localpart()) + { + return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + } + + // TODO add check for appservice namespaces + + // If no if check is true we have an username that's available to be used. + Ok(get_username_availability::v3::Response { + available: true, + }) +} + +/// # `POST /_matrix/client/v3/register` +/// +/// Register an account on this homeserver. +/// +/// You can use [`GET +/// /_matrix/client/v3/register/available`](fn.get_register_available_route. +/// html) to check if the user id is valid and available. 
+/// +/// - Only works if registration is enabled +/// - If type is guest: ignores all parameters except +/// initial_device_display_name +/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage) +/// - If type is not guest and no username is given: Always fails after UIAA +/// check +/// - Creates a new account and populates it with default account data +/// - If `inhibit_login` is false: Creates a device and returns device id and +/// access_token +#[allow(clippy::doc_markdown)] +pub async fn register_route(body: Ruma) -> Result { + if !services().globals.allow_registration() && body.appservice_info.is_none() { + info!( + "Registration disabled and request not from known appservice, rejecting registration attempt for username \ + {:?}", + body.username + ); + return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration has been disabled.")); + } + + let is_guest = body.kind == RegistrationKind::Guest; + + if is_guest + && (!services().globals.allow_guest_registration() + || (services().globals.allow_registration() && services().globals.config.registration_token.is_some())) + { + info!( + "Guest registration disabled / registration enabled with token configured, rejecting guest registration, \ + initial device name: {:?}", + body.initial_device_display_name + ); + return Err(Error::BadRequest( + ErrorKind::GuestAccessForbidden, + "Guest registration is disabled.", + )); + } + + // forbid guests from registering if there is not a real admin user yet. give + // generic user error. + if is_guest && services().users.count()? < 2 { + warn!( + "Guest account attempted to register before a real admin user has been registered, rejecting \ + registration. 
Guest's initial device name: {:?}", + body.initial_device_display_name + ); + return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration temporarily disabled.")); + } + + let user_id = match (&body.username, is_guest) { + (Some(username), false) => { + let proposed_user_id = + UserId::parse_with_server_name(username.to_lowercase(), services().globals.server_name()) + .ok() + .filter(|user_id| { + !user_id.is_historical() && user_id.server_name() == services().globals.server_name() + }) + .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + + if services().users.exists(&proposed_user_id)? { + return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); + } + + if services() + .globals + .forbidden_usernames() + .is_match(proposed_user_id.localpart()) + { + return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + } + + proposed_user_id + }, + _ => loop { + let proposed_user_id = UserId::parse_with_server_name( + utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), + services().globals.server_name(), + ) + .unwrap(); + if !services().users.exists(&proposed_user_id)? 
{ + break proposed_user_id; + } + }, + }; + + if body.body.login_type == Some(LoginType::ApplicationService) { + if let Some(ref info) = body.appservice_info { + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace.")); + } + } else { + return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing appservice token.")); + } + } else if services().appservice.is_exclusive_user_id(&user_id).await { + return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); + } + + // UIAA + let mut uiaainfo; + let skip_auth; + if services().globals.config.registration_token.is_some() { + // Registration token required + uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::RegistrationToken], + }], + completed: Vec::new(), + params: Box::default(), + session: None, + auth_error: None, + }; + skip_auth = body.appservice_info.is_some(); + } else { + // No registration token necessary, but clients must still go through the flow + uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::Dummy], + }], + completed: Vec::new(), + params: Box::default(), + session: None, + auth_error: None, + }; + skip_auth = body.appservice_info.is_some() || is_guest; + } + + if !skip_auth { + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = services().uiaa.try_auth( + &UserId::parse_with_server_name("", services().globals.server_name()).expect("we know this is valid"), + "".into(), + auth, + &uiaainfo, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services().uiaa.create( + &UserId::parse_with_server_name("", services().globals.server_name()).expect("we know this is valid"), + "".into(), + &uiaainfo, + &json, + )?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + } + + let password = if is_guest { + None + } else { + body.password.as_deref() + }; + + // Create user + services().users.create(&user_id, password)?; + + // Default to pretty displayname + let mut displayname = user_id.localpart().to_owned(); + + // If `new_user_displayname_suffix` is set, registration will push whatever + // content is set to the user's display name with a space before it + if !services().globals.new_user_displayname_suffix().is_empty() { + displayname.push_str(&(" ".to_owned() + services().globals.new_user_displayname_suffix())); + } + + services() + .users + .set_displayname(&user_id, Some(displayname.clone())) + .await?; + + // Initial account data + services().account_data.update( + None, + &user_id, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: push::Ruleset::server_default(&user_id), + }, + }) + .expect("to json always works"), + )?; + + // Inhibit login does not work for guests + if !is_guest && body.inhibit_login { + return Ok(register::v3::Response { + access_token: None, + user_id, + device_id: None, + refresh_token: None, + expires_in: None, + }); + } + + // Generate new device id if the user didn't specify one + let device_id = if is_guest { + None + } else { + body.device_id.clone() + } + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); + + // Generate new token for the device + let token = utils::random_string(TOKEN_LENGTH); + + // Create device for this account + 
services() + .users + .create_device(&user_id, &device_id, &token, body.initial_device_display_name.clone())?; + + info!("New user \"{}\" registered on this server.", user_id); + + // log in conduit admin channel if a non-guest user registered + if body.appservice_info.is_none() && !is_guest { + services() + .admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "New user \"{user_id}\" registered on this server." + ))); + } + + // log in conduit admin channel if a guest registered + if body.appservice_info.is_none() && is_guest && services().globals.log_guest_registrations() { + if let Some(device_display_name) = &body.initial_device_display_name { + if body + .initial_device_display_name + .as_ref() + .is_some_and(|device_display_name| !device_display_name.is_empty()) + { + services() + .admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "Guest user \"{user_id}\" with device display name `{device_display_name}` registered on this \ + server." + ))); + } else { + services() + .admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "Guest user \"{user_id}\" with no device display name registered on this server.", + ))); + } + } else { + services() + .admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "Guest user \"{user_id}\" with no device display name registered on this server.", + ))); + } + } + + // If this is the first real user, grant them admin privileges except for guest + // users Note: the server user, @conduit:servername, is generated first + if !is_guest { + if let Some(admin_room) = service::admin::Service::get_admin_room()? { + if services() + .rooms + .state_cache + .room_joined_count(&admin_room)? 
+ == Some(1) + { + services() + .admin + .make_user_admin(&user_id, displayname) + .await?; + + warn!("Granting {} admin privileges as the first user", user_id); + } + } + } + + if body.appservice_info.is_none() + && !services().globals.config.auto_join_rooms.is_empty() + && (services().globals.allow_guests_auto_join_rooms() || !is_guest) + { + for room in &services().globals.config.auto_join_rooms { + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), room)? + { + warn!("Skipping room {room} to automatically join as we have never joined before."); + continue; + } + + if let Some(room_id_server_name) = room.server_name() { + if let Err(e) = join_room_by_id_helper( + Some(&user_id), + room, + Some("Automatically joining this room upon registration".to_owned()), + &[room_id_server_name.to_owned(), services().globals.server_name().to_owned()], + None, + ) + .await + { + // don't return this error so we don't fail registrations + error!("Failed to automatically join room {room} for user {user_id}: {e}"); + } else { + info!("Automatically joined room {room} for user {user_id}"); + }; + } + } + } + + Ok(register::v3::Response { + access_token: Some(token), + user_id, + device_id: Some(device_id), + refresh_token: None, + expires_in: None, + }) +} + +/// # `POST /_matrix/client/r0/account/password` +/// +/// Changes the password of this account. 
+/// +/// - Requires UIAA to verify user password +/// - Changes the password of the sender user +/// - The password hash is calculated using argon2 with 32 character salt, the +/// plain password is +/// not saved +/// +/// If logout_devices is true it does the following for each device except the +/// sender device: +/// - Invalidates access token +/// - Deletes device metadata (device id, device display name, last seen ip, +/// last seen ts) +/// - Forgets to-device events +/// - Triggers device list updates +pub async fn change_password_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::Password], + }], + completed: Vec::new(), + params: Box::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services() + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + + services() + .users + .set_password(sender_user, Some(&body.new_password))?; + + if body.logout_devices { + // Logout all devices except the current one + for id in services() + .users + .all_device_ids(sender_user) + .filter_map(Result::ok) + .filter(|id| id != sender_device) + { + services().users.remove_device(sender_user, &id)?; + } + } + + info!("User {} changed their password.", sender_user); + services() + .admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "User {sender_user} changed their password." 
+ ))); + + Ok(change_password::v3::Response {}) +} + +/// # `GET _matrix/client/r0/account/whoami` +/// +/// Get `user_id` of the sender user. +/// +/// Note: Also works for Application Services +pub async fn whoami_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let device_id = body.sender_device.clone(); + + Ok(whoami::v3::Response { + user_id: sender_user.clone(), + device_id, + is_guest: services().users.is_deactivated(sender_user)? && body.appservice_info.is_none(), + }) +} + +/// # `POST /_matrix/client/r0/account/deactivate` +/// +/// Deactivate sender user account. +/// +/// - Leaves all rooms and rejects all invitations +/// - Invalidates all access tokens +/// - Deletes all device metadata (device id, device display name, last seen ip, +/// last seen ts) +/// - Forgets all to-device events +/// - Triggers device list updates +/// - Removes ability to log in again +pub async fn deactivate_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::Password], + }], + completed: Vec::new(), + params: Box::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services() + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + + // Make the user leave all rooms before deactivation + client_server::leave_all_rooms(sender_user).await?; + + // Remove devices and mark account as deactivated + services().users.deactivate_account(sender_user)?; + + info!("User {} deactivated their account.", sender_user); + services() + .admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "User {sender_user} deactivated their account." + ))); + + Ok(deactivate::v3::Response { + id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, + }) +} + +/// # `GET _matrix/client/v3/account/3pid` +/// +/// Get a list of third party identifiers associated with this account. +/// +/// - Currently always returns empty list +pub async fn third_party_route(body: Ruma) -> Result { + let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + Ok(get_3pids::v3::Response::new(Vec::new())) +} + +/// # `POST /_matrix/client/v3/account/3pid/email/requestToken` +/// +/// "This API should be used to request validation tokens when adding an email +/// address to an account" +/// +/// - 403 signals that The homeserver does not allow the third party identifier +/// as a contact option. +pub async fn request_3pid_management_token_via_email_route( + _body: Ruma, +) -> Result { + Err(Error::BadRequest( + ErrorKind::ThreepidDenied, + "Third party identifier is not allowed", + )) +} + +/// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken` +/// +/// "This API should be used to request validation tokens when adding an phone +/// number to an account" +/// +/// - 403 signals that The homeserver does not allow the third party identifier +/// as a contact option. 
+pub async fn request_3pid_management_token_via_msisdn_route( + _body: Ruma, +) -> Result { + Err(Error::BadRequest( + ErrorKind::ThreepidDenied, + "Third party identifier is not allowed", + )) +} diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs new file mode 100644 index 00000000..3fafee72 --- /dev/null +++ b/src/api/client_server/alias.rs @@ -0,0 +1,236 @@ +use rand::seq::SliceRandom; +use ruma::{ + api::{ + appservice, + client::{ + alias::{create_alias, delete_alias, get_alias}, + error::ErrorKind, + }, + federation, + }, + OwnedRoomAliasId, OwnedServerName, +}; + +use crate::{services, Error, Result, Ruma}; + +/// # `PUT /_matrix/client/v3/directory/room/{roomAlias}` +/// +/// Creates a new room alias on this server. +pub async fn create_alias_route(body: Ruma) -> Result { + if body.room_alias.server_name() != services().globals.server_name() { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Alias is from another server.")); + } + + if services() + .globals + .forbidden_alias_names() + .is_match(body.room_alias.alias()) + { + return Err(Error::BadRequest(ErrorKind::Unknown, "Room alias is forbidden.")); + } + + if let Some(ref info) = body.appservice_info { + if !info.aliases.is_match(body.room_alias.as_str()) { + return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias is not in namespace.")); + } + } else if services() + .appservice + .is_exclusive_alias(&body.room_alias) + .await + { + return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias reserved by appservice.")); + } + + if services() + .rooms + .alias + .resolve_local_alias(&body.room_alias)? + .is_some() + { + return Err(Error::Conflict("Alias already exists.")); + } + + if services() + .rooms + .alias + .set_alias(&body.room_alias, &body.room_id) + .is_err() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid room alias. 
Alias must be in the form of '#localpart:server_name'", + )); + }; + + Ok(create_alias::v3::Response::new()) +} + +/// # `DELETE /_matrix/client/v3/directory/room/{roomAlias}` +/// +/// Deletes a room alias from this server. +/// +/// - TODO: additional access control checks +/// - TODO: Update canonical alias event +pub async fn delete_alias_route(body: Ruma) -> Result { + if body.room_alias.server_name() != services().globals.server_name() { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Alias is from another server.")); + } + + if services() + .rooms + .alias + .resolve_local_alias(&body.room_alias)? + .is_none() + { + return Err(Error::BadRequest(ErrorKind::NotFound, "Alias does not exist.")); + } + + if let Some(ref info) = body.appservice_info { + if !info.aliases.is_match(body.room_alias.as_str()) { + return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias is not in namespace.")); + } + } else if services() + .appservice + .is_exclusive_alias(&body.room_alias) + .await + { + return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias reserved by appservice.")); + } + + if services() + .rooms + .alias + .remove_alias(&body.room_alias) + .is_err() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid room alias. Alias must be in the form of '#localpart:server_name'", + )); + }; + + // TODO: update alt_aliases? + + Ok(delete_alias::v3::Response::new()) +} + +/// # `GET /_matrix/client/v3/directory/room/{roomAlias}` +/// +/// Resolve an alias locally or over federation. 
+pub async fn get_alias_route(body: Ruma) -> Result { + get_alias_helper(body.body.room_alias).await +} + +pub(crate) async fn get_alias_helper(room_alias: OwnedRoomAliasId) -> Result { + if room_alias.server_name() != services().globals.server_name() { + let response = services() + .sending + .send_federation_request( + room_alias.server_name(), + federation::query::get_room_information::v1::Request { + room_alias: room_alias.clone(), + }, + ) + .await?; + + let room_id = response.room_id; + + let mut servers = response.servers; + + // since the room alias server_name responded, insert it into the list + servers.push(room_alias.server_name().into()); + + // find active servers in room state cache to suggest + servers.extend( + services() + .rooms + .state_cache + .room_servers(&room_id) + .filter_map(Result::ok), + ); + + servers.sort_unstable(); + servers.dedup(); + + // shuffle list of servers randomly after sort and dedupe + servers.shuffle(&mut rand::thread_rng()); + + // prefer the very first server to be ourselves if available, else prefer the + // room alias server first + if let Some(server_index) = servers + .iter() + .position(|server| server == services().globals.server_name()) + { + servers.remove(server_index); + servers.insert(0, services().globals.server_name().to_owned()); + } else if let Some(alias_server_index) = servers + .iter() + .position(|server| server == room_alias.server_name()) + { + servers.remove(alias_server_index); + servers.insert(0, room_alias.server_name().into()); + } + + return Ok(get_alias::v3::Response::new(room_id, servers)); + } + + let mut room_id = None; + match services().rooms.alias.resolve_local_alias(&room_alias)? 
{ + Some(r) => room_id = Some(r), + None => { + for appservice in services().appservice.read().await.values() { + if appservice.aliases.is_match(room_alias.as_str()) + && matches!( + services() + .sending + .send_appservice_request( + appservice.registration.clone(), + appservice::query::query_room_alias::v1::Request { + room_alias: room_alias.clone(), + }, + ) + .await, + Ok(Some(_opt_result)) + ) { + room_id = Some( + services() + .rooms + .alias + .resolve_local_alias(&room_alias)? + .ok_or_else(|| Error::bad_config("Room does not exist."))?, + ); + break; + } + } + }, + }; + + let Some(room_id) = room_id else { + return Err(Error::BadRequest(ErrorKind::NotFound, "Room with alias not found.")); + }; + + // find active servers in room state cache to suggest + let mut servers: Vec = services() + .rooms + .state_cache + .room_servers(&room_id) + .filter_map(Result::ok) + .collect(); + + servers.sort_unstable(); + servers.dedup(); + + // shuffle list of servers randomly after sort and dedupe + servers.shuffle(&mut rand::thread_rng()); + + // insert our server as the very first choice if in list + if let Some(server_index) = servers + .iter() + .position(|server| server == services().globals.server_name()) + { + servers.remove(server_index); + servers.insert(0, services().globals.server_name().to_owned()); + } + + Ok(get_alias::v3::Response::new(room_id, servers)) +} diff --git a/src/api/client_server/backup.rs b/src/api/client_server/backup.rs new file mode 100644 index 00000000..517a2f60 --- /dev/null +++ b/src/api/client_server/backup.rs @@ -0,0 +1,348 @@ +use ruma::api::client::{ + backup::{ + add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, + delete_backup_keys, delete_backup_keys_for_room, delete_backup_keys_for_session, delete_backup_version, + get_backup_info, get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session, + get_latest_backup_info, update_backup_version, + }, + error::ErrorKind, +}; + +use 
crate::{services, Error, Result, Ruma}; + +/// # `POST /_matrix/client/r0/room_keys/version` +/// +/// Creates a new backup. +pub async fn create_backup_version_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let version = services() + .key_backups + .create_backup(sender_user, &body.algorithm)?; + + Ok(create_backup_version::v3::Response { + version, + }) +} + +/// # `PUT /_matrix/client/r0/room_keys/version/{version}` +/// +/// Update information about an existing backup. Only `auth_data` can be +/// modified. +pub async fn update_backup_version_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + services() + .key_backups + .update_backup(sender_user, &body.version, &body.algorithm)?; + + Ok(update_backup_version::v3::Response {}) +} + +/// # `GET /_matrix/client/r0/room_keys/version` +/// +/// Get information about the latest backup version. +pub async fn get_latest_backup_info_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let (version, algorithm) = services() + .key_backups + .get_latest_backup(sender_user)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Key backup does not exist."))?; + + Ok(get_latest_backup_info::v3::Response { + algorithm, + count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &version)?, + version, + }) +} + +/// # `GET /_matrix/client/r0/room_keys/version` +/// +/// Get information about an existing backup. +pub async fn get_backup_info_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let algorithm = services() + .key_backups + .get_backup(sender_user, &body.version)? 
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Key backup does not exist."))?; + + Ok(get_backup_info::v3::Response { + algorithm, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + version: body.version.clone(), + }) +} + +/// # `DELETE /_matrix/client/r0/room_keys/version/{version}` +/// +/// Delete an existing key backup. +/// +/// - Deletes both information about the backup, as well as all key data related +/// to the backup +pub async fn delete_backup_version_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .key_backups + .delete_backup(sender_user, &body.version)?; + + Ok(delete_backup_version::v3::Response {}) +} + +/// # `PUT /_matrix/client/r0/room_keys/keys` +/// +/// Add the received backup keys to the database. +/// +/// - Only manipulating the most recently created version of the backup is +/// allowed +/// - Adds the keys to the backup +/// - Returns the new number of keys in this backup and the etag +pub async fn add_backup_keys_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if Some(&body.version) + != services() + .key_backups + .get_latest_backup_version(sender_user)? + .as_ref() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "You may only manipulate the most recently created version of the backup.", + )); + } + + for (room_id, room) in &body.rooms { + for (session_id, key_data) in &room.sessions { + services() + .key_backups + .add_key(sender_user, &body.version, room_id, session_id, key_data)?; + } + } + + Ok(add_backup_keys::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? 
as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} + +/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}` +/// +/// Add the received backup keys to the database. +/// +/// - Only manipulating the most recently created version of the backup is +/// allowed +/// - Adds the keys to the backup +/// - Returns the new number of keys in this backup and the etag +pub async fn add_backup_keys_for_room_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if Some(&body.version) + != services() + .key_backups + .get_latest_backup_version(sender_user)? + .as_ref() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "You may only manipulate the most recently created version of the backup.", + )); + } + + for (session_id, key_data) in &body.sessions { + services() + .key_backups + .add_key(sender_user, &body.version, &body.room_id, session_id, key_data)?; + } + + Ok(add_backup_keys_for_room::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} + +/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` +/// +/// Add the received backup key to the database. +/// +/// - Only manipulating the most recently created version of the backup is +/// allowed +/// - Adds the keys to the backup +/// - Returns the new number of keys in this backup and the etag +pub async fn add_backup_keys_for_session_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if Some(&body.version) + != services() + .key_backups + .get_latest_backup_version(sender_user)? 
+ .as_ref() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "You may only manipulate the most recently created version of the backup.", + )); + } + + services() + .key_backups + .add_key(sender_user, &body.version, &body.room_id, &body.session_id, &body.session_data)?; + + Ok(add_backup_keys_for_session::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} + +/// # `GET /_matrix/client/r0/room_keys/keys` +/// +/// Retrieves all keys from the backup. +pub async fn get_backup_keys_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let rooms = services().key_backups.get_all(sender_user, &body.version)?; + + Ok(get_backup_keys::v3::Response { + rooms, + }) +} + +/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` +/// +/// Retrieves all keys from the backup for a given room. +pub async fn get_backup_keys_for_room_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let sessions = services() + .key_backups + .get_room(sender_user, &body.version, &body.room_id)?; + + Ok(get_backup_keys_for_room::v3::Response { + sessions, + }) +} + +/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` +/// +/// Retrieves a key from the backup. +pub async fn get_backup_keys_for_session_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let key_data = services() + .key_backups + .get_session(sender_user, &body.version, &body.room_id, &body.session_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Backup key not found for this user's session.", + ))?; + + Ok(get_backup_keys_for_session::v3::Response { + key_data, + }) +} + +/// # `DELETE /_matrix/client/r0/room_keys/keys` +/// +/// Delete the keys from the backup. 
+pub async fn delete_backup_keys_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .key_backups + .delete_all_keys(sender_user, &body.version)?; + + Ok(delete_backup_keys::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} + +/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` +/// +/// Delete the keys from the backup for a given room. +pub async fn delete_backup_keys_for_room_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .key_backups + .delete_room_keys(sender_user, &body.version, &body.room_id)?; + + Ok(delete_backup_keys_for_room::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} + +/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` +/// +/// Delete a key from the backup. +pub async fn delete_backup_keys_for_session_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .key_backups + .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?; + + Ok(delete_backup_keys_for_session::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? 
as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs new file mode 100644 index 00000000..7aae8465 --- /dev/null +++ b/src/api/client_server/capabilities.rs @@ -0,0 +1,51 @@ +use std::collections::BTreeMap; + +use ruma::api::client::discovery::get_capabilities::{ + self, Capabilities, ChangePasswordCapability, RoomVersionStability, RoomVersionsCapability, SetAvatarUrlCapability, + SetDisplayNameCapability, ThirdPartyIdChangesCapability, +}; + +use crate::{services, Result, Ruma}; + +/// # `GET /_matrix/client/v3/capabilities` +/// +/// Get information on the supported feature set and other relevent capabilities +/// of this server. +pub async fn get_capabilities_route( + _body: Ruma, +) -> Result { + let mut available = BTreeMap::new(); + for room_version in &services().globals.unstable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Unstable); + } + for room_version in &services().globals.stable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Stable); + } + + let mut capabilities = Capabilities::new(); + capabilities.room_versions = RoomVersionsCapability { + default: services().globals.default_room_version(), + available, + }; + + capabilities.change_password = ChangePasswordCapability { + enabled: true, + }; + + capabilities.set_avatar_url = SetAvatarUrlCapability { + enabled: true, + }; + + capabilities.set_displayname = SetDisplayNameCapability { + enabled: true, + }; + + // conduit does not implement 3PID stuff + capabilities.thirdparty_id_changes = ThirdPartyIdChangesCapability { + enabled: false, + }; + + Ok(get_capabilities::v3::Response { + capabilities, + }) +} diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs new file mode 100644 index 00000000..247b4ef8 --- /dev/null +++ b/src/api/client_server/config.rs @@ -0,0 +1,118 @@ 
+use ruma::{ + api::client::{ + config::{get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data}, + error::ErrorKind, + }, + events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent}, + serde::Raw, +}; +use serde::Deserialize; +use serde_json::{json, value::RawValue as RawJsonValue}; + +use crate::{services, Error, Result, Ruma}; + +/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` +/// +/// Sets some account data for the sender user. +pub async fn set_global_account_data_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let data: serde_json::Value = serde_json::from_str(body.data.json().get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; + + let event_type = body.event_type.to_string(); + + services().account_data.update( + None, + sender_user, + event_type.clone().into(), + &json!({ + "type": event_type, + "content": data, + }), + )?; + + Ok(set_global_account_data::v3::Response {}) +} + +/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` +/// +/// Sets some room account data for the sender user. +pub async fn set_room_account_data_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let data: serde_json::Value = serde_json::from_str(body.data.json().get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; + + let event_type = body.event_type.to_string(); + + services().account_data.update( + Some(&body.room_id), + sender_user, + event_type.clone().into(), + &json!({ + "type": event_type, + "content": data, + }), + )?; + + Ok(set_room_account_data::v3::Response {}) +} + +/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` +/// +/// Gets some account data for the sender user. 
+pub async fn get_global_account_data_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event: Box = services() + .account_data + .get(None, sender_user, body.event_type.to_string().into())? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? + .content; + + Ok(get_global_account_data::v3::Response { + account_data, + }) +} + +/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` +/// +/// Gets some room account data for the sender user. +pub async fn get_room_account_data_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event: Box = services() + .account_data + .get(Some(&body.room_id), sender_user, body.event_type.clone())? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; + + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? + .content; + + Ok(get_room_account_data::v3::Response { + account_data, + }) +} + +#[derive(Deserialize)] +struct ExtractRoomEventContent { + content: Raw, +} + +#[derive(Deserialize)] +struct ExtractGlobalEventContent { + content: Raw, +} diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs new file mode 100644 index 00000000..0b012219 --- /dev/null +++ b/src/api/client_server/context.rs @@ -0,0 +1,201 @@ +use std::collections::HashSet; + +use ruma::{ + api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, + events::StateEventType, +}; +use tracing::error; + +use crate::{services, Error, Result, Ruma}; + +/// # `GET /_matrix/client/r0/rooms/{roomId}/context` +/// +/// Allows loading room history around an event. 
+/// +/// - Only works if the user is joined (TODO: always allow, but only show events +/// if the user was +/// joined, depending on history_visibility) +pub async fn get_context_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members, + } => (true, *include_redundant_members), + LazyLoadOptions::Disabled => (false, false), + }; + + let mut lazy_loaded = HashSet::new(); + + let base_token = services() + .rooms + .timeline + .get_pdu_count(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Base event id not found."))?; + + let base_event = services() + .rooms + .timeline + .get_pdu(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Base event not found."))?; + + let room_id = base_event.room_id.clone(); + + if !services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &room_id, &body.event_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this event.", + )); + } + + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &room_id, + &base_event.sender, + )? || lazy_load_send_redundant + { + lazy_loaded.insert(base_event.sender.as_str().to_owned()); + } + + // Use limit with maximum 100 + let limit = u64::from(body.limit).min(100) as usize; + + let base_event = base_event.to_room_event(); + + let events_before: Vec<_> = services() + .rooms + .timeline + .pdus_until(sender_user, &room_id, base_token)? 
+ .take(limit / 2) + .filter_map(Result::ok) // Remove buggy events + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .unwrap_or(false) + }) + .collect(); + + for (_, event) in &events_before { + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &room_id, + &event.sender, + )? || lazy_load_send_redundant + { + lazy_loaded.insert(event.sender.as_str().to_owned()); + } + } + + let start_token = events_before + .last() + .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()); + + let events_before: Vec<_> = events_before + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(); + + let events_after: Vec<_> = services() + .rooms + .timeline + .pdus_after(sender_user, &room_id, base_token)? + .take(limit / 2) + .filter_map(Result::ok) // Remove buggy events + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &room_id, &pdu.event_id) + .unwrap_or(false) + }) + .collect(); + + for (_, event) in &events_after { + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &room_id, + &event.sender, + )? || lazy_load_send_redundant + { + lazy_loaded.insert(event.sender.as_str().to_owned()); + } + } + + let shortstatehash = services() + .rooms + .state_accessor + .pdu_shortstatehash( + events_after + .last() + .map_or(&*body.event_id, |(_, e)| &*e.event_id), + )? + .map_or( + services() + .rooms + .state + .get_room_shortstatehash(&room_id)? 
+ .expect("All rooms have state"), + |hash| hash, + ); + + let state_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await?; + + let end_token = events_after + .last() + .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()); + + let events_after: Vec<_> = events_after + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(); + + let mut state = Vec::new(); + + for (shortstatekey, id) in state_ids { + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; + + if event_type != StateEventType::RoomMember { + let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else { + error!("Pdu in state not found: {}", id); + continue; + }; + + state.push(pdu.to_state_event()); + } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { + let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else { + error!("Pdu in state not found: {}", id); + continue; + }; + + state.push(pdu.to_state_event()); + } + } + + let resp = get_context::v3::Response { + start: Some(start_token), + end: Some(end_token), + events_before, + event: Some(base_event), + events_after, + state, + }; + + Ok(resp) +} diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs new file mode 100644 index 00000000..10a38a73 --- /dev/null +++ b/src/api/client_server/device.rs @@ -0,0 +1,163 @@ +use ruma::api::client::{ + device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, + error::ErrorKind, + uiaa::{AuthFlow, AuthType, UiaaInfo}, +}; + +use super::SESSION_ID_LENGTH; +use crate::{services, utils, Error, Result, Ruma}; + +/// # `GET /_matrix/client/r0/devices` +/// +/// Get metadata on all devices of the sender user. 
+pub async fn get_devices_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let devices: Vec = services() + .users + .all_devices_metadata(sender_user) + .filter_map(Result::ok) // Filter out buggy devices + .collect(); + + Ok(get_devices::v3::Response { + devices, + }) +} + +/// # `GET /_matrix/client/r0/devices/{deviceId}` +/// +/// Get metadata on a single device of the sender user. +pub async fn get_device_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let device = services() + .users + .get_device_metadata(sender_user, &body.body.device_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; + + Ok(get_device::v3::Response { + device, + }) +} + +/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// +/// Updates the metadata on a given device of the sender user. +pub async fn update_device_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let mut device = services() + .users + .get_device_metadata(sender_user, &body.device_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; + + device.display_name.clone_from(&body.display_name); + + services() + .users + .update_device_metadata(sender_user, &body.device_id, &device)?; + + Ok(update_device::v3::Response {}) +} + +/// # `DELETE /_matrix/client/r0/devices/{deviceId}` +/// +/// Deletes the given device. 
+/// +/// - Requires UIAA to verify user password +/// - Invalidates access token +/// - Deletes device metadata (device id, device display name, last seen ip, +/// last seen ts) +/// - Forgets to-device events +/// - Triggers device list updates +pub async fn delete_device_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + // UIAA + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::Password], + }], + completed: Vec::new(), + params: Box::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services() + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + + services() + .users + .remove_device(sender_user, &body.device_id)?; + + Ok(delete_device::v3::Response {}) +} + +/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// +/// Deletes the given device. 
+/// +/// - Requires UIAA to verify user password +/// +/// For each device: +/// - Invalidates access token +/// - Deletes device metadata (device id, device display name, last seen ip, +/// last seen ts) +/// - Forgets to-device events +/// - Triggers device list updates +pub async fn delete_devices_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + // UIAA + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::Password], + }], + completed: Vec::new(), + params: Box::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services() + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + + for device_id in &body.devices { + services().users.remove_device(sender_user, device_id)?; + } + + Ok(delete_devices::v3::Response {}) +} diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs new file mode 100644 index 00000000..f1d70e9a --- /dev/null +++ b/src/api/client_server/directory.rs @@ -0,0 +1,392 @@ +use ruma::{ + api::{ + client::{ + directory::{get_public_rooms, get_public_rooms_filtered, get_room_visibility, set_room_visibility}, + error::ErrorKind, + room, + }, + federation, + }, + directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, + events::{ + room::{ + avatar::RoomAvatarEventContent, + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + 
guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + topic::RoomTopicEventContent, + }, + StateEventType, + }, + ServerName, UInt, +}; +use tracing::{error, info, warn}; + +use crate::{services, Error, Result, Ruma}; + +/// # `POST /_matrix/client/v3/publicRooms` +/// +/// Lists the public rooms on this server. +/// +/// - Rooms are ordered by the number of joined members +pub async fn get_public_rooms_filtered_route( + body: Ruma, +) -> Result { + if let Some(server) = &body.server { + if services() + .globals + .forbidden_remote_room_directory_server_names() + .contains(server) + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + } + + let response = get_public_rooms_filtered_helper( + body.server.as_deref(), + body.limit, + body.since.as_deref(), + &body.filter, + &body.room_network, + ) + .await + .map_err(|e| { + warn!("Failed to return our /publicRooms: {e}"); + Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.") + })?; + + Ok(response) +} + +/// # `GET /_matrix/client/v3/publicRooms` +/// +/// Lists the public rooms on this server. 
+/// +/// - Rooms are ordered by the number of joined members +pub async fn get_public_rooms_route( + body: Ruma, +) -> Result { + if let Some(server) = &body.server { + if services() + .globals + .forbidden_remote_room_directory_server_names() + .contains(server) + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + } + + let response = get_public_rooms_filtered_helper( + body.server.as_deref(), + body.limit, + body.since.as_deref(), + &Filter::default(), + &RoomNetwork::Matrix, + ) + .await + .map_err(|e| { + warn!("Failed to return our /publicRooms: {e}"); + Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.") + })?; + + Ok(get_public_rooms::v3::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }) +} + +/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}` +/// +/// Sets the visibility of a given room in the room directory. +/// +/// - TODO: Access control checks +pub async fn set_room_visibility_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services().rooms.metadata.exists(&body.room_id)? { + // Return 404 if the room doesn't exist + return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + } + + match &body.visibility { + room::Visibility::Public => { + if services().globals.config.lockdown_public_room_directory && !services().users.is_admin(sender_user)? 
{ + info!( + "Non-admin user {sender_user} tried to publish {0} to the room directory while \ + \"lockdown_public_room_directory\" is enabled", + body.room_id + ); + + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Publishing rooms to the room directory is not allowed", + )); + } + + services().rooms.directory.set_public(&body.room_id)?; + info!("{sender_user} made {0} public", body.room_id); + }, + room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?, + _ => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room visibility type is not supported.", + )); + }, + } + + Ok(set_room_visibility::v3::Response {}) +} + +/// # `GET /_matrix/client/r0/directory/list/room/{roomId}` +/// +/// Gets the visibility of a given room in the room directory. +pub async fn get_room_visibility_route( + body: Ruma, +) -> Result { + if !services().rooms.metadata.exists(&body.room_id)? { + // Return 404 if the room doesn't exist + return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + } + + Ok(get_room_visibility::v3::Response { + visibility: if services().rooms.directory.is_public_room(&body.room_id)? 
{ + room::Visibility::Public + } else { + room::Visibility::Private + }, + }) +} + +pub(crate) async fn get_public_rooms_filtered_helper( + server: Option<&ServerName>, limit: Option, since: Option<&str>, filter: &Filter, _network: &RoomNetwork, +) -> Result { + if let Some(other_server) = server.filter(|server| *server != services().globals.server_name().as_str()) { + let response = services() + .sending + .send_federation_request( + other_server, + federation::directory::get_public_rooms_filtered::v1::Request { + limit, + since: since.map(ToOwned::to_owned), + filter: Filter { + generic_search_term: filter.generic_search_term.clone(), + room_types: filter.room_types.clone(), + }, + room_network: RoomNetwork::Matrix, + }, + ) + .await?; + + return Ok(get_public_rooms_filtered::v3::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }); + } + + let limit = limit.map_or(10, u64::from); + let mut num_since = 0_u64; + + if let Some(s) = &since { + let mut characters = s.chars(); + let backwards = match characters.next() { + Some('n') => false, + Some('p') => true, + _ => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")), + }; + + num_since = characters + .collect::() + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?; + + if backwards { + num_since = num_since.saturating_sub(limit); + } + } + + let mut all_rooms: Vec<_> = services() + .rooms + .directory + .public_rooms() + .map(|room_id| { + let room_id = room_id?; + + let chunk = PublicRoomsChunk { + canonical_alias: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? 
+ .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomCanonicalAliasEventContent| c.alias) + .map_err(|_| Error::bad_database("Invalid canonical alias event in database.")) + })?, + name: services().rooms.state_accessor.get_name(&room_id)?, + num_joined_members: services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or_else(|| { + warn!("Room {} has no member count", room_id); + 0 + }) + .try_into() + .expect("user count should not be that big"), + topic: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomTopic, "")? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomTopicEventContent| Some(c.topic)) + .map_err(|e| { + error!("Invalid room topic event in database for room {room_id}: {e}"); + Error::bad_database("Invalid room topic event in database.") + }) + }) + .unwrap_or(None), + world_readable: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility == HistoryVisibility::WorldReadable + }) + .map_err(|e| { + error!( + "Invalid room history visibility event in database for room {room_id}, assuming is \"shared\": {e}", + ); + Error::bad_database("Invalid room history visibility event in database.") + })}).unwrap_or(false), + guest_can_join: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomGuestAccessEventContent| c.guest_access == GuestAccess::CanJoin) + .map_err(|_| Error::bad_database("Invalid room guest access event in database.")) + })?, + avatar_url: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? 
+ .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomAvatarEventContent| c.url) + .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) + }) + .transpose()? + // url is now an Option so we must flatten + .flatten(), + join_rule: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| match c.join_rule { + JoinRule::Public => Some(PublicRoomJoinRule::Public), + JoinRule::Knock => Some(PublicRoomJoinRule::Knock), + _ => None, + }) + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") + }) + }) + .transpose()? + .flatten() + .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?, + room_type: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCreate, "")? + .map(|s| { + serde_json::from_str::(s.content.get()).map_err(|e| { + error!("Invalid room create event in database: {}", e); + Error::BadDatabase("Invalid room create event in database.") + }) + }) + .transpose()? 
+ .and_then(|e| e.room_type), + room_id, + }; + Ok(chunk) + }) + .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms + .filter(|chunk| { + if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { + if let Some(name) = &chunk.name { + if name.as_str().to_lowercase().contains(&query) { + return true; + } + } + + if let Some(topic) = &chunk.topic { + if topic.to_lowercase().contains(&query) { + return true; + } + } + + if let Some(canonical_alias) = &chunk.canonical_alias { + if canonical_alias.as_str().to_lowercase().contains(&query) { + return true; + } + } + + false + } else { + // No search term + true + } + }) + // We need to collect all, so we can sort by member count + .collect(); + + all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); + + let total_room_count_estimate = (all_rooms.len() as u32).into(); + + let chunk: Vec<_> = all_rooms + .into_iter() + .skip(num_since as usize) + .take(limit as usize) + .collect(); + + let prev_batch = if num_since == 0 { + None + } else { + Some(format!("p{num_since}")) + }; + + let next_batch = if chunk.len() < limit as usize { + None + } else { + Some(format!("n{}", num_since + limit)) + }; + + Ok(get_public_rooms_filtered::v3::Response { + chunk, + prev_batch, + next_batch, + total_room_count_estimate: Some(total_room_count_estimate), + }) +} diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs new file mode 100644 index 00000000..f0e0f38c --- /dev/null +++ b/src/api/client_server/filter.rs @@ -0,0 +1,30 @@ +use ruma::api::client::{ + error::ErrorKind, + filter::{create_filter, get_filter}, +}; + +use crate::{services, Error, Result, Ruma}; + +/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` +/// +/// Loads a filter that was previously created. 
+/// +/// - A user can only access their own filters +pub async fn get_filter_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let Some(filter) = services().users.get_filter(sender_user, &body.filter_id)? else { + return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")); + }; + + Ok(get_filter::v3::Response::new(filter)) +} + +/// # `PUT /_matrix/client/r0/user/{userId}/filter` +/// +/// Creates a new filter to be used by other endpoints. +pub async fn create_filter_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + Ok(create_filter::v3::Response::new( + services().users.create_filter(sender_user, &body.filter)?, + )) +} diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs new file mode 100644 index 00000000..0e717406 --- /dev/null +++ b/src/api/client_server/keys.rs @@ -0,0 +1,522 @@ +use std::{ + collections::{hash_map, BTreeMap, HashMap, HashSet}, + time::{Duration, Instant}, +}; + +use futures_util::{stream::FuturesUnordered, StreamExt}; +use ruma::{ + api::{ + client::{ + error::ErrorKind, + keys::{claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, upload_signing_keys}, + uiaa::{AuthFlow, AuthType, UiaaInfo}, + }, + federation, + }, + serde::Raw, + DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, +}; +use serde_json::json; +use tracing::{debug, error}; + +use super::SESSION_ID_LENGTH; +use crate::{services, utils, Error, Result, Ruma}; + +/// # `POST /_matrix/client/r0/keys/upload` +/// +/// Publish end-to-end encryption keys for the sender device. +/// +/// - Adds one time keys +/// - If there are no device keys yet: Adds device keys (TODO: merge with +/// existing keys?) 
+pub async fn upload_keys_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + for (key_key, key_value) in &body.one_time_keys { + services() + .users + .add_one_time_key(sender_user, sender_device, key_key, key_value)?; + } + + if let Some(device_keys) = &body.device_keys { + // TODO: merge this and the existing event? + // This check is needed to assure that signatures are kept + if services() + .users + .get_device_keys(sender_user, sender_device)? + .is_none() + { + services() + .users + .add_device_keys(sender_user, sender_device, device_keys)?; + } + } + + Ok(upload_keys::v3::Response { + one_time_key_counts: services() + .users + .count_one_time_keys(sender_user, sender_device)?, + }) +} + +/// # `POST /_matrix/client/r0/keys/query` +/// +/// Get end-to-end encryption keys for the given users. +/// +/// - Always fetches users from other servers over federation +/// - Gets master keys, self-signing keys, user signing keys and device keys. +/// - The master and self-signing keys contain signatures that the user is +/// allowed to see +pub async fn get_keys_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let response = get_keys_helper( + Some(sender_user), + &body.device_keys, + |u| u == sender_user, + true, // Always allow local users to see device names of other local users + ) + .await?; + + Ok(response) +} + +/// # `POST /_matrix/client/r0/keys/claim` +/// +/// Claims one-time keys +pub async fn claim_keys_route(body: Ruma) -> Result { + let response = claim_keys_helper(&body.one_time_keys).await?; + + Ok(response) +} + +/// # `POST /_matrix/client/r0/keys/device_signing/upload` +/// +/// Uploads end-to-end key information for the sender user. 
+/// +/// - Requires UIAA to verify password +pub async fn upload_signing_keys_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + // UIAA + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::Password], + }], + completed: Vec::new(), + params: Box::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services() + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + + if let Some(master_key) = &body.master_key { + services().users.add_cross_signing_keys( + sender_user, + master_key, + &body.self_signing_key, + &body.user_signing_key, + true, // notify so that other users see the new keys + )?; + } + + Ok(upload_signing_keys::v3::Response {}) +} + +/// # `POST /_matrix/client/r0/keys/signatures/upload` +/// +/// Uploads end-to-end key signatures from the sender user. +pub async fn upload_signatures_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + for (user_id, keys) in &body.signed_keys { + for (key_id, key) in keys { + let key = serde_json::to_value(key) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; + + for signature in key + .get("signatures") + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Missing signatures field."))? 
+ .get(sender_user.to_string()) + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid user in signatures field."))? + .as_object() + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature."))? + .clone() + { + // Signature validation? + let signature = ( + signature.0, + signature + .1 + .as_str() + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature value."))? + .to_owned(), + ); + services() + .users + .sign_key(user_id, key_id, signature, sender_user)?; + } + } + } + + Ok(upload_signatures::v3::Response { + failures: BTreeMap::new(), // TODO: integrate + }) +} + +/// # `POST /_matrix/client/r0/keys/changes` +/// +/// Gets a list of users who have updated their device identity keys since the +/// previous sync token. +/// +/// - TODO: left users +pub async fn get_key_changes_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let mut device_list_updates = HashSet::new(); + + device_list_updates.extend( + services() + .users + .keys_changed( + sender_user.as_str(), + body.from + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, + Some( + body.to + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?, + ), + ) + .filter_map(Result::ok), + ); + + for room_id in services() + .rooms + .state_cache + .rooms_joined(sender_user) + .filter_map(Result::ok) + { + device_list_updates.extend( + services() + .users + .keys_changed( + room_id.as_ref(), + body.from + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, + Some( + body.to + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?, + ), + ) + .filter_map(Result::ok), + ); + } + Ok(get_key_changes::v3::Response { + changed: device_list_updates.into_iter().collect(), + left: Vec::new(), // TODO + }) +} + +pub(crate) async fn get_keys_helper bool>( + sender_user: Option<&UserId>, 
device_keys_input: &BTreeMap>, allowed_signatures: F, + include_display_names: bool, +) -> Result { + let mut master_keys = BTreeMap::new(); + let mut self_signing_keys = BTreeMap::new(); + let mut user_signing_keys = BTreeMap::new(); + let mut device_keys = BTreeMap::new(); + + let mut get_over_federation = HashMap::new(); + + for (user_id, device_ids) in device_keys_input { + let user_id: &UserId = user_id; + + if user_id.server_name() != services().globals.server_name() { + get_over_federation + .entry(user_id.server_name()) + .or_insert_with(Vec::new) + .push((user_id, device_ids)); + continue; + } + + if device_ids.is_empty() { + let mut container = BTreeMap::new(); + for device_id in services().users.all_device_ids(user_id) { + let device_id = device_id?; + if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? { + let metadata = services() + .users + .get_device_metadata(user_id, &device_id)? + .ok_or_else(|| Error::bad_database("all_device_keys contained nonexistent device."))?; + + add_unsigned_device_display_name(&mut keys, metadata, include_display_names) + .map_err(|_| Error::bad_database("invalid device keys in database"))?; + + container.insert(device_id, keys); + } + } + device_keys.insert(user_id.to_owned(), container); + } else { + for device_id in device_ids { + let mut container = BTreeMap::new(); + if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? { + let metadata = services() + .users + .get_device_metadata(user_id, device_id)? 
+ .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to get keys for nonexistent device.", + ))?; + + add_unsigned_device_display_name(&mut keys, metadata, include_display_names) + .map_err(|_| Error::bad_database("invalid device keys in database"))?; + container.insert(device_id.to_owned(), keys); + } + device_keys.insert(user_id.to_owned(), container); + } + } + + if let Some(master_key) = services() + .users + .get_master_key(sender_user, user_id, &allowed_signatures)? + { + master_keys.insert(user_id.to_owned(), master_key); + } + if let Some(self_signing_key) = + services() + .users + .get_self_signing_key(sender_user, user_id, &allowed_signatures)? + { + self_signing_keys.insert(user_id.to_owned(), self_signing_key); + } + if Some(user_id) == sender_user { + if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? { + user_signing_keys.insert(user_id.to_owned(), user_signing_key); + } + } + } + + let mut failures = BTreeMap::new(); + + let back_off = |id| async { + match services() + .globals + .bad_query_ratelimiter + .write() + .await + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + } + }; + + let mut futures: FuturesUnordered<_> = get_over_federation + .into_iter() + .map(|(server, vec)| async move { + if let Some((time, tries)) = services() + .globals + .bad_query_ratelimiter + .read() + .await + .get(server) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off query from {:?}", server); + return (server, Err(Error::BadServerResponse("bad query, still backing off"))); + } + } + + let mut device_keys_input_fed = BTreeMap::new(); + for (user_id, 
keys) in vec { + device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); + } + ( + server, + tokio::time::timeout( + Duration::from_secs(90), + services().sending.send_federation_request( + server, + federation::keys::get_keys::v1::Request { + device_keys: device_keys_input_fed, + }, + ), + ) + .await + .map_err(|e| { + error!("get_keys_helper query took too long: {e}"); + Error::BadServerResponse("get_keys_helper query took too long") + }), + ) + }) + .collect(); + + while let Some((server, response)) = futures.next().await { + if let Ok(Ok(response)) = response { + for (user, masterkey) in response.master_keys { + let (master_key_id, mut master_key) = services().users.parse_master_key(&user, &masterkey)?; + + if let Some(our_master_key) = + services() + .users + .get_key(&master_key_id, sender_user, &user, &allowed_signatures)? + { + let (_, our_master_key) = services().users.parse_master_key(&user, &our_master_key)?; + master_key.signatures.extend(our_master_key.signatures); + } + let json = serde_json::to_value(master_key).expect("to_value always works"); + let raw = serde_json::from_value(json).expect("Raw::from_value always works"); + services().users.add_cross_signing_keys( + &user, &raw, &None, &None, + false, /* Dont notify. 
A notification would trigger another key request resulting in an + * endless loop */ + )?; + master_keys.insert(user, raw); + } + + self_signing_keys.extend(response.self_signing_keys); + device_keys.extend(response.device_keys); + } else { + back_off(server.to_owned()).await; + failures.insert(server.to_string(), json!({})); + } + } + + Ok(get_keys::v3::Response { + failures, + device_keys, + master_keys, + self_signing_keys, + user_signing_keys, + }) +} + +fn add_unsigned_device_display_name( + keys: &mut Raw, metadata: ruma::api::client::device::Device, + include_display_names: bool, +) -> serde_json::Result<()> { + if let Some(display_name) = metadata.display_name { + let mut object = keys.deserialize_as::>()?; + + let unsigned = object.entry("unsigned").or_insert_with(|| json!({})); + if let serde_json::Value::Object(unsigned_object) = unsigned { + if include_display_names { + unsigned_object.insert("device_display_name".to_owned(), display_name.into()); + } else { + unsigned_object.insert( + "device_display_name".to_owned(), + Some(metadata.device_id.as_str().to_owned()).into(), + ); + } + } + + *keys = Raw::from_json(serde_json::value::to_raw_value(&object)?); + } + + Ok(()) +} + +pub(crate) async fn claim_keys_helper( + one_time_keys_input: &BTreeMap>, +) -> Result { + let mut one_time_keys = BTreeMap::new(); + + let mut get_over_federation = BTreeMap::new(); + + for (user_id, map) in one_time_keys_input { + if user_id.server_name() != services().globals.server_name() { + get_over_federation + .entry(user_id.server_name()) + .or_insert_with(Vec::new) + .push((user_id, map)); + } + + let mut container = BTreeMap::new(); + for (device_id, key_algorithm) in map { + if let Some(one_time_keys) = services() + .users + .take_one_time_key(user_id, device_id, key_algorithm)? 
+ { + let mut c = BTreeMap::new(); + c.insert(one_time_keys.0, one_time_keys.1); + container.insert(device_id.clone(), c); + } + } + one_time_keys.insert(user_id.clone(), container); + } + + let mut failures = BTreeMap::new(); + + let mut futures: FuturesUnordered<_> = get_over_federation + .into_iter() + .map(|(server, vec)| async move { + let mut one_time_keys_input_fed = BTreeMap::new(); + for (user_id, keys) in vec { + one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); + } + ( + server, + services() + .sending + .send_federation_request( + server, + federation::keys::claim_keys::v1::Request { + one_time_keys: one_time_keys_input_fed, + }, + ) + .await, + ) + }) + .collect(); + + while let Some((server, response)) = futures.next().await { + match response { + Ok(keys) => { + one_time_keys.extend(keys.one_time_keys); + }, + Err(_e) => { + failures.insert(server.to_string(), json!({})); + }, + } + } + + Ok(claim_keys::v3::Response { + failures, + one_time_keys, + }) +} diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs new file mode 100644 index 00000000..40e2b093 --- /dev/null +++ b/src/api/client_server/media.rs @@ -0,0 +1,882 @@ +use std::{io::Cursor, sync::Arc, time::Duration}; + +use image::io::Reader as ImgReader; +use ipaddress::IPAddress; +use reqwest::Url; +use ruma::api::client::{ + error::{ErrorKind, RetryAfter}, + media::{ + create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, + get_media_preview, + }, +}; +use tracing::{debug, error, info, warn}; +use webpage::HTML; + +use crate::{ + service::media::{FileMeta, UrlPreviewData}, + services, utils, Error, Result, Ruma, RumaResponse, +}; + +/// generated MXC ID (`media-id`) length +const MXC_LENGTH: usize = 32; + +/// # `GET /_matrix/media/v3/config` +/// +/// Returns max upload size. 
+pub async fn get_media_config_route( + _body: Ruma, +) -> Result { + Ok(get_media_config::v3::Response { + upload_size: services().globals.max_request_size().into(), + }) +} + +/// # `GET /_matrix/media/v1/config` +/// +/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or +/// clients may call. conduwuit adds these for compatibility purposes. +/// See +/// +/// Returns max upload size. +pub async fn get_media_config_v1_route( + _body: Ruma, +) -> Result> { + Ok(get_media_config::v3::Response { + upload_size: services().globals.max_request_size().into(), + } + .into()) +} + +/// # `GET /_matrix/media/v3/preview_url` +/// +/// Returns URL preview. +pub async fn get_media_preview_route( + body: Ruma, +) -> Result { + let url = &body.url; + if !url_preview_allowed(url) { + return Err(Error::BadRequest(ErrorKind::forbidden(), "URL is not allowed to be previewed")); + } + + match get_url_preview(url).await { + Ok(preview) => { + let res = serde_json::value::to_raw_value(&preview).map_err(|e| { + error!("Failed to convert UrlPreviewData into a serde json value: {}", e); + Error::BadRequest( + ErrorKind::LimitExceeded { + retry_after: Some(RetryAfter::Delay(Duration::from_secs(5))), + }, + "Failed to generate a URL preview, try again later.", + ) + })?; + + Ok(get_media_preview::v3::Response::from_raw_value(res)) + }, + Err(e) => { + warn!("Failed to generate a URL preview: {e}"); + + // there doesn't seem to be an agreed-upon error code in the spec. + // the only response codes in the preview_url spec page are 200 and 429. + Err(Error::BadRequest( + ErrorKind::LimitExceeded { + retry_after: Some(RetryAfter::Delay(Duration::from_secs(5))), + }, + "Failed to generate a URL preview, try again later.", + )) + }, + } +} + +/// # `GET /_matrix/media/v1/preview_url` +/// +/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or +/// clients may call. conduwuit adds these for compatibility purposes. 
+/// See +/// +/// Returns URL preview. +pub async fn get_media_preview_v1_route( + body: Ruma, +) -> Result> { + let url = &body.url; + if !url_preview_allowed(url) { + return Err(Error::BadRequest(ErrorKind::forbidden(), "URL is not allowed to be previewed")); + } + + match get_url_preview(url).await { + Ok(preview) => { + let res = serde_json::value::to_raw_value(&preview).map_err(|e| { + error!("Failed to convert UrlPreviewData into a serde json value: {}", e); + Error::BadRequest( + ErrorKind::LimitExceeded { + retry_after: Some(RetryAfter::Delay(Duration::from_secs(5))), + }, + "Failed to generate a URL preview, try again later.", + ) + })?; + + Ok(get_media_preview::v3::Response::from_raw_value(res).into()) + }, + Err(e) => { + warn!("Failed to generate a URL preview: {e}"); + + // there doesn't seem to be an agreed-upon error code in the spec. + // the only response codes in the preview_url spec page are 200 and 429. + Err(Error::BadRequest( + ErrorKind::LimitExceeded { + retry_after: Some(RetryAfter::Delay(Duration::from_secs(5))), + }, + "Failed to generate a URL preview, try again later.", + )) + }, + } +} + +/// # `POST /_matrix/media/v3/upload` +/// +/// Permanently save media in the server. 
+/// +/// - Some metadata will be saved in the database +/// - Media will be saved in the media/ directory +pub async fn create_content_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let mxc = format!( + "mxc://{}/{}", + services().globals.server_name(), + utils::random_string(MXC_LENGTH) + ); + + services() + .media + .create( + Some(sender_user.clone()), + mxc.clone(), + body.filename + .as_ref() + .map(|filename| "inline; filename=".to_owned() + filename) + .as_deref(), + body.content_type.as_deref(), + &body.file, + ) + .await?; + + let content_uri = mxc.into(); + + Ok(create_content::v3::Response { + content_uri, + blurhash: None, + }) +} + +/// # `POST /_matrix/media/v1/upload` +/// +/// Permanently save media in the server. +/// +/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or +/// clients may call. conduwuit adds these for compatibility purposes. +/// See +/// +/// - Some metadata will be saved in the database +/// - Media will be saved in the media/ directory +pub async fn create_content_v1_route( + body: Ruma, +) -> Result> { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let mxc = format!( + "mxc://{}/{}", + services().globals.server_name(), + utils::random_string(MXC_LENGTH) + ); + + services() + .media + .create( + Some(sender_user.clone()), + mxc.clone(), + body.filename + .as_ref() + .map(|filename| "inline; filename=".to_owned() + filename) + .as_deref(), + body.content_type.as_deref(), + &body.file, + ) + .await?; + + let content_uri = mxc.into(); + + Ok(create_content::v3::Response { + content_uri, + blurhash: None, + } + .into()) +} + +/// helper method to fetch remote media from other servers over federation +pub async fn get_remote_content( + mxc: &str, server_name: &ruma::ServerName, media_id: String, allow_redirect: bool, timeout_ms: Duration, +) -> Result { + // we'll lie to the client and say the blocked 
server's media was not found and + // log. the client has no way of telling anyways so this is a security bonus. + if services() + .globals + .prevent_media_downloads_from() + .contains(&server_name.to_owned()) + { + info!( + "Received request for remote media `{}` but server is in our media server blocklist. Returning 404.", + mxc + ); + return Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")); + } + + let content_response = services() + .sending + .send_federation_request( + server_name, + get_content::v3::Request { + allow_remote: true, + server_name: server_name.to_owned(), + media_id, + timeout_ms, + allow_redirect, + }, + ) + .await?; + + services() + .media + .create( + None, + mxc.to_owned(), + content_response.content_disposition.as_deref(), + content_response.content_type.as_deref(), + &content_response.file, + ) + .await?; + + Ok(content_response) +} + +/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}` +/// +/// Load media from our server or over federation. +/// +/// - Only allows federation if `allow_remote` is true +/// - Only redirects if `allow_redirect` is true +/// - Uses client-provided `timeout_ms` if available, else defaults to 20 +/// seconds +pub async fn get_content_route(body: Ruma) -> Result { + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + + if let Some(FileMeta { + content_disposition, + content_type, + file, + }) = services().media.get(mxc.clone()).await? 
+ { + Ok(get_content::v3::Response { + file, + content_type, + content_disposition, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + cache_control: Some("public, max-age=31536000, immutable".to_owned()), + }) + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + let remote_content_response = get_remote_content( + &mxc, + &body.server_name, + body.media_id.clone(), + body.allow_redirect, + body.timeout_ms, + ) + .await?; + Ok(remote_content_response) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} + +/// # `GET /_matrix/media/v1/download/{serverName}/{mediaId}` +/// +/// Load media from our server or over federation. +/// +/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or +/// clients may call. conduwuit adds these for compatibility purposes. +/// See +/// +/// - Only allows federation if `allow_remote` is true +/// - Only redirects if `allow_redirect` is true +/// - Uses client-provided `timeout_ms` if available, else defaults to 20 +/// seconds +pub async fn get_content_v1_route( + body: Ruma, +) -> Result> { + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + + if let Some(FileMeta { + content_disposition, + content_type, + file, + }) = services().media.get(mxc.clone()).await? 
+ { + Ok(get_content::v3::Response { + file, + content_type, + content_disposition, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + cache_control: Some("public, max-age=31536000, immutable".to_owned()), + } + .into()) + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + let remote_content_response = get_remote_content( + &mxc, + &body.server_name, + body.media_id.clone(), + body.allow_redirect, + body.timeout_ms, + ) + .await?; + Ok(remote_content_response.into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} + +/// # `GET /_matrix/media/v3/download/{serverName}/{mediaId}/{fileName}` +/// +/// Load media from our server or over federation, permitting desired filename. +/// +/// - Only allows federation if `allow_remote` is true +/// - Only redirects if `allow_redirect` is true +/// - Uses client-provided `timeout_ms` if available, else defaults to 20 +/// seconds +pub async fn get_content_as_filename_route( + body: Ruma, +) -> Result { + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + + if let Some(FileMeta { + content_type, + file, + .. + }) = services().media.get(mxc.clone()).await? 
+ { + Ok(get_content_as_filename::v3::Response { + file, + content_type, + content_disposition: Some(format!("inline; filename={}", body.filename)), + cross_origin_resource_policy: Some("cross-origin".to_owned()), + cache_control: Some("public, max-age=31536000, immutable".to_owned()), + }) + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + let remote_content_response = get_remote_content( + &mxc, + &body.server_name, + body.media_id.clone(), + body.allow_redirect, + body.timeout_ms, + ) + .await?; + + Ok(get_content_as_filename::v3::Response { + content_disposition: Some(format!("inline: filename={}", body.filename)), + content_type: remote_content_response.content_type, + file: remote_content_response.file, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + cache_control: Some("public, max-age=31536000, immutable".to_owned()), + }) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} + +/// # `GET /_matrix/media/v1/download/{serverName}/{mediaId}/{fileName}` +/// +/// Load media from our server or over federation, permitting desired filename. +/// +/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or +/// clients may call. conduwuit adds these for compatibility purposes. +/// See +/// +/// - Only allows federation if `allow_remote` is true +/// - Only redirects if `allow_redirect` is true +/// - Uses client-provided `timeout_ms` if available, else defaults to 20 +/// seconds +pub async fn get_content_as_filename_v1_route( + body: Ruma, +) -> Result> { + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + + if let Some(FileMeta { + content_type, + file, + .. + }) = services().media.get(mxc.clone()).await? 
+ { + Ok(get_content_as_filename::v3::Response { + file, + content_type, + content_disposition: Some(format!("inline; filename={}", body.filename)), + cross_origin_resource_policy: Some("cross-origin".to_owned()), + cache_control: Some("public, max-age=31536000, immutable".to_owned()), + } + .into()) + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + let remote_content_response = get_remote_content( + &mxc, + &body.server_name, + body.media_id.clone(), + body.allow_redirect, + body.timeout_ms, + ) + .await?; + + Ok(get_content_as_filename::v3::Response { + content_disposition: Some(format!("inline: filename={}", body.filename)), + content_type: remote_content_response.content_type, + file: remote_content_response.file, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + cache_control: Some("public, max-age=31536000, immutable".to_owned()), + } + .into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} + +/// # `GET /_matrix/media/v3/thumbnail/{serverName}/{mediaId}` +/// +/// Load media thumbnail from our server or over federation. +/// +/// - Only allows federation if `allow_remote` is true +/// - Only redirects if `allow_redirect` is true +/// - Uses client-provided `timeout_ms` if available, else defaults to 20 +/// seconds +pub async fn get_content_thumbnail_route( + body: Ruma, +) -> Result { + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + + if let Some(FileMeta { + content_type, + file, + .. + }) = services() + .media + .get_thumbnail( + mxc.clone(), + body.width + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, + body.height + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?, + ) + .await? 
+ { + Ok(get_content_thumbnail::v3::Response { + file, + content_type, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + cache_control: Some("public, max-age=31536000, immutable".to_owned()), + }) + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + // we'll lie to the client and say the blocked server's media was not found and + // log. the client has no way of telling anyways so this is a security bonus. + if services() + .globals + .prevent_media_downloads_from() + .contains(&body.server_name.clone()) + { + info!( + "Received request for remote media `{}` but server is in our media server blocklist. Returning 404.", + mxc + ); + return Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")); + } + + let get_thumbnail_response = services() + .sending + .send_federation_request( + &body.server_name, + get_content_thumbnail::v3::Request { + allow_remote: body.allow_remote, + height: body.height, + width: body.width, + method: body.method.clone(), + server_name: body.server_name.clone(), + media_id: body.media_id.clone(), + timeout_ms: body.timeout_ms, + allow_redirect: body.allow_redirect, + }, + ) + .await?; + + services() + .media + .upload_thumbnail( + None, + mxc, + None, + get_thumbnail_response.content_type.as_deref(), + body.width.try_into().expect("all UInts are valid u32s"), + body.height.try_into().expect("all UInts are valid u32s"), + &get_thumbnail_response.file, + ) + .await?; + + Ok(get_thumbnail_response) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} + +/// # `GET /_matrix/media/v1/thumbnail/{serverName}/{mediaId}` +/// +/// Load media thumbnail from our server or over federation. +/// +/// This is a legacy endpoint ("/v1/") that some very old homeservers and/or +/// clients may call. conduwuit adds these for compatibility purposes. 
+/// See +/// +/// - Only allows federation if `allow_remote` is true +/// - Only redirects if `allow_redirect` is true +/// - Uses client-provided `timeout_ms` if available, else defaults to 20 +/// seconds +pub async fn get_content_thumbnail_v1_route( + body: Ruma, +) -> Result> { + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + + if let Some(FileMeta { + content_type, + file, + .. + }) = services() + .media + .get_thumbnail( + mxc.clone(), + body.width + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, + body.height + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?, + ) + .await? + { + Ok(get_content_thumbnail::v3::Response { + file, + content_type, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + cache_control: Some("public, max-age=31536000, immutable".to_owned()), + } + .into()) + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + // we'll lie to the client and say the blocked server's media was not found and + // log. the client has no way of telling anyways so this is a security bonus. + if services() + .globals + .prevent_media_downloads_from() + .contains(&body.server_name.clone()) + { + info!( + "Received request for remote media `{}` but server is in our media server blocklist. 
Returning 404.", + mxc + ); + return Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")); + } + + let get_thumbnail_response = services() + .sending + .send_federation_request( + &body.server_name, + get_content_thumbnail::v3::Request { + allow_remote: body.allow_remote, + height: body.height, + width: body.width, + method: body.method.clone(), + server_name: body.server_name.clone(), + media_id: body.media_id.clone(), + timeout_ms: body.timeout_ms, + allow_redirect: body.allow_redirect, + }, + ) + .await?; + + services() + .media + .upload_thumbnail( + None, + mxc, + None, + get_thumbnail_response.content_type.as_deref(), + body.width.try_into().expect("all UInts are valid u32s"), + body.height.try_into().expect("all UInts are valid u32s"), + &get_thumbnail_response.file, + ) + .await?; + + Ok(get_thumbnail_response.into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} + +async fn download_image(client: &reqwest::Client, url: &str) -> Result { + let image = client.get(url).send().await?.bytes().await?; + let mxc = format!( + "mxc://{}/{}", + services().globals.server_name(), + utils::random_string(MXC_LENGTH) + ); + + services() + .media + .create(None, mxc.clone(), None, None, &image) + .await?; + + let (width, height) = match ImgReader::new(Cursor::new(&image)).with_guessed_format() { + Err(_) => (None, None), + Ok(reader) => match reader.into_dimensions() { + Err(_) => (None, None), + Ok((width, height)) => (Some(width), Some(height)), + }, + }; + + Ok(UrlPreviewData { + image: Some(mxc), + image_size: Some(image.len()), + image_width: width, + image_height: height, + ..Default::default() + }) +} + +async fn download_html(client: &reqwest::Client, url: &str) -> Result { + let mut response = client.get(url).send().await?; + + let mut bytes: Vec = Vec::new(); + while let Some(chunk) = response.chunk().await? 
{ + bytes.extend_from_slice(&chunk); + if bytes.len() > services().globals.url_preview_max_spider_size() { + debug!( + "Response body from URL {} exceeds url_preview_max_spider_size ({}), not processing the rest of the \ + response body and assuming our necessary data is in this range.", + url, + services().globals.url_preview_max_spider_size() + ); + break; + } + } + let body = String::from_utf8_lossy(&bytes); + let Ok(html) = HTML::from_string(body.to_string(), Some(url.to_owned())) else { + return Err(Error::BadRequest(ErrorKind::Unknown, "Failed to parse HTML")); + }; + + let mut data = match html.opengraph.images.first() { + None => UrlPreviewData::default(), + Some(obj) => download_image(client, &obj.url).await?, + }; + + let props = html.opengraph.properties; + + /* use OpenGraph title/description, but fall back to HTML if not available */ + data.title = props.get("title").cloned().or(html.title); + data.description = props.get("description").cloned().or(html.description); + + Ok(data) +} + +async fn request_url_preview(url: &str) -> Result { + if let Ok(ip) = IPAddress::parse(url) { + let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); + let mut cidr_ranges: Vec = Vec::new(); + + for cidr in cidr_ranges_s { + cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); + } + + for cidr in cidr_ranges { + if cidr.includes(&ip) { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Requesting from this address is forbidden", + )); + } + } + } + + let client = &services().globals.client.url_preview; + let response = client.head(url).send().await?; + + if let Some(remote_addr) = response.remote_addr() { + if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { + let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); + let mut cidr_ranges: Vec = Vec::new(); + + for cidr in cidr_ranges_s { + cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); + } + + for cidr in 
cidr_ranges { + if cidr.includes(&ip) { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Requesting from this address is forbidden", + )); + } + } + } + } + + let Some(content_type) = response + .headers() + .get(reqwest::header::CONTENT_TYPE) + .and_then(|x| x.to_str().ok()) + else { + return Err(Error::BadRequest(ErrorKind::Unknown, "Unknown Content-Type")); + }; + let data = match content_type { + html if html.starts_with("text/html") => download_html(client, url).await?, + img if img.starts_with("image/") => download_image(client, url).await?, + _ => return Err(Error::BadRequest(ErrorKind::Unknown, "Unsupported Content-Type")), + }; + + services().media.set_url_preview(url, &data).await?; + + Ok(data) +} + +async fn get_url_preview(url: &str) -> Result { + if let Some(preview) = services().media.get_url_preview(url).await { + return Ok(preview); + } + + // ensure that only one request is made per URL + let mutex_request = Arc::clone( + services() + .media + .url_preview_mutex + .write() + .await + .entry(url.to_owned()) + .or_default(), + ); + let _request_lock = mutex_request.lock().await; + + match services().media.get_url_preview(url).await { + Some(preview) => Ok(preview), + None => request_url_preview(url).await, + } +} + +fn url_preview_allowed(url_str: &str) -> bool { + let url: Url = match Url::parse(url_str) { + Ok(u) => u, + Err(e) => { + warn!("Failed to parse URL from a str: {}", e); + return false; + }, + }; + + if ["http", "https"] + .iter() + .all(|&scheme| scheme != url.scheme().to_lowercase()) + { + debug!("Ignoring non-HTTP/HTTPS URL to preview: {}", url); + return false; + } + + let host = match url.host_str() { + None => { + debug!("Ignoring URL preview for a URL that does not have a host (?): {}", url); + return false; + }, + Some(h) => h.to_owned(), + }; + + let allowlist_domain_contains = services().globals.url_preview_domain_contains_allowlist(); + let allowlist_domain_explicit = 
services().globals.url_preview_domain_explicit_allowlist(); + let denylist_domain_explicit = services().globals.url_preview_domain_explicit_denylist(); + let allowlist_url_contains = services().globals.url_preview_url_contains_allowlist(); + + if allowlist_domain_contains.contains(&"*".to_owned()) + || allowlist_domain_explicit.contains(&"*".to_owned()) + || allowlist_url_contains.contains(&"*".to_owned()) + { + debug!("Config key contains * which is allowing all URL previews. Allowing URL {}", url); + return true; + } + + if !host.is_empty() { + if denylist_domain_explicit.contains(&host) { + debug!( + "Host {} is not allowed by url_preview_domain_explicit_denylist (check 1/4)", + &host + ); + return false; + } + + if allowlist_domain_explicit.contains(&host) { + debug!("Host {} is allowed by url_preview_domain_explicit_allowlist (check 2/4)", &host); + return true; + } + + if allowlist_domain_contains + .iter() + .any(|domain_s| domain_s.contains(&host.clone())) + { + debug!("Host {} is allowed by url_preview_domain_contains_allowlist (check 3/4)", &host); + return true; + } + + if allowlist_url_contains + .iter() + .any(|url_s| url.to_string().contains(&url_s.to_string())) + { + debug!("URL {} is allowed by url_preview_url_contains_allowlist (check 4/4)", &host); + return true; + } + + // check root domain if available and if user has root domain checks + if services().globals.url_preview_check_root_domain() { + debug!("Checking root domain"); + match host.split_once('.') { + None => return false, + Some((_, root_domain)) => { + if denylist_domain_explicit.contains(&root_domain.to_owned()) { + debug!( + "Root domain {} is not allowed by url_preview_domain_explicit_denylist (check 1/3)", + &root_domain + ); + return true; + } + + if allowlist_domain_explicit.contains(&root_domain.to_owned()) { + debug!( + "Root domain {} is allowed by url_preview_domain_explicit_allowlist (check 2/3)", + &root_domain + ); + return true; + } + + if allowlist_domain_contains + 
.iter() + .any(|domain_s| domain_s.contains(&root_domain.to_owned())) + { + debug!( + "Root domain {} is allowed by url_preview_domain_contains_allowlist (check 3/3)", + &root_domain + ); + return true; + } + }, + } + } + } + + false +} diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs new file mode 100644 index 00000000..b609fa34 --- /dev/null +++ b/src/api/client_server/membership.rs @@ -0,0 +1,1805 @@ +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + sync::Arc, + time::{Duration, Instant}, +}; + +use ruma::{ + api::{ + client::{ + error::ErrorKind, + membership::{ + ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, + joined_members, joined_rooms, kick_user, leave_room, unban_user, ThirdPartySigned, + }, + }, + federation::{self, membership::create_invite}, + }, + canonical_json::to_canonical_value, + events::{ + room::{ + join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + StateEventType, TimelineEventType, + }, + serde::Base64, + state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, UserId, +}; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use tokio::sync::RwLock; +use tracing::{debug, error, info, trace, warn}; + +use super::get_alias_helper; +use crate::{ + service::pdu::{gen_event_id_canonical_json, PduBuilder}, + services, utils, Error, PduEvent, Result, Ruma, +}; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/join` +/// +/// Tries to join the sender user into a room. 
+/// +/// - If the server knowns about this room: creates the join event and does auth +/// rules locally +/// - If the server does not know about the room: asks other servers over +/// federation +pub async fn join_room_by_id_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if services().rooms.metadata.is_banned(&body.room_id)? && !services().users.is_admin(sender_user)? { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This room is banned on this homeserver.", + )); + } + + if let Some(server) = body.room_id.server_name() { + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + && !services().users.is_admin(sender_user)? + { + warn!( + "User {sender_user} tried joining room ID {} which has a server name that is globally forbidden. \ + Rejecting.", + body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This remote server is banned on this homeserver.", + )); + } + } + + // There is no body.server_name for /roomId/join + let mut servers = services() + .rooms + .state_cache + .servers_invite_via(&body.room_id)? + .unwrap_or( + services() + .rooms + .state_cache + .invite_state(sender_user, &body.room_id)? + .unwrap_or_default() + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect::>(), + ); + + if let Some(server) = body.room_id.server_name() { + servers.push(server.into()); + } + + join_room_by_id_helper( + body.sender_user.as_deref(), + &body.room_id, + body.reason.clone(), + &servers, + body.third_party_signed.as_ref(), + ) + .await +} + +/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` +/// +/// Tries to join the sender user into a room. 
+/// +/// - If the server knowns about this room: creates the join event and does auth +/// rules locally +/// - If the server does not know about the room: asks other servers over +/// federation +pub async fn join_room_by_id_or_alias_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_deref().expect("user is authenticated"); + let body = body.body; + + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + Ok(room_id) => { + if services().rooms.metadata.is_banned(&room_id)? && !services().users.is_admin(sender_user)? { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This room is banned on this homeserver.", + )); + } + + if let Some(server) = room_id.server_name() { + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + && !services().users.is_admin(sender_user)? + { + warn!( + "User {sender_user} tried joining room ID {room_id} which has a server name that is globally \ + forbidden. Rejecting.", + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This remote server is banned on this homeserver.", + )); + } + } + + let mut servers = body.server_name.clone(); + + servers.extend( + services() + .rooms + .state_cache + .servers_invite_via(&room_id)? + .unwrap_or( + services() + .rooms + .state_cache + .invite_state(sender_user, &room_id)? 
+ .unwrap_or_default() + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect(), + ), + ); + + if let Some(server) = room_id.server_name() { + servers.push(server.to_owned()); + } + + (servers, room_id) + }, + Err(room_alias) => { + let response = get_alias_helper(room_alias.clone()).await?; + + if services().rooms.metadata.is_banned(&response.room_id)? && !services().users.is_admin(sender_user)? { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This room is banned on this homeserver.", + )); + } + + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&room_alias.server_name().to_owned()) + && !services().users.is_admin(sender_user)? + { + warn!( + "User {sender_user} tried joining room alias {} with room ID {} which has a server name that is \ + globally forbidden. Rejecting.", + &room_alias, &response.room_id + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This remote server is banned on this homeserver.", + )); + } + + if let Some(server) = response.room_id.server_name() { + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + && !services().users.is_admin(sender_user)? + { + warn!( + "User {sender_user} tried joining room alias {} with room ID {} which has a server name that \ + is globally forbidden. 
Rejecting.", + &room_alias, &response.room_id + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This remote server is banned on this homeserver.", + )); + } + } + + (response.servers, response.room_id) + }, + }; + + let join_room_response = join_room_by_id_helper( + Some(sender_user), + &room_id, + body.reason.clone(), + &servers, + body.third_party_signed.as_ref(), + ) + .await?; + + Ok(join_room_by_id_or_alias::v3::Response { + room_id: join_room_response.room_id, + }) +} + +/// # `POST /_matrix/client/v3/rooms/{roomId}/leave` +/// +/// Tries to leave the sender user from a room. +/// +/// - This should always work if the user is currently joined. +pub async fn leave_room_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + leave_room(sender_user, &body.room_id, body.reason.clone()).await?; + + Ok(leave_room::v3::Response::new()) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/invite` +/// +/// Tries to send an invite event into the room. +pub async fn invite_user_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services().users.is_admin(sender_user)? && services().globals.block_non_admin_invites() { + info!( + "User {sender_user} is not an admin and attempted to send an invite to room {}", + &body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Invites are not allowed on this server.", + )); + } + + if services().rooms.metadata.is_banned(&body.room_id)? && !services().users.is_admin(sender_user)? 
{ + info!( + "Local user {} who is not an admin attempted to send an invite for banned room {}.", + &sender_user, &body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This room is banned on this homeserver.", + )); + } + + if let Some(server) = body.room_id.server_name() { + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + } + + if let invite_user::v3::InvitationRecipient::UserId { + user_id, + } = &body.recipient + { + invite_helper(sender_user, user_id, &body.room_id, body.reason.clone(), false).await?; + Ok(invite_user::v3::Response {}) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) + } +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/kick` +/// +/// Tries to send a kick event into the room. +pub async fn kick_user_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if let Ok(true) = services() + .rooms + .state_cache + .is_left(sender_user, &body.room_id) + { + info!("{} is not in room {}", &body.user_id, &body.room_id); + return Ok(kick_user::v3::Response {}); + } + + let mut event: RoomMemberEventContent = serde_json::from_str( + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot kick member that's not in the room.", + ))? 
+ .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = MembershipState::Leave; + event.reason.clone_from(&body.reason); + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(kick_user::v3::Response::new()) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/ban` +/// +/// Tries to send a ban event into the room. +pub async fn ban_user_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if let Ok(Some(membership_event)) = services() + .rooms + .state_accessor + .get_member(&body.room_id, sender_user) + { + if membership_event.membership == MembershipState::Ban { + info!("{} is already banned in {}", &body.user_id, &body.room_id); + return Ok(ban_user::v3::Response {}); + } + } + + let event = services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref())? 
+ .map_or( + Ok(RoomMemberEventContent { + membership: MembershipState::Ban, + displayname: services().users.displayname(&body.user_id)?, + avatar_url: services().users.avatar_url(&body.user_id)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(&body.user_id)?, + reason: body.reason.clone(), + join_authorized_via_users_server: None, + }), + |event| { + serde_json::from_str(event.content.get()) + .map(|event: RoomMemberEventContent| RoomMemberEventContent { + membership: MembershipState::Ban, + displayname: services() + .users + .displayname(&body.user_id) + .unwrap_or_default(), + avatar_url: services() + .users + .avatar_url(&body.user_id) + .unwrap_or_default(), + blurhash: services().users.blurhash(&body.user_id).unwrap_or_default(), + reason: body.reason.clone(), + join_authorized_via_users_server: None, + ..event + }) + .map_err(|_| Error::bad_database("Invalid member event in database.")) + }, + )?; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(ban_user::v3::Response::new()) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/unban` +/// +/// Tries to send an unban event into the room. 
+pub async fn unban_user_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if let Ok(Some(membership_event)) = services() + .rooms + .state_accessor + .get_member(&body.room_id, sender_user) + { + if membership_event.membership != MembershipState::Ban { + info!("{} is already unbanned in {}", &body.user_id, &body.room_id); + return Ok(unban_user::v3::Response {}); + } + } + + let mut event: RoomMemberEventContent = serde_json::from_str( + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomMember, body.user_id.as_ref())? + .ok_or(Error::BadRequest(ErrorKind::BadState, "Cannot unban a user who is not banned."))? + .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = MembershipState::Leave; + event.reason.clone_from(&body.reason); + event.join_authorized_via_users_server = None; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(unban_user::v3::Response::new()) +} + +/// # `POST /_matrix/client/v3/rooms/{roomId}/forget` +/// +/// Forgets about a room. 
+/// +/// - If the sender user currently left the room: Stops sender user from +/// receiving information about the room +/// +/// Note: Other devices of the user have no way of knowing the room was +/// forgotten, so this has to be called from every device +pub async fn forget_room_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .rooms + .state_cache + .forget(&body.room_id, sender_user)?; + + Ok(forget_room::v3::Response::new()) +} + +/// # `POST /_matrix/client/r0/joined_rooms` +/// +/// Lists all rooms the user has joined. +pub async fn joined_rooms_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + Ok(joined_rooms::v3::Response { + joined_rooms: services() + .rooms + .state_cache + .rooms_joined(sender_user) + .filter_map(Result::ok) + .collect(), + }) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/members` +/// +/// Lists all joined users in a room (TODO: at a specific point in time, with a +/// specific membership). +/// +/// - Only works if the user is currently joined +pub async fn get_member_events_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services() + .rooms + .state_accessor + .user_can_see_state_events(sender_user, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this room.", + )); + } + + Ok(get_member_events::v3::Response { + chunk: services() + .rooms + .state_accessor + .room_state_full(&body.room_id) + .await? + .iter() + .filter(|(key, _)| key.0 == StateEventType::RoomMember) + .map(|(_, pdu)| pdu.to_member_event()) + .collect(), + }) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` +/// +/// Lists all members of a room. 
+/// +/// - The sender user must be in the room +/// - TODO: An appservice just needs a puppet joined +pub async fn joined_members_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services() + .rooms + .state_accessor + .user_can_see_state_events(sender_user, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this room.", + )); + } + + let mut joined = BTreeMap::new(); + for user_id in services() + .rooms + .state_cache + .room_members(&body.room_id) + .filter_map(Result::ok) + { + let display_name = services().users.displayname(&user_id)?; + let avatar_url = services().users.avatar_url(&user_id)?; + + joined.insert( + user_id, + joined_members::v3::RoomMember { + display_name, + avatar_url, + }, + ); + } + + Ok(joined_members::v3::Response { + joined, + }) +} + +pub(crate) async fn join_room_by_id_helper( + sender_user: Option<&UserId>, room_id: &RoomId, reason: Option, servers: &[OwnedServerName], + _third_party_signed: Option<&ThirdPartySigned>, +) -> Result { + let sender_user = sender_user.expect("user is authenticated"); + + if let Ok(true) = services().rooms.state_cache.is_joined(sender_user, room_id) { + info!("{sender_user} is already joined in {room_id}"); + return Ok(join_room_by_id::v3::Response { + room_id: room_id.into(), + }); + } + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Ask a remote server if we are not participating in this room + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), room_id)? 
+ { + info!("Joining {room_id} over federation."); + + let (make_join_response, remote_server) = make_join_request(sender_user, room_id, servers).await?; + + info!("make_join finished"); + + let room_version_id = match make_join_response.room_version { + Some(room_version) + if services() + .globals + .supported_room_versions() + .contains(&room_version) => + { + room_version + }, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + + let mut join_event_stub: CanonicalJsonObject = serde_json::from_str(make_join_response.event.get()) + .map_err(|_| Error::BadServerResponse("Invalid make_join event json received from server."))?; + + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + + // TODO: Is origin needed? + join_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + join_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason, + join_authorized_via_users_server: join_authorized_via_users_server.clone(), + }) + .expect("event is valid, we just created it"), + ); + + // We keep the "event_id" in the pdu only in v1 or + // v2 rooms + match room_version_id { + RoomVersionId::V1 | RoomVersionId::V2 => {}, + _ => { + join_event_stub.remove("event_id"); + }, + }; + + // In order to create a 
compatible ref hash (EventID) the `hashes` field needs + // to be present + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut join_event_stub, + &room_version_id, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = format!( + "${}", + ruma::signatures::reference_hash(&join_event_stub, &room_version_id) + .expect("ruma can calculate reference hashes") + ); + let event_id = <&EventId>::try_from(event_id.as_str()).expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + + // It has enough fields to be called a proper event now + let mut join_event = join_event_stub; + + info!("Asking {remote_server} for send_join in room {room_id}"); + let send_join_response = services() + .sending + .send_federation_request( + &remote_server, + federation::membership::create_join_event::v2::Request { + room_id: room_id.to_owned(), + event_id: event_id.to_owned(), + pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + omit_members: false, + }, + ) + .await?; + + info!("send_join finished"); + + if join_authorized_via_users_server.is_some() { + match &room_version_id { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 => { + warn!( + "Found `join_authorised_via_users_server` but room {} is version {}. Ignoring.", + room_id, &room_version_id + ); + }, + // only room versions 8 and above using `join_authorized_via_users_server` (restricted joins) need to + // validate and send signatures + RoomVersionId::V8 | RoomVersionId::V9 | RoomVersionId::V10 | RoomVersionId::V11 => { + if let Some(signed_raw) = &send_join_response.room_state.event { + info!( + "There is a signed event. This room is probably using restricted joins. 
Adding signature \ + to our event" + ); + let Ok((signed_event_id, signed_value)) = + gen_event_id_canonical_json(signed_raw, &room_version_id) + else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + }; + + if signed_event_id != event_id { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Server sent event with wrong event id", + )); + } + + match signed_value["signatures"] + .as_object() + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Server sent invalid signatures type", + )) + .and_then(|e| { + e.get(remote_server.as_str()).ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Server did not send its signature", + )) + }) { + Ok(signature) => { + join_event + .get_mut("signatures") + .expect("we created a valid pdu") + .as_object_mut() + .expect("we created a valid pdu") + .insert(remote_server.to_string(), signature.clone()); + }, + Err(e) => { + warn!( + "Server {remote_server} sent invalid signature in sendjoin signatures for event \ + {signed_value:?}: {e:?}", + ); + }, + } + } + }, + _ => { + warn!( + "Unexpected or unsupported room version {} for room {}", + &room_version_id, room_id + ); + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Unexpected or unsupported room version found", + )); + }, + } + } + + services().rooms.short.get_or_create_shortroomid(room_id)?; + + info!("Parsing join event"); + let parsed_join_pdu = PduEvent::from_id_val(event_id, join_event.clone()) + .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; + + let mut state = HashMap::new(); + let pub_key_map = RwLock::new(BTreeMap::new()); + + info!("Fetching join signing keys"); + services() + .rooms + .event_handler + .fetch_join_signing_keys(&send_join_response, &room_version_id, &pub_key_map) + .await?; + + info!("Going through send_join response room_state"); + for result in send_join_response + .room_state + .state + 
.iter() + .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map)) + { + let Ok((event_id, value)) = result.await else { + continue; + }; + + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { + warn!("Invalid PDU in send_join response: {} {:?}", e, value); + Error::BadServerResponse("Invalid PDU in send_join response.") + })?; + + services() + .rooms + .outlier + .add_pdu_outlier(&event_id, &value)?; + if let Some(state_key) = &pdu.state_key { + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)?; + state.insert(shortstatekey, pdu.event_id.clone()); + } + } + + info!("Going through send_join response auth_chain"); + for result in send_join_response + .room_state + .auth_chain + .iter() + .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map)) + { + let Ok((event_id, value)) = result.await else { + continue; + }; + + services() + .rooms + .outlier + .add_pdu_outlier(&event_id, &value)?; + } + + info!("Running send_join auth check"); + + let auth_check = state_res::event_auth::auth_check( + &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), + &parsed_join_pdu, + None::, // TODO: third party invite + |k, s| { + services() + .rooms + .timeline + .get_pdu( + state.get( + &services() + .rooms + .short + .get_or_create_shortstatekey(&k.to_string().into(), s) + .ok()?, + )?, + ) + .ok()? 
+ }, + ) + .map_err(|e| { + warn!("Auth check failed: {e}"); + Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") + })?; + + if !auth_check { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); + } + + info!("Saving state from send_join"); + let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state( + room_id, + Arc::new( + state + .into_iter() + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(k, &id) + }) + .collect::>()?, + ), + )?; + + services() + .rooms + .state + .force_state(room_id, statehash_before_join, new, removed, &state_lock) + .await?; + + info!("Updating joined counts for new room"); + services().rooms.state_cache.update_joined_count(room_id)?; + + // We append to state before appending the pdu, so we don't have a moment in + // time with the pdu without it's state. This is okay because append_pdu can't + // fail. + let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?; + + info!("Appending new room join event"); + services() + .rooms + .timeline + .append_pdu( + &parsed_join_pdu, + join_event, + vec![(*parsed_join_pdu.event_id).to_owned()], + &state_lock, + ) + .await?; + + info!("Setting final room state for new room"); + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + services() + .rooms + .state + .set_room_state(room_id, statehash_after_join, &state_lock)?; + } else { + info!("We can join locally"); + + let join_rules_event = + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + 
.transpose()?; + + let restriction_rooms = match join_rules_event_content { + Some(RoomJoinRulesEventContent { + join_rule: JoinRule::Restricted(restricted), + }) + | Some(RoomJoinRulesEventContent { + join_rule: JoinRule::KnockRestricted(restricted), + }) => restricted + .allow + .into_iter() + .filter_map(|a| match a { + AllowRule::RoomMembership(r) => Some(r.room_id), + _ => None, + }) + .collect(), + _ => Vec::new(), + }; + + let local_members = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .filter(|user| user.server_name() == services().globals.server_name()) + .collect::>(); + + let mut authorized_user: Option = None; + + if restriction_rooms.iter().any(|restriction_room_id| { + services() + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + .unwrap_or(false) + }) { + for user in local_members { + if services() + .rooms + .state_accessor + .user_can_invite(room_id, &user, sender_user, &state_lock) + .await + .unwrap_or(false) + { + authorized_user = Some(user); + break; + } + } + } + + let event = RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason: reason.clone(), + join_authorized_via_users_server: authorized_user, + }; + + // Try normal join first + let error = match services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + ) + .await + { + Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())), + Err(e) => e, + }; + + if !restriction_rooms.is_empty() + && servers + .iter() + 
.all(|s| *s != services().globals.server_name()) + { + info!( + "We couldn't do the join locally, maybe federation can help to satisfy the restricted join \ + requirements" + ); + let (make_join_response, remote_server) = make_join_request(sender_user, room_id, servers).await?; + + let room_version_id = match make_join_response.room_version { + Some(room_version_id) + if services() + .globals + .supported_room_versions() + .contains(&room_version_id) => + { + room_version_id + }, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + let mut join_event_stub: CanonicalJsonObject = serde_json::from_str(make_join_response.event.get()) + .map_err(|_| Error::BadServerResponse("Invalid make_join event json received from server."))?; + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + // TODO: Is origin needed? 
+ join_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + join_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason, + join_authorized_via_users_server, + }) + .expect("event is valid, we just created it"), + ); + + // We keep the "event_id" in the pdu only in v1 or + // v2 rooms + match room_version_id { + RoomVersionId::V1 | RoomVersionId::V2 => {}, + _ => { + join_event_stub.remove("event_id"); + }, + }; + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut join_event_stub, + &room_version_id, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = format!( + "${}", + ruma::signatures::reference_hash(&join_event_stub, &room_version_id) + .expect("ruma can calculate reference hashes") + ); + let event_id = + <&EventId>::try_from(event_id.as_str()).expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + + // It has enough fields to be called a proper event now + let join_event = join_event_stub; + + let send_join_response = services() + .sending + .send_federation_request( + &remote_server, + federation::membership::create_join_event::v2::Request { + room_id: 
room_id.to_owned(), + event_id: event_id.to_owned(), + pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + omit_members: false, + }, + ) + .await?; + + if let Some(signed_raw) = send_join_response.room_state.event { + let Ok((signed_event_id, signed_value)) = gen_event_id_canonical_json(&signed_raw, &room_version_id) + else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + }; + + if signed_event_id != event_id { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Server sent event with wrong event id", + )); + } + + drop(state_lock); + let pub_key_map = RwLock::new(BTreeMap::new()); + services() + .rooms + .event_handler + .fetch_required_signing_keys([&signed_value], &pub_key_map) + .await?; + services() + .rooms + .event_handler + .handle_incoming_pdu(&remote_server, &signed_event_id, room_id, signed_value, true, &pub_key_map) + .await?; + } else { + return Err(error); + } + } else { + return Err(error); + } + } + + Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) +} + +async fn make_join_request( + sender_user: &UserId, room_id: &RoomId, servers: &[OwnedServerName], +) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> { + let mut make_join_response_and_server = Err(Error::BadServerResponse("No server available to assist in joining.")); + + let mut make_join_counter = 0; + let mut incompatible_room_version_count = 0; + + for remote_server in servers { + if remote_server == services().globals.server_name() { + continue; + } + info!("Asking {remote_server} for make_join ({make_join_counter})"); + let make_join_response = services() + .sending + .send_federation_request( + remote_server, + federation::membership::prepare_join_event::v1::Request { + room_id: room_id.to_owned(), + user_id: sender_user.to_owned(), + ver: services().globals.supported_room_versions(), + }, + 
) + .await; + + trace!("make_join response: {:?}", make_join_response); + make_join_counter += 1; + + if let Err(ref e) = make_join_response { + trace!("make_join ErrorKind string: {:?}", e.error_code().to_string()); + + // converting to a string is necessary (i think) because ruma is forcing us to + // fill in the struct for M_INCOMPATIBLE_ROOM_VERSION + if e.error_code() + .to_string() + .contains("M_INCOMPATIBLE_ROOM_VERSION") + || e.error_code() + .to_string() + .contains("M_UNSUPPORTED_ROOM_VERSION") + { + incompatible_room_version_count += 1; + } + + if incompatible_room_version_count > 15 { + info!( + "15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or M_UNSUPPORTED_ROOM_VERSION, \ + assuming that Conduwuit does not support the room {room_id}: {e}" + ); + make_join_response_and_server = + Err(Error::BadServerResponse("Room version is not supported by Conduwuit")); + return make_join_response_and_server; + } + + if make_join_counter > 50 { + warn!( + "50 servers failed to provide valid make_join response, assuming no server can assist in joining." 
+ ); + make_join_response_and_server = + Err(Error::BadServerResponse("No server available to assist in joining.")); + return make_join_response_and_server; + } + } + + make_join_response_and_server = make_join_response.map(|r| (r, remote_server.clone())); + + if make_join_response_and_server.is_ok() { + break; + } + } + + make_join_response_and_server +} + +async fn validate_and_add_event_id( + pdu: &RawJsonValue, room_version: &RoomVersionId, pub_key_map: &RwLock>>, +) -> Result<(OwnedEventId, CanonicalJsonObject)> { + let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&value, room_version).expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + let back_off = |id| async { + match services() + .globals + .bad_event_ratelimiter + .write() + .await + .entry(id) + { + Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + } + }; + + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .await + .get(&event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } + + if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version) { + warn!("Event {} failed verification {:?} {}", event_id, pdu, e); + back_off(event_id).await; + return Err(Error::BadServerResponse("Event 
failed verification.")); + } + + value.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + + Ok((event_id, value)) +} + +pub(crate) async fn invite_helper( + sender_user: &UserId, user_id: &UserId, room_id: &RoomId, reason: Option, is_direct: bool, +) -> Result<()> { + if !services().users.is_admin(user_id)? && services().globals.block_non_admin_invites() { + info!("User {sender_user} is not an admin and attempted to send an invite to room {room_id}"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Invites are not allowed on this server.", + )); + } + + if user_id.server_name() != services().globals.server_name() { + let (pdu, pdu_json, invite_room_state) = { + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let content = to_raw_value(&RoomMemberEventContent { + avatar_url: services().users.avatar_url(user_id)?, + displayname: None, + is_direct: Some(is_direct), + membership: MembershipState::Invite, + third_party_invite: None, + blurhash: None, + reason, + join_authorized_via_users_server: None, + }) + .expect("member event is valid value"); + + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + )?; + + let invite_room_state = services().rooms.state.calculate_invite_state(&pdu)?; + + drop(state_lock); + + (pdu, pdu_json, invite_room_state) + }; + + let room_version_id = services().rooms.state.get_room_version(room_id)?; + + let response = services() + .sending + .send_federation_request( + user_id.server_name(), + create_invite::v2::Request { + room_id: room_id.to_owned(), + event_id: (*pdu.event_id).to_owned(), + room_version: room_version_id.clone(), 
+ event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), + invite_room_state, + via: services().rooms.state_cache.servers_route_via(room_id).ok(), + }, + ) + .await?; + + let pub_key_map = RwLock::new(BTreeMap::new()); + + // We do not add the event_id field to the pdu here because of signature and + // hashes checks + let Ok((event_id, value)) = gen_event_id_canonical_json(&response.event, &room_version_id) else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + }; + + if *pdu.event_id != *event_id { + warn!( + "Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", + user_id.server_name(), + pdu_json, + value + ); + } + + let origin: OwnedServerName = serde_json::from_value( + serde_json::to_value( + value + .get("origin") + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event needs an origin field."))?, + ) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + + services() + .rooms + .event_handler + .fetch_required_signing_keys([&value], &pub_key_map) + .await?; + + let pdu_id: Vec = services() + .rooms + .event_handler + .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .await? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + + services().sending.send_pdu_room(room_id, &pdu_id)?; + return Ok(()); + } + + if !services() + .rooms + .state_cache + .is_joined(sender_user, room_id)? 
+ { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this room.", + )); + } + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: services().users.displayname(user_id)?, + avatar_url: services().users.avatar_url(user_id)?, + is_direct: Some(is_direct), + third_party_invite: None, + blurhash: services().users.blurhash(user_id)?, + reason, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(()) +} + +// Make a user leave all their joined rooms +pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { + let all_rooms = services() + .rooms + .state_cache + .rooms_joined(user_id) + .chain( + services() + .rooms + .state_cache + .rooms_invited(user_id) + .map(|t| t.map(|(r, _)| r)), + ) + .collect::>(); + + for room_id in all_rooms { + let Ok(room_id) = room_id else { + continue; + }; + + // ignore errors + _ = leave_room(user_id, &room_id, None).await; + } + + Ok(()) +} + +pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option) -> Result<()> { + // Ask a remote server if we don't have this room + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), room_id)? 
+ { + if let Err(e) = remote_leave_room(user_id, room_id).await { + warn!("Failed to leave room {} remotely: {}", user_id, e); + // Don't tell the client about this error + } + + let last_state = services() + .rooms + .state_cache + .invite_state(user_id, room_id)? + .map_or_else(|| services().rooms.state_cache.left_state(user_id, room_id), |s| Ok(Some(s)))?; + + // We always drop the invite, we can't rely on other servers + services().rooms.state_cache.update_membership( + room_id, + user_id, + RoomMemberEventContent::new(MembershipState::Leave), + user_id, + last_state, + None, + true, + )?; + } else { + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let member_event = + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?; + + // Fix for broken rooms + let member_event = match member_event { + None => { + error!("Trying to leave a room you are not a member of."); + + services().rooms.state_cache.update_membership( + room_id, + user_id, + RoomMemberEventContent::new(MembershipState::Leave), + user_id, + None, + None, + true, + )?; + return Ok(()); + }, + Some(e) => e, + }; + + let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get()).map_err(|e| { + error!("Invalid room member event in database: {}", e); + Error::bad_database("Invalid member event in database.") + })?; + + event.membership = MembershipState::Leave; + event.reason = reason; + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + room_id, + &state_lock, + ) + .await?; + } + + Ok(()) +} + +async fn remote_leave_room(user_id: 
&UserId, room_id: &RoomId) -> Result<()> { + let mut make_leave_response_and_server = Err(Error::BadServerResponse("No server available to assist in leaving.")); + + let invite_state = services() + .rooms + .state_cache + .invite_state(user_id, room_id)? + .ok_or(Error::BadRequest(ErrorKind::BadState, "User is not invited."))?; + + let servers: HashSet = services() + .rooms + .state_cache + .servers_invite_via(room_id)? + .map_or( + invite_state + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect::>(), + HashSet::from_iter, + ); + + debug!("servers in remote_leave_room: {servers:?}"); + + for remote_server in servers { + let make_leave_response = services() + .sending + .send_federation_request( + &remote_server, + federation::membership::prepare_leave_event::v1::Request { + room_id: room_id.to_owned(), + user_id: user_id.to_owned(), + }, + ) + .await; + + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + + if make_leave_response_and_server.is_ok() { + break; + } + } + + let (make_leave_response, remote_server) = make_leave_response_and_server?; + + let room_version_id = match make_leave_response.room_version { + Some(version) + if services() + .globals + .supported_room_versions() + .contains(&version) => + { + version + }, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + + let mut leave_event_stub = serde_json::from_str::(make_leave_response.event.get()) + .map_err(|_| Error::BadServerResponse("Invalid make_leave event json received from server."))?; + + // TODO: Is origin needed? 
+ leave_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + + // room v3 and above removed the "event_id" field from remote PDU format + match room_version_id { + RoomVersionId::V1 | RoomVersionId::V2 => {}, + _ => { + leave_event_stub.remove("event_id"); + }, + }; + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut leave_event_stub, + &room_version_id, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + leave_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; + + services() + .sending + .send_federation_request( + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id: room_id.to_owned(), + event_id, + pdu: PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + }, + ) + .await?; + + Ok(()) +} diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs new file mode 100644 index 00000000..765dd595 --- /dev/null +++ b/src/api/client_server/message.rs @@ -0,0 +1,289 @@ +use std::{ + collections::{BTreeMap, HashSet}, + sync::Arc, +}; + +use ruma::{ + api::client::{ + error::ErrorKind, + filter::{RoomEventFilter, UrlFilter}, + 
message::{get_message_events, send_message_event}, + }, + events::{MessageLikeEventType, StateEventType}, + RoomId, UserId, +}; +use serde_json::{from_str, Value}; + +use crate::{ + service::{pdu::PduBuilder, rooms::timeline::PduCount}, + services, utils, Error, PduEvent, Result, Ruma, +}; + +/// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` +/// +/// Send a message event into the room. +/// +/// - Is a NOOP if the txn id was already used before and returns the same event +/// id again +/// - The only requirement for the content is that it has to be valid json +/// - Tries to send the event into the room, auth rules will determine if it is +/// allowed +pub async fn send_message_event_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_deref(); + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Forbid m.room.encrypted if encryption is disabled + if MessageLikeEventType::RoomEncrypted == body.event_type && !services().globals.allow_encryption() { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Encryption has been disabled")); + } + + if body.event_type == MessageLikeEventType::CallInvite + && services().rooms.directory.is_public_room(&body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Room call invites are not allowed in public rooms", + )); + } + + // Check if this is a new transaction id + if let Some(response) = services() + .transaction_ids + .existing_txnid(sender_user, sender_device, &body.txn_id)? 
+ { + // The client might have sent a txnid of the /sendToDevice endpoint + // This txnid has no response associated with it + if response.is_empty() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to use txn id already used for an incompatible endpoint.", + )); + } + + let event_id = utils::string_from_bytes(&response) + .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? + .try_into() + .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; + return Ok(send_message_event::v3::Response { + event_id, + }); + } + + let mut unsigned = BTreeMap::new(); + unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); + + let event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: body.event_type.to_string().into(), + content: from_str(body.body.body.json().get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + unsigned: Some(unsigned), + state_key: None, + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + services() + .transaction_ids + .add_txnid(sender_user, sender_device, &body.txn_id, event_id.as_bytes())?; + + drop(state_lock); + + Ok(send_message_event::v3::Response::new((*event_id).to_owned())) +} + +/// # `GET /_matrix/client/r0/rooms/{roomId}/messages` +/// +/// Allows paginating through room history. 
+/// +/// - Only works if the user is joined (TODO: always allow, but only show events +/// where the user was +/// joined, depending on `history_visibility`) +pub async fn get_message_events_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + let from = match body.from.clone() { + Some(from) => PduCount::try_from_string(&from)?, + None => match body.dir { + ruma::api::Direction::Forward => PduCount::min(), + ruma::api::Direction::Backward => PduCount::max(), + }, + }; + + let to = body + .to + .as_ref() + .and_then(|t| PduCount::try_from_string(t).ok()); + + services() + .rooms + .lazy_loading + .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from) + .await?; + + let limit = u64::from(body.limit).min(100) as usize; + + let next_token; + + let mut resp = get_message_events::v3::Response::new(); + + let mut lazy_loaded = HashSet::new(); + + match body.dir { + ruma::api::Direction::Forward => { + let events_after: Vec<_> = services() + .rooms + .timeline + .pdus_after(sender_user, &body.room_id, from)? + .filter_map(Result::ok) // Filter out buggy events + .filter(|(_, pdu)| contains_url_filter(pdu, &body.filter)) + .filter(|(_, pdu)| visibility_filter(pdu, sender_user, &body.room_id)) + .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take(limit) + .collect(); + + for (_, event) in &events_after { + /* TODO: Remove the not "element_hacks" check when these are resolved: + * https://github.com/vector-im/element-android/issues/3417 + * https://github.com/vector-im/element-web/issues/21034 + */ + if !cfg!(feature = "element_hacks") + && !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &body.room_id, + &event.sender, + )? 
{ + lazy_loaded.insert(event.sender.clone()); + } + + lazy_loaded.insert(event.sender.clone()); + } + + next_token = events_after.last().map(|(count, _)| count).copied(); + + let events_after: Vec<_> = events_after + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(); + + resp.start = from.stringify(); + resp.end = next_token.map(|count| count.stringify()); + resp.chunk = events_after; + }, + ruma::api::Direction::Backward => { + services() + .rooms + .timeline + .backfill_if_required(&body.room_id, from) + .await?; + let events_before: Vec<_> = services() + .rooms + .timeline + .pdus_until(sender_user, &body.room_id, from)? + .filter_map(Result::ok) // Filter out buggy events + .filter(|(_, pdu)| contains_url_filter(pdu, &body.filter)) + .filter(|(_, pdu)| visibility_filter(pdu, sender_user, &body.room_id)) + .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take(limit) + .collect(); + + for (_, event) in &events_before { + /* TODO: Remove the not "element_hacks" check when these are resolved: + * https://github.com/vector-im/element-android/issues/3417 + * https://github.com/vector-im/element-web/issues/21034 + */ + if !cfg!(feature = "element_hacks") + && !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + + lazy_loaded.insert(event.sender.clone()); + } + + next_token = events_before.last().map(|(count, _)| count).copied(); + + let events_before: Vec<_> = events_before + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(); + + resp.start = from.stringify(); + resp.end = next_token.map(|count| count.stringify()); + resp.chunk = events_before; + }, + } + + resp.state = Vec::new(); + for ll_id in &lazy_loaded { + if let Some(member_event) = services().rooms.state_accessor.room_state_get( + &body.room_id, + &StateEventType::RoomMember, + ll_id.as_str(), + )? 
{ + resp.state.push(member_event.to_state_event()); + } + } + + // remove the feature check when we are sure clients like element can handle it + if !cfg!(feature = "element_hacks") { + if let Some(next_token) = next_token { + services() + .rooms + .lazy_loading + .lazy_load_mark_sent(sender_user, sender_device, &body.room_id, lazy_loaded, next_token) + .await; + } + } + + Ok(resp) +} + +fn visibility_filter(pdu: &PduEvent, user_id: &UserId, room_id: &RoomId) -> bool { + services() + .rooms + .state_accessor + .user_can_see_event(user_id, room_id, &pdu.event_id) + .unwrap_or(false) +} + +fn contains_url_filter(pdu: &PduEvent, filter: &RoomEventFilter) -> bool { + if filter.url_filter.is_none() { + return true; + } + + let content: Value = from_str(pdu.content.get()).unwrap(); + match filter.url_filter { + Some(UrlFilter::EventsWithoutUrl) => !content["url"].is_string(), + Some(UrlFilter::EventsWithUrl) => content["url"].is_string(), + None => true, + } +} diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs new file mode 100644 index 00000000..1aac4aaa --- /dev/null +++ b/src/api/client_server/mod.rs @@ -0,0 +1,83 @@ +mod account; +mod alias; +mod backup; +mod capabilities; +mod config; +mod context; +mod device; +mod directory; +mod filter; +mod keys; +mod media; +mod membership; +mod message; +mod presence; +mod profile; +mod push; +mod read_marker; +mod redact; +mod relations; +mod report; +mod room; +mod search; +mod session; +mod space; +mod state; +mod sync; +mod tag; +mod thirdparty; +mod threads; +mod to_device; +mod typing; +mod unstable; +mod unversioned; +mod user_directory; +mod voip; + +pub use account::*; +pub use alias::*; +pub use backup::*; +pub use capabilities::*; +pub use config::*; +pub use context::*; +pub use device::*; +pub use directory::*; +pub use filter::*; +pub use keys::*; +pub use media::*; +pub use membership::*; +pub use message::*; +pub use presence::*; +pub use profile::*; +pub use push::*; +pub use 
read_marker::*; +pub use redact::*; +pub use relations::*; +pub use report::*; +pub use room::*; +pub use search::*; +pub use session::*; +pub use space::*; +pub use state::*; +pub use sync::*; +pub use tag::*; +pub use thirdparty::*; +pub use threads::*; +pub use to_device::*; +pub use typing::*; +pub use unstable::*; +pub use unversioned::*; +pub use user_directory::*; +pub use voip::*; + +/// generated device ID length +pub const DEVICE_ID_LENGTH: usize = 10; + +/// generated user access token length +pub const TOKEN_LENGTH: usize = 32; + +/// generated user session ID length +pub const SESSION_ID_LENGTH: usize = 32; + +/// auto-generated password length +pub const AUTO_GEN_PASSWORD_LENGTH: usize = 25; diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs new file mode 100644 index 00000000..424fe105 --- /dev/null +++ b/src/api/client_server/presence.rs @@ -0,0 +1,68 @@ +use std::time::Duration; + +use ruma::api::client::{ + error::ErrorKind, + presence::{get_presence, set_presence}, +}; + +use crate::{services, Error, Result, Ruma}; + +/// # `PUT /_matrix/client/r0/presence/{userId}/status` +/// +/// Sets the presence state of the sender user. +pub async fn set_presence_route(body: Ruma) -> Result { + if !services().globals.allow_local_presence() { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Presence is disabled on this server")); + } + + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + services() + .presence + .set_presence(sender_user, &body.presence, None, None, body.status_msg.clone())?; + + Ok(set_presence::v3::Response {}) +} + +/// # `GET /_matrix/client/r0/presence/{userId}/status` +/// +/// Gets the presence state of the given user. 
+/// +/// - Only works if you share a room with the user +pub async fn get_presence_route(body: Ruma) -> Result { + if !services().globals.allow_local_presence() { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Presence is disabled on this server")); + } + + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let mut presence_event = None; + + for _room_id in services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? + { + if let Some(presence) = services().presence.get_presence(sender_user)? { + presence_event = Some(presence); + break; + } + } + + if let Some(presence) = presence_event { + Ok(get_presence::v3::Response { + // TODO: Should ruma just use the presenceeventcontent type here? + status_msg: presence.content.status_msg, + currently_active: presence.content.currently_active, + last_active_ago: presence + .content + .last_active_ago + .map(|millis| Duration::from_millis(millis.into())), + presence: presence.content.presence, + }) + } else { + Err(Error::BadRequest( + ErrorKind::NotFound, + "Presence state for this user was not found", + )) + } +} diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs new file mode 100644 index 00000000..083a3073 --- /dev/null +++ b/src/api/client_server/profile.rs @@ -0,0 +1,351 @@ +use std::sync::Arc; + +use ruma::{ + api::{ + client::{ + error::ErrorKind, + profile::{get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name}, + }, + federation, + }, + events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType}, + presence::PresenceState, +}; +use serde_json::value::to_raw_value; + +use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma}; + +/// # `PUT /_matrix/client/r0/profile/{userId}/displayname` +/// +/// Updates the displayname. 
+/// +/// - Also makes sure other users receive the update using presence EDUs +pub async fn set_displayname_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .users + .set_displayname(sender_user, body.displayname.clone()) + .await?; + + // Send a new membership event and presence update into all joined rooms + let all_rooms_joined: Vec<_> = services() + .rooms + .state_cache + .rooms_joined(sender_user) + .filter_map(Result::ok) + .map(|room_id| { + Ok::<_, Error>(( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + displayname: body.displayname.clone(), + join_authorized_via_users_server: None, + ..serde_json::from_str( + services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomMember, sender_user.as_str())? + .ok_or_else(|| { + Error::bad_database("Tried to send displayname update for user not in the room.") + })? + .content + .get(), + ) + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + room_id, + )) + }) + .filter_map(Result::ok) + .collect(); + + for (pdu_builder, room_id) in all_rooms_joined { + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + _ = services() + .rooms + .timeline + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + .await; + } + + if services().globals.allow_local_presence() { + // Presence update + services() + .presence + .ping_presence(sender_user, &PresenceState::Online)?; + } + + Ok(set_display_name::v3::Response {}) +} + +/// # `GET /_matrix/client/v3/profile/{userId}/displayname` +/// +/// Returns the displayname of the user. 
+/// +/// - If user is on another server and we do not have a local copy already +/// fetch displayname over federation +pub async fn get_displayname_route( + body: Ruma, +) -> Result { + if body.user_id.server_name() != services().globals.server_name() { + // Create and update our local copy of the user + if let Ok(response) = services() + .sending + .send_federation_request( + body.user_id.server_name(), + federation::query::get_profile_information::v1::Request { + user_id: body.user_id.clone(), + field: None, // we want the full user's profile to update locally too + }, + ) + .await + { + if !services().users.exists(&body.user_id)? { + services().users.create(&body.user_id, None)?; + } + + services() + .users + .set_displayname(&body.user_id, response.displayname.clone()) + .await?; + services() + .users + .set_avatar_url(&body.user_id, response.avatar_url.clone()) + .await?; + services() + .users + .set_blurhash(&body.user_id, response.blurhash.clone()) + .await?; + + return Ok(get_display_name::v3::Response { + displayname: response.displayname, + }); + } + } + + if !services().users.exists(&body.user_id)? { + // Return 404 if this user doesn't exist and we couldn't fetch it over + // federation + return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); + } + + Ok(get_display_name::v3::Response { + displayname: services().users.displayname(&body.user_id)?, + }) +} + +/// # `PUT /_matrix/client/v3/profile/{userId}/avatar_url` +/// +/// Updates the `avatar_url` and `blurhash`. 
+/// +/// - Also makes sure other users receive the update using presence EDUs +pub async fn set_avatar_url_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .users + .set_avatar_url(sender_user, body.avatar_url.clone()) + .await?; + + services() + .users + .set_blurhash(sender_user, body.blurhash.clone()) + .await?; + + // Send a new membership event and presence update into all joined rooms + let all_joined_rooms: Vec<_> = services() + .rooms + .state_cache + .rooms_joined(sender_user) + .filter_map(Result::ok) + .map(|room_id| { + Ok::<_, Error>(( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + avatar_url: body.avatar_url.clone(), + join_authorized_via_users_server: None, + ..serde_json::from_str( + services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomMember, sender_user.as_str())? + .ok_or_else(|| { + Error::bad_database("Tried to send displayname update for user not in the room.") + })? + .content + .get(), + ) + .map_err(|_| Error::bad_database("Database contains invalid PDU."))? 
+ }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + room_id, + )) + }) + .filter_map(Result::ok) + .collect(); + + for (pdu_builder, room_id) in all_joined_rooms { + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + _ = services() + .rooms + .timeline + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + .await; + } + + if services().globals.allow_local_presence() { + // Presence update + services() + .presence + .ping_presence(sender_user, &PresenceState::Online)?; + } + + Ok(set_avatar_url::v3::Response {}) +} + +/// # `GET /_matrix/client/v3/profile/{userId}/avatar_url` +/// +/// Returns the `avatar_url` and `blurhash` of the user. +/// +/// - If user is on another server and we do not have a local copy already +/// fetch `avatar_url` and blurhash over federation +pub async fn get_avatar_url_route(body: Ruma) -> Result { + if body.user_id.server_name() != services().globals.server_name() { + // Create and update our local copy of the user + if let Ok(response) = services() + .sending + .send_federation_request( + body.user_id.server_name(), + federation::query::get_profile_information::v1::Request { + user_id: body.user_id.clone(), + field: None, // we want the full user's profile to update locally as well + }, + ) + .await + { + if !services().users.exists(&body.user_id)? 
{ + services().users.create(&body.user_id, None)?; + } + + services() + .users + .set_displayname(&body.user_id, response.displayname.clone()) + .await?; + services() + .users + .set_avatar_url(&body.user_id, response.avatar_url.clone()) + .await?; + services() + .users + .set_blurhash(&body.user_id, response.blurhash.clone()) + .await?; + + return Ok(get_avatar_url::v3::Response { + avatar_url: response.avatar_url, + blurhash: response.blurhash, + }); + } + } + + if !services().users.exists(&body.user_id)? { + // Return 404 if this user doesn't exist and we couldn't fetch it over + // federation + return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); + } + + Ok(get_avatar_url::v3::Response { + avatar_url: services().users.avatar_url(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, + }) +} + +/// # `GET /_matrix/client/v3/profile/{userId}` +/// +/// Returns the displayname, avatar_url and blurhash of the user. +/// +/// - If user is on another server and we do not have a local copy already, +/// fetch profile over federation. +pub async fn get_profile_route(body: Ruma) -> Result { + if body.user_id.server_name() != services().globals.server_name() { + // Create and update our local copy of the user + if let Ok(response) = services() + .sending + .send_federation_request( + body.user_id.server_name(), + federation::query::get_profile_information::v1::Request { + user_id: body.user_id.clone(), + field: None, + }, + ) + .await + { + if !services().users.exists(&body.user_id)? 
{ + services().users.create(&body.user_id, None)?; + } + + services() + .users + .set_displayname(&body.user_id, response.displayname.clone()) + .await?; + services() + .users + .set_avatar_url(&body.user_id, response.avatar_url.clone()) + .await?; + services() + .users + .set_blurhash(&body.user_id, response.blurhash.clone()) + .await?; + + return Ok(get_profile::v3::Response { + displayname: response.displayname, + avatar_url: response.avatar_url, + blurhash: response.blurhash, + }); + } + } + + if !services().users.exists(&body.user_id)? { + // Return 404 if this user doesn't exist and we couldn't fetch it over + // federation + return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); + } + + Ok(get_profile::v3::Response { + avatar_url: services().users.avatar_url(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, + displayname: services().users.displayname(&body.user_id)?, + }) +} diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs new file mode 100644 index 00000000..71d937a6 --- /dev/null +++ b/src/api/client_server/push.rs @@ -0,0 +1,371 @@ +use ruma::{ + api::client::{ + error::ErrorKind, + push::{ + delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, + set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleScope, + }, + }, + events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, + push::{InsertPushRuleError, RemovePushRuleError, Ruleset}, +}; + +use crate::{services, Error, Result, Ruma}; + +/// # `GET /_matrix/client/r0/pushrules/` +/// +/// Retrieves the push rules event for this user. 
+pub async fn get_pushrules_all_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = + services() + .account_data + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())?; + + if let Some(event) = event { + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? + .content; + + Ok(get_pushrules_all::v3::Response { + global: account_data.global, + }) + } else { + services().account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: Ruleset::server_default(sender_user), + }, + }) + .expect("to json always works"), + )?; + + Ok(get_pushrules_all::v3::Response { + global: Ruleset::server_default(sender_user), + }) + } +} + +/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` +/// +/// Retrieves a single specified push rule for this user. +pub async fn get_pushrule_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = services() + .account_data + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? + .content; + + let rule = account_data + .global + .get(body.kind.clone(), &body.rule_id) + .map(Into::into); + + if let Some(rule) = rule { + Ok(get_pushrule::v3::Response { + rule, + }) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")) + } +} + +/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` +/// +/// Creates a single specified push rule for this user. 
+pub async fn set_pushrule_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; + + if body.scope != RuleScope::Global { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let event = services() + .account_data + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + if let Err(error) = + account_data + .content + .global + .insert(body.rule.clone(), body.after.as_deref(), body.before.as_deref()) + { + let err = match error { + InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest( + ErrorKind::InvalidParam, + "Rule IDs starting with a dot are reserved for server-default rules.", + ), + InsertPushRuleError::InvalidRuleId => { + Error::BadRequest(ErrorKind::InvalidParam, "Rule ID containing invalid characters.") + }, + InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest( + ErrorKind::InvalidParam, + "Can't place a push rule relatively to a server-default rule.", + ), + InsertPushRuleError::UnknownRuleId => { + Error::BadRequest(ErrorKind::NotFound, "The before or after rule could not be found.") + }, + InsertPushRuleError::BeforeHigherThanAfter => Error::BadRequest( + ErrorKind::InvalidParam, + "The before rule has a higher priority than the after rule.", + ), + _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), + }; + + return Err(err); + } + + services().account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; + + Ok(set_pushrule::v3::Response {}) +} + +/// # `GET 
/_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` +/// +/// Gets the actions of a single specified push rule for this user. +pub async fn get_pushrule_actions_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != RuleScope::Global { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let event = services() + .account_data + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? + .content; + + let global = account_data.global; + let actions = global + .get(body.kind.clone(), &body.rule_id) + .map(|rule| rule.actions().to_owned()) + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."))?; + + Ok(get_pushrule_actions::v3::Response { + actions, + }) +} + +/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` +/// +/// Sets the actions of a single specified push rule for this user. +pub async fn set_pushrule_actions_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != RuleScope::Global { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let event = services() + .account_data + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? 
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + if account_data + .content + .global + .set_actions(body.kind.clone(), &body.rule_id, body.actions.clone()) + .is_err() + { + return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); + } + + services().account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; + + Ok(set_pushrule_actions::v3::Response {}) +} + +/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` +/// +/// Gets the enabled status of a single specified push rule for this user. +pub async fn get_pushrule_enabled_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != RuleScope::Global { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let event = services() + .account_data + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = account_data.content.global; + let enabled = global + .get(body.kind.clone(), &body.rule_id) + .map(ruma::push::AnyPushRuleRef::enabled) + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."))?; + + Ok(get_pushrule_enabled::v3::Response { + enabled, + }) +} + +/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` +/// +/// Sets the enabled status of a single specified push rule for this user. 
+pub async fn set_pushrule_enabled_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != RuleScope::Global { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let event = services() + .account_data + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + if account_data + .content + .global + .set_enabled(body.kind.clone(), &body.rule_id, body.enabled) + .is_err() + { + return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); + } + + services().account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; + + Ok(set_pushrule_enabled::v3::Response {}) +} + +/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` +/// +/// Deletes a single specified push rule for this user. +pub async fn delete_pushrule_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.scope != RuleScope::Global { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Scopes other than 'global' are not supported.", + )); + } + + let event = services() + .account_data + .get(None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into())? 
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "PushRules event not found."))?; + + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + if let Err(error) = account_data + .content + .global + .remove(body.kind.clone(), &body.rule_id) + { + let err = match error { + RemovePushRuleError::ServerDefault => { + Error::BadRequest(ErrorKind::InvalidParam, "Cannot delete a server-default pushrule.") + }, + RemovePushRuleError::NotFound => Error::BadRequest(ErrorKind::NotFound, "Push rule not found."), + _ => Error::BadRequest(ErrorKind::InvalidParam, "Invalid data."), + }; + + return Err(err); + } + + services().account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; + + Ok(delete_pushrule::v3::Response {}) +} + +/// # `GET /_matrix/client/r0/pushers` +/// +/// Gets all currently active pushers for the sender user. +pub async fn get_pushers_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + Ok(get_pushers::v3::Response { + pushers: services().pusher.get_pushers(sender_user)?, + }) +} + +/// # `POST /_matrix/client/r0/pushers/set` +/// +/// Adds a pusher for the sender user. 
+/// +/// - TODO: Handle `append` +pub async fn set_pushers_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .pusher + .set_pusher(sender_user, body.action.clone())?; + + Ok(set_pusher::v3::Response::default()) +} diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs new file mode 100644 index 00000000..607b5aba --- /dev/null +++ b/src/api/client_server/read_marker.rs @@ -0,0 +1,173 @@ +use std::collections::BTreeMap; + +use ruma::{ + api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, + events::{ + receipt::{ReceiptThread, ReceiptType}, + RoomAccountDataEventType, + }, + MilliSecondsSinceUnixEpoch, +}; + +use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma}; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` +/// +/// Sets different types of read markers. +/// +/// - Updates fully-read account data event to `fully_read` +/// - If `read_receipt` is set: Update private marker and public read receipt +/// EDU +pub async fn set_read_marker_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if let Some(fully_read) = &body.fully_read { + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: fully_read.clone(), + }, + }; + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + )?; + } + + if body.private_read_receipt.is_some() || body.read_receipt.is_some() { + services() + .rooms + .user + .reset_notification_counts(sender_user, &body.room_id)?; + } + + if let Some(event) = &body.private_read_receipt { + let count = services() + .rooms + .timeline + .get_pdu_count(event)? 
+ .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?; + let count = match count { + PduCount::Backfilled(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Read receipt is in backfilled timeline", + )) + }, + PduCount::Normal(c) => c, + }; + services() + .rooms + .read_receipt + .private_read_set(&body.room_id, sender_user, count)?; + } + + if let Some(event) = &body.read_receipt { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + ); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event.to_owned(), receipts); + + services().rooms.read_receipt.readreceipt_update( + sender_user, + &body.room_id, + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )?; + } + + Ok(set_read_marker::v3::Response {}) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` +/// +/// Sets private read marker and public read receipt EDU. 
+pub async fn create_receipt_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if matches!( + &body.receipt_type, + create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate + ) { + services() + .rooms + .user + .reset_notification_counts(sender_user, &body.room_id)?; + } + + match body.receipt_type { + create_receipt::v3::ReceiptType::FullyRead => { + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: body.event_id.clone(), + }, + }; + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + )?; + }, + create_receipt::v3::ReceiptType::Read => { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + ); + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(body.event_id.clone(), receipts); + + services().rooms.read_receipt.readreceipt_update( + sender_user, + &body.room_id, + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )?; + }, + create_receipt::v3::ReceiptType::ReadPrivate => { + let count = services() + .rooms + .timeline + .get_pdu_count(&body.event_id)? 
+ .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."))?; + let count = match count { + PduCount::Backfilled(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Read receipt is in backfilled timeline", + )) + }, + PduCount::Normal(c) => c, + }; + services() + .rooms + .read_receipt + .private_read_set(&body.room_id, sender_user, count)?; + }, + _ => return Err(Error::bad_database("Unsupported receipt type")), + } + + Ok(create_receipt::v3::Response {}) +} diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs new file mode 100644 index 00000000..8e71bd3b --- /dev/null +++ b/src/api/client_server/redact.rs @@ -0,0 +1,58 @@ +use std::sync::Arc; + +use ruma::{ + api::client::redact::redact_event, + events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, +}; +use serde_json::value::to_raw_value; + +use crate::{service::pdu::PduBuilder, services, Result, Ruma}; + +/// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` +/// +/// Tries to send a redaction event into the room. 
+/// +/// - TODO: Handle txn id +pub async fn redact_event_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomRedaction, + content: to_raw_value(&RoomRedactionEventContent { + redacts: Some(body.event_id.clone()), + reason: body.reason.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: Some(body.event_id.into()), + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + let event_id = (*event_id).to_owned(); + Ok(redact_event::v3::Response { + event_id, + }) +} diff --git a/src/api/client_server/relations.rs b/src/api/client_server/relations.rs new file mode 100644 index 00000000..f2ddfecb --- /dev/null +++ b/src/api/client_server/relations.rs @@ -0,0 +1,88 @@ +use ruma::api::client::relations::{ + get_relating_events, get_relating_events_with_rel_type, get_relating_events_with_rel_type_and_event_type, +}; + +use crate::{services, Result, Ruma}; + +/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` +pub async fn get_relating_events_with_rel_type_and_event_type_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let res = services() + .rooms + .pdu_metadata + .paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + &Some(body.event_type.clone()), + &Some(body.rel_type.clone()), + &body.from, + &body.to, + &body.limit, + body.recurse, + body.dir, + )?; + + Ok(get_relating_events_with_rel_type_and_event_type::v1::Response { + chunk: res.chunk, + 
next_batch: res.next_batch, + prev_batch: res.prev_batch, + recursion_depth: res.recursion_depth, + }) +} + +/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}` +pub async fn get_relating_events_with_rel_type_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let res = services() + .rooms + .pdu_metadata + .paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + &None, + &Some(body.rel_type.clone()), + &body.from, + &body.to, + &body.limit, + body.recurse, + body.dir, + )?; + + Ok(get_relating_events_with_rel_type::v1::Response { + chunk: res.chunk, + next_batch: res.next_batch, + prev_batch: res.prev_batch, + recursion_depth: res.recursion_depth, + }) +} + +/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}` +pub async fn get_relating_events_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .rooms + .pdu_metadata + .paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + &None, + &None, + &body.from, + &body.to, + &body.limit, + body.recurse, + body.dir, + ) +} diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs new file mode 100644 index 00000000..86ba91f8 --- /dev/null +++ b/src/api/client_server/report.rs @@ -0,0 +1,111 @@ +use std::time::Duration; + +use rand::Rng; +use ruma::{ + api::client::{error::ErrorKind, room::report_content}, + events::room::message, + int, +}; +use tokio::time::sleep; +use tracing::{debug, info}; + +use crate::{services, utils::HtmlEscape, Error, Result, Ruma}; + +/// # `POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}` +/// +/// Reports an inappropriate event to homeserver admins +pub async fn report_event_route(body: Ruma) -> Result { + // user authentication + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + info!("Received 
/report request by user {}", sender_user); + + // check if we know about the reported event ID or if it's invalid + let Some(pdu) = services().rooms.timeline.get_pdu(&body.event_id)? else { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Event ID is not known to us or Event ID is invalid", + )); + }; + + // check if the room ID from the URI matches the PDU's room ID + if body.room_id != pdu.room_id { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Event ID does not belong to the reported room", + )); + } + + // check if reporting user is in the reporting room + if !services() + .rooms + .state_cache + .room_members(&pdu.room_id) + .filter_map(Result::ok) + .any(|user_id| user_id == *sender_user) + { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "You are not in the room you are reporting.", + )); + } + + // check if score is in valid range + if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid score, must be within 0 to -100", + )); + }; + + // check if report reasoning is less than or equal to 750 characters + if let Some(true) = body.reason.clone().map(|s| s.chars().count() >= 750) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Reason too long, should be 750 characters or fewer", + )); + }; + + // send admin room message that we received the report with an @room ping for + // urgency + services() + .admin + .send_message(message::RoomMessageEventContent::text_html( + format!( + "@room Report received from: {}\n\nEvent ID: {}\nRoom ID: {}\nSent By: {}\n\nReport Score: {}\nReport \ + Reason: {}", + sender_user.to_owned(), + pdu.event_id, + pdu.room_id, + pdu.sender.clone(), + body.score.unwrap_or_else(|| ruma::Int::from(0)), + body.reason.as_deref().unwrap_or("") + ), + format!( + "
@room Report received from: {0}\ +
  • Event Info
    • Event ID: {1}\ + 🔗
    • Room ID: {2}\ +
    • Sent By: {3}
  • \ + Report Info
    • Report Score: {4}
    • Report Reason: {5}
  • \ +
", + sender_user.to_owned(), + pdu.event_id.clone(), + pdu.room_id.clone(), + pdu.sender.clone(), + body.score.unwrap_or_else(|| ruma::Int::from(0)), + HtmlEscape(body.reason.as_deref().unwrap_or("")) + ), + )); + + // even though this is kinda security by obscurity, let's still make a small + // random delay sending a successful response per spec suggestion regarding + // enumerating for potential events existing in our server. + let time_to_wait = rand::thread_rng().gen_range(8..21); + debug!( + "Got successful /report request, waiting {} seconds before sending successful response.", + time_to_wait + ); + sleep(Duration::from_secs(time_to_wait)).await; + + Ok(report_content::v3::Response {}) +} diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs new file mode 100644 index 00000000..78ba5142 --- /dev/null +++ b/src/api/client_server/room.rs @@ -0,0 +1,964 @@ +use std::{cmp::max, collections::BTreeMap, sync::Arc}; + +use ruma::{ + api::client::{ + error::ErrorKind, + room::{self, aliases, create_room, get_room_event, upgrade_room}, + }, + events::{ + room::{ + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + tombstone::RoomTombstoneEventContent, + topic::RoomTopicEventContent, + }, + StateEventType, TimelineEventType, + }, + int, + serde::JsonObject, + CanonicalJsonObject, CanonicalJsonValue, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, +}; +use serde_json::{json, value::to_raw_value}; +use tracing::{debug, error, info, warn}; + +use crate::{api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma}; + +/// # `POST 
/_matrix/client/v3/createRoom` +/// +/// Creates a new room. +/// +/// - Room ID is randomly generated +/// - Create alias if `room_alias_name` is set +/// - Send create event +/// - Join sender user +/// - Send power levels event +/// - Send canonical room alias +/// - Send join rules +/// - Send history visibility +/// - Send guest access +/// - Send events listed in initial state +/// - Send events implied by `name` and `topic` +/// - Send invite events +pub async fn create_room_route(body: Ruma) -> Result { + use create_room::v3::RoomPreset; + + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services().globals.allow_room_creation() + && body.appservice_info.is_none() + && !services().users.is_admin(sender_user)? + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Room creation has been disabled.")); + } + + let room_id: OwnedRoomId; + + // checks if the user specified an explicit (custom) room_id to be created with + // in request body. falls back to normal generated room ID if not specified. + if let Some(CanonicalJsonValue::Object(json_body)) = &body.json_body { + match json_body.get("room_id") { + Some(custom_room_id) => { + let custom_room_id_s = custom_room_id.to_string(); + + // do some checks on the custom room ID similar to room aliases + if custom_room_id_s.contains(':') { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Custom room ID contained `:` which is not allowed. 
Please note that this expects a \ + localpart, not the full room ID.", + )); + } else if custom_room_id_s.contains(char::is_whitespace) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Custom room ID contained spaces which is not valid.", + )); + } else if custom_room_id_s.len() > 255 { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Custom room ID is too long.")); + } + + // apply forbidden room alias checks to custom room IDs too + if services() + .globals + .forbidden_alias_names() + .is_match(&custom_room_id_s) + { + return Err(Error::BadRequest(ErrorKind::Unknown, "Custom room ID is forbidden.")); + } + + let full_room_id = "!".to_owned() + + &custom_room_id_s.replace('"', "") + + ":" + services().globals.server_name().as_ref(); + debug!("Full room ID: {}", full_room_id); + + room_id = RoomId::parse(full_room_id).map_err(|e| { + info!("User attempted to create room with custom room ID but failed parsing: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Custom room ID could not be parsed") + })?; + }, + None => room_id = RoomId::new(services().globals.server_name()), + } + } else { + room_id = RoomId::new(services().globals.server_name()); + } + + // check if room ID doesn't already exist instead of erroring on auth check + if services().rooms.short.get_shortroomid(&room_id)?.is_some() { + return Err(Error::BadRequest( + ErrorKind::RoomInUse, + "Room with that custom room ID already exists", + )); + } + + services().rooms.short.get_or_create_shortroomid(&room_id)?; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let alias: Option = body + .room_alias_name + .as_ref() + .map_or(Ok(None), |localpart| { + // Basic checks on the room alias validity + if localpart.contains(':') { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room alias contained `:` which is not allowed. 
Please note that this expects a localpart, not \ + the full room alias.", + )); + } else if localpart.contains(char::is_whitespace) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room alias contained spaces which is not a valid room alias.", + )); + } else if localpart.len() > 255 { + // there is nothing spec-wise saying to check the limit of this, + // however absurdly long room aliases are guaranteed to be unreadable or done + // maliciously. there is no reason a room alias should even exceed 100 + // characters as is. generally in spec, 255 is matrix's fav number + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room alias is excessively long, clients may not be able to handle this. Please shorten it.", + )); + } else if localpart.contains('"') { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room alias contained `\"` which is not allowed.", + )); + } + + // check if room alias is forbidden + if services() + .globals + .forbidden_alias_names() + .is_match(localpart) + { + return Err(Error::BadRequest(ErrorKind::Unknown, "Room alias name is forbidden.")); + } + + let alias = + RoomAliasId::parse(format!("#{}:{}", localpart, services().globals.server_name())).map_err(|e| { + warn!("Failed to parse room alias for room ID {}: {e}", room_id); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid room alias specified.") + })?; + + if services() + .rooms + .alias + .resolve_local_alias(&alias)? 
+ .is_some() + { + Err(Error::BadRequest(ErrorKind::RoomInUse, "Room alias already exists.")) + } else { + Ok(Some(alias)) + } + })?; + + if let Some(ref alias) = alias { + if let Some(ref info) = body.appservice_info { + if !info.aliases.is_match(alias.as_str()) { + return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias is not in namespace.")); + } + } else if services().appservice.is_exclusive_alias(alias).await { + return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias reserved by appservice.")); + } + } + + let room_version = match body.room_version.clone() { + Some(room_version) => { + if services() + .globals + .supported_room_versions() + .contains(&room_version) + { + room_version + } else { + return Err(Error::BadRequest( + ErrorKind::UnsupportedRoomVersion, + "This server does not support that room version.", + )); + } + }, + None => services().globals.default_room_version(), + }; + + let content = match &body.creation_content { + Some(content) => { + let mut content = content + .deserialize_as::() + .map_err(|e| { + error!("Failed to deserialise content as canonical JSON: {}", e); + Error::bad_database("Failed to deserialise content as canonical JSON.") + })?; + match room_version { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => { + content.insert( + "creator".into(), + json!(&sender_user).try_into().map_err(|e| { + info!("Invalid creation content: {e}"); + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, + ); + }, + RoomVersionId::V11 => {}, // V11 removed the "creator" key + _ => { + warn!("Unexpected or unsupported room version {room_version}"); + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Unexpected or unsupported room version found", + )); + }, + } + + content.insert( + "room_version".into(), + json!(room_version.as_str()) + 
.try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?, + ); + content + }, + None => { + let content = match room_version { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()), + RoomVersionId::V11 => RoomCreateEventContent::new_v11(), + _ => { + warn!("Unexpected or unsupported room version {room_version}"); + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Unexpected or unsupported room version found", + )); + }, + }; + let mut content = serde_json::from_str::( + to_raw_value(&content) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))? + .get(), + ) + .unwrap(); + content.insert( + "room_version".into(), + json!(room_version.as_str()) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))?, + ); + content + }, + }; + + // Validate creation content + let de_result = serde_json::from_str::( + to_raw_value(&content) + .expect("Invalid creation content") + .get(), + ); + + if de_result.is_err() { + return Err(Error::BadRequest(ErrorKind::BadJson, "Invalid creation content")); + } + + // 1. The room create event + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; + + // 2. 
Let the room creator join + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: Some(body.is_direct), + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; + + // 3. Power levels + + // Figure out preset. We need it for preset specific events + let preset = body.preset.clone().unwrap_or(match &body.visibility { + room::Visibility::Public => RoomPreset::PublicChat, + _ => RoomPreset::PrivateChat, // Room visibility should not be custom + }); + + let mut users = BTreeMap::new(); + users.insert(sender_user.clone(), int!(100)); + + if preset == RoomPreset::TrustedPrivateChat { + for invite_ in &body.invite { + users.insert(invite_.clone(), int!(100)); + } + } + + let mut power_levels_content = serde_json::to_value(RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"); + + // secure proper defaults of sensitive/dangerous permissions that moderators + // (power level 50) should not have easy access to + power_levels_content["events"]["m.room.power_levels"] = serde_json::to_value(100).expect("100 is valid Value"); + power_levels_content["events"]["m.room.server_acl"] = serde_json::to_value(100).expect("100 is valid Value"); + power_levels_content["events"]["m.room.tombstone"] = serde_json::to_value(100).expect("100 is valid Value"); + power_levels_content["events"]["m.room.encryption"] = serde_json::to_value(100).expect("100 is valid Value"); + 
power_levels_content["events"]["m.room.history_visibility"] = + serde_json::to_value(100).expect("100 is valid Value"); + + // synapse does this too. clients do not expose these permissions. it prevents + // default users from calling public rooms, for obvious reasons. + if body.visibility == room::Visibility::Public { + power_levels_content["events"]["m.call.invite"] = serde_json::to_value(50).expect("50 is valid Value"); + power_levels_content["events"]["org.matrix.msc3401.call"] = + serde_json::to_value(50).expect("50 is valid Value"); + power_levels_content["events"]["org.matrix.msc3401.call.member"] = + serde_json::to_value(50).expect("50 is valid Value"); + } + + if let Some(power_level_content_override) = &body.power_level_content_override { + let json: JsonObject = serde_json::from_str(power_level_content_override.json().get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override."))?; + + for (key, value) in json { + power_levels_content[key] = value; + } + } + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomPowerLevels, + content: to_raw_value(&power_levels_content).expect("to_raw_value always works on serde_json::Value"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; + + // 4. Canonical room alias + if let Some(room_alias_id) = &alias { + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(room_alias_id.to_owned()), + alt_aliases: vec![], + }) + .expect("We checked that alias earlier, it must be fine"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; + } + + // 5. 
Events set by preset + + // 5.1 Join Rules + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { + RoomPreset::PublicChat => JoinRule::Public, + // according to spec "invite" is the default + _ => JoinRule::Invite, + })) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; + + // 5.2 History Visibility + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; + + // 5.3 Guest Access + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { + RoomPreset::PublicChat => GuestAccess::Forbidden, + _ => GuestAccess::CanJoin, + })) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; + + // 6. 
Events listed in initial_state + for event in &body.initial_state { + let mut pdu_builder = event.deserialize_as::().map_err(|e| { + warn!("Invalid initial state event: {:?}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.") + })?; + + // Implicit state key defaults to "" + pdu_builder.state_key.get_or_insert_with(String::new); + + // Silently skip encryption events if they are not allowed + if pdu_builder.event_type == TimelineEventType::RoomEncryption && !services().globals.allow_encryption() { + continue; + } + + services() + .rooms + .timeline + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + .await?; + } + + // 7. Events implied by name and topic + if let Some(name) = &body.name { + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(name.clone())) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; + } + + if let Some(topic) = &body.topic { + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: topic.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; + } + + // 8. 
Events implied by invite (and TODO: invite_3pid) + drop(state_lock); + for user_id in &body.invite { + _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await; + } + + // Homeserver specific stuff + if let Some(alias) = alias { + services().rooms.alias.set_alias(&alias, &room_id)?; + } + + if body.visibility == room::Visibility::Public { + services().rooms.directory.set_public(&room_id)?; + } + + info!("{} created a room", sender_user); + + Ok(create_room::v3::Response::new(room_id)) +} + +/// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` +/// +/// Gets a single event. +/// +/// - You have to currently be joined to the room (TODO: Respect history +/// visibility) +pub async fn get_room_event_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = services() + .rooms + .timeline + .get_pdu(&body.event_id)? + .ok_or_else(|| { + warn!("Event not found, event ID: {:?}", &body.event_id); + Error::BadRequest(ErrorKind::NotFound, "Event not found.") + })?; + + if !services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &event.room_id, &body.event_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this event.", + )); + } + + let mut event = (*event).clone(); + event.add_age()?; + + Ok(get_room_event::v3::Response { + event: event.to_room_event(), + }) +} + +/// # `GET /_matrix/client/r0/rooms/{roomId}/aliases` +/// +/// Lists all aliases of the room. +/// +/// - Only users joined to the room are allowed to call this, or if +/// `history_visibility` is world readable in the room +pub async fn get_room_aliases_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services() + .rooms + .state_accessor + .user_can_see_state_events(sender_user, &body.room_id)? 
+ { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this room.", + )); + } + + Ok(aliases::v3::Response { + aliases: services() + .rooms + .alias + .local_aliases_for_room(&body.room_id) + .filter_map(Result::ok) + .collect(), + }) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` +/// +/// Upgrades the room. +/// +/// - Creates a replacement room +/// - Sends a tombstone event into the current room +/// - Sender user joins the room +/// - Transfers some state events +/// - Moves local aliases +/// - Modifies old room power levels to prevent users from speaking +pub async fn upgrade_room_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services() + .globals + .supported_room_versions() + .contains(&body.new_version) + { + return Err(Error::BadRequest( + ErrorKind::UnsupportedRoomVersion, + "This server does not support that room version.", + )); + } + + // Create a replacement room + let replacement_room = RoomId::new(services().globals.server_name()); + services() + .rooms + .short + .get_or_create_shortroomid(&replacement_room)?; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Send a m.room.tombstone event to the old room to indicate that it is not + // intended to be used any further Fail if the sender does not have the required + // permissions + let tombstone_event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomTombstone, + content: to_raw_value(&RoomTombstoneEventContent { + body: "This room has been replaced".to_owned(), + replacement_room: replacement_room.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + 
&body.room_id, + &state_lock, + ) + .await?; + + // Change lock to replacement room + drop(state_lock); + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(replacement_room.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Get the old room creation event + let mut create_event_content = serde_json::from_str::( + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? + .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? + .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid room event in database."))?; + + // Use the m.room.tombstone event as the predecessor + let predecessor = Some(ruma::events::room::create::PreviousRoom::new( + body.room_id.clone(), + (*tombstone_event_id).to_owned(), + )); + + // Send a m.room.create event containing a predecessor field and the applicable + // room_version + match body.new_version { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => { + create_event_content.insert( + "creator".into(), + json!(&sender_user).try_into().map_err(|e| { + info!("Error forming creation event: {e}"); + Error::BadRequest(ErrorKind::BadJson, "Error forming creation event") + })?, + ); + }, + RoomVersionId::V11 => { + // "creator" key no longer exists in V11 rooms + create_event_content.remove("creator"); + }, + _ => { + warn!("Unexpected or unsupported room version {}", body.new_version); + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Unexpected or unsupported room version found", + )); + }, + } + + create_event_content.insert( + "room_version".into(), + json!(&body.new_version) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + ); + 
create_event_content.insert( + "predecessor".into(), + json!(predecessor) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + ); + + // Validate creation event content + let de_result = serde_json::from_str::( + to_raw_value(&create_event_content) + .expect("Error forming creation event") + .get(), + ); + + if de_result.is_err() { + return Err(Error::BadRequest(ErrorKind::BadJson, "Error forming creation event")); + } + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomCreate, + content: to_raw_value(&create_event_content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; + + // Join the new room + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; + + // Recommended transferable state events list from the specs + let transferable_state_events = vec![ + StateEventType::RoomServerAcl, + StateEventType::RoomEncryption, + StateEventType::RoomName, + StateEventType::RoomAvatar, + StateEventType::RoomTopic, + StateEventType::RoomGuestAccess, + StateEventType::RoomHistoryVisibility, + StateEventType::RoomJoinRules, + StateEventType::RoomPowerLevels, + ]; + + // Replicate transferable state events to the new 
room + for event_type in transferable_state_events { + let event_content = match services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &event_type, "")? + { + Some(v) => v.content.clone(), + None => continue, // Skipping missing events. + }; + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: event_type.to_string().into(), + content: event_content, + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; + } + + // Moves any local aliases to the new room + for alias in services() + .rooms + .alias + .local_aliases_for_room(&body.room_id) + .filter_map(Result::ok) + { + services() + .rooms + .alias + .set_alias(&alias, &replacement_room)?; + } + + // Get the old room power levels + let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? + .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
+ .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid room event in database."))?; + + // Setting events_default and invite to the greater of 50 and users_default + 1 + let new_level = max(int!(50), power_levels_event_content.users_default + int!(1)); + power_levels_event_content.events_default = new_level; + power_levels_event_content.invite = new_level; + + // Modify the power levels in the old room to prevent sending of events and + // inviting new users + _ = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomPowerLevels, + content: to_raw_value(&power_levels_event_content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + // Return the replacement room id + Ok(upgrade_room::v3::Response { + replacement_room, + }) +} diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs new file mode 100644 index 00000000..d2a305d7 --- /dev/null +++ b/src/api/client_server/search.rs @@ -0,0 +1,182 @@ +use std::collections::BTreeMap; + +use ruma::{ + api::client::{ + error::ErrorKind, + search::search_events::{ + self, + v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, + }, + }, + events::AnyStateEvent, + serde::Raw, + OwnedRoomId, +}; +use tracing::debug; + +use crate::{services, Error, Result, Ruma}; + +/// # `POST /_matrix/client/r0/search` +/// +/// Searches rooms for messages. 
+/// +/// - Only works if the user is currently joined to the room (TODO: Respect +/// history visibility) +pub async fn search_events_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let search_criteria = body.search_categories.room_events.as_ref().unwrap(); + let filter = &search_criteria.filter; + let include_state = &search_criteria.include_state; + + let room_ids = filter.rooms.clone().unwrap_or_else(|| { + services() + .rooms + .state_cache + .rooms_joined(sender_user) + .filter_map(Result::ok) + .collect() + }); + + // Use limit or else 10, with maximum 100 + let limit = filter.limit.map_or(10, u64::from).min(100) as usize; + + let mut room_states: BTreeMap>> = BTreeMap::new(); + + if include_state.is_some_and(|include_state| include_state) { + for room_id in &room_ids { + if !services() + .rooms + .state_cache + .is_joined(sender_user, room_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this room.", + )); + } + + // check if sender_user can see state events + if services() + .rooms + .state_accessor + .user_can_see_state_events(sender_user, room_id)? + { + let room_state = services() + .rooms + .state_accessor + .room_state_full(room_id) + .await? + .values() + .map(|pdu| pdu.to_state_event()) + .collect::>(); + + debug!("Room state: {:?}", room_state); + + room_states.insert(room_id.clone(), room_state); + } else { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this room.", + )); + } + } + } + + let mut searches = Vec::new(); + + for room_id in &room_ids { + if !services() + .rooms + .state_cache + .is_joined(sender_user, room_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view this room.", + )); + } + + if let Some(search) = services() + .rooms + .search + .search_pdus(room_id, &search_criteria.search_term)? 
+ { + searches.push(search.0.peekable()); + } + } + + let skip = match body.next_batch.as_ref().map(|s| s.parse()) { + Some(Ok(s)) => s, + Some(Err(_)) => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid next_batch token.")), + None => 0, // Default to the start + }; + + let mut results = Vec::new(); + for _ in 0..skip + limit { + if let Some(s) = searches + .iter_mut() + .map(|s| (s.peek().cloned(), s)) + .max_by_key(|(peek, _)| peek.clone()) + .and_then(|(_, i)| i.next()) + { + results.push(s); + } + } + + let results: Vec<_> = results + .iter() + .filter_map(|result| { + services() + .rooms + .timeline + .get_pdu_from_id(result) + .ok()? + .filter(|pdu| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) + .unwrap_or(false) + }) + .map(|pdu| pdu.to_room_event()) + }) + .map(|result| { + Ok::<_, Error>(SearchResult { + context: EventContextResult { + end: None, + events_after: Vec::new(), + events_before: Vec::new(), + profile_info: BTreeMap::new(), + start: None, + }, + rank: None, + result: Some(result), + }) + }) + .filter_map(Result::ok) + .skip(skip) + .take(limit) + .collect(); + + let next_batch = if results.len() < limit { + None + } else { + Some((skip + limit).to_string()) + }; + + Ok(search_events::v3::Response::new(ResultCategories { + room_events: ResultRoomEvents { + count: Some((results.len() as u32).into()), + groups: BTreeMap::new(), // TODO + next_batch, + results, + state: room_states, + highlights: search_criteria + .search_term + .split_terminator(|c: char| !c.is_alphanumeric()) + .map(str::to_lowercase) + .collect(), + }, + })) +} diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs new file mode 100644 index 00000000..21543637 --- /dev/null +++ b/src/api/client_server/session.rs @@ -0,0 +1,274 @@ +use argon2::{PasswordHash, PasswordVerifier}; +use ruma::{ + api::client::{ + error::ErrorKind, + session::{ + get_login_types::{ + self, + 
v3::{ApplicationServiceLoginType, PasswordLoginType}, + }, + login::{ + self, + v3::{DiscoveryInfo, HomeserverInfo}, + }, + logout, logout_all, + }, + uiaa::UserIdentifier, + }, + UserId, +}; +use serde::Deserialize; +use tracing::{debug, error, info, warn}; + +use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; +use crate::{services, utils, Error, Result, Ruma}; + +#[derive(Debug, Deserialize)] +struct Claims { + sub: String, + //exp: usize, +} + +/// # `GET /_matrix/client/v3/login` +/// +/// Get the supported login types of this server. One of these should be used as +/// the `type` field when logging in. +pub async fn get_login_types_route(_body: Ruma) -> Result { + Ok(get_login_types::v3::Response::new(vec![ + get_login_types::v3::LoginType::Password(PasswordLoginType::default()), + get_login_types::v3::LoginType::ApplicationService(ApplicationServiceLoginType::default()), + ])) +} + +/// # `POST /_matrix/client/v3/login` +/// +/// Authenticates the user and returns an access token it can use in subsequent +/// requests. +/// +/// - The user needs to authenticate using their password (or if enabled using a +/// json web token) +/// - If `device_id` is known: invalidates old access token of that device +/// - If `device_id` is unknown: creates a new device +/// - Returns access token that is associated with the user and device +/// +/// Note: You can use [`GET +/// /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see +/// supported login types. +pub async fn login_route(body: Ruma) -> Result { + // Validate login method + // TODO: Other login methods + let user_id = match &body.login_info { + #[allow(deprecated)] + login::v3::LoginInfo::Password(login::v3::Password { + identifier, + password, + user, + .. 
+ }) => { + debug!("Got password login type"); + let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name(user_id.to_lowercase(), services().globals.server_name()) + } else if let Some(user) = user { + UserId::parse(user) + } else { + warn!("Bad login type: {:?}", &body.login_info); + return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); + } + .map_err(|e| { + warn!("Failed to parse username from user logging in: {e}"); + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; + + if services().appservice.is_exclusive_user_id(&user_id).await { + return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); + } + + let hash = services() + .users + .password_hash(&user_id)? + .ok_or(Error::BadRequest(ErrorKind::forbidden(), "Wrong username or password."))?; + + if hash.is_empty() { + return Err(Error::BadRequest(ErrorKind::UserDeactivated, "The user has been deactivated")); + } + + let Ok(parsed_hash) = PasswordHash::new(&hash) else { + error!("error while hashing user {}", user_id); + return Err(Error::BadServerResponse("could not hash")); + }; + + let hash_matches = services() + .globals + .argon + .verify_password(password.as_bytes(), &parsed_hash) + .is_ok(); + + if !hash_matches { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Wrong username or password.")); + } + + user_id + }, + login::v3::LoginInfo::Token(login::v3::Token { + token, + }) => { + debug!("Got token login type"); + if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() { + let token = + jsonwebtoken::decode::(token, jwt_decoding_key, &jsonwebtoken::Validation::default()) + .map_err(|e| { + warn!("Failed to parse JWT token from user logging in: {e}"); + Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid.") + })?; + + let username = token.claims.sub.to_lowercase(); + + let user_id = + UserId::parse_with_server_name(username, 
services().globals.server_name()).map_err(|e| { + warn!("Failed to parse username from user logging in: {e}"); + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; + + if services().appservice.is_exclusive_user_id(&user_id).await { + return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); + } + + user_id + } else { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Token login is not supported (server has no jwt decoding key).", + )); + } + }, + #[allow(deprecated)] + login::v3::LoginInfo::ApplicationService(login::v3::ApplicationService { + identifier, + user, + }) => { + debug!("Got appservice login type"); + let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name(user_id.to_lowercase(), services().globals.server_name()) + } else if let Some(user) = user { + UserId::parse(user) + } else { + warn!("Bad login type: {:?}", &body.login_info); + return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); + } + .map_err(|e| { + warn!("Failed to parse username from appservice logging in: {e}"); + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; + + if let Some(ref info) = body.appservice_info { + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace.")); + } + } else { + return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing appservice token.")); + } + + user_id + }, + _ => { + warn!("Unsupported or unknown login type: {:?}", &body.login_info); + debug!("JSON body: {:?}", &body.json_body); + return Err(Error::BadRequest(ErrorKind::Unknown, "Unsupported or unknown login type.")); + }, + }; + + // Generate new device id if the user didn't specify one + let device_id = body + .device_id + .clone() + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); + + // Generate a new token for the device + let token = 
utils::random_string(TOKEN_LENGTH); + + // Determine if device_id was provided and exists in the db for this user + let device_exists = body.device_id.as_ref().map_or(false, |device_id| { + services() + .users + .all_device_ids(&user_id) + .any(|x| x.as_ref().map_or(false, |v| v == device_id)) + }); + + if device_exists { + services().users.set_token(&user_id, &device_id, &token)?; + } else { + services() + .users + .create_device(&user_id, &device_id, &token, body.initial_device_display_name.clone())?; + } + + // send client well-known if specified so the client knows to reconfigure itself + let client_discovery_info: Option = services() + .globals + .well_known_client() + .as_ref() + .map(|server| DiscoveryInfo::new(HomeserverInfo::new(server.to_string()))); + + info!("{user_id} logged in"); + + // home_server is deprecated but apparently must still be sent despite it being + // deprecated over 6 years ago. initially i thought this macro was unnecessary, + // but ruma uses this same macro for the same reason so... + #[allow(deprecated)] + Ok(login::v3::Response { + user_id, + access_token: token, + device_id, + well_known: client_discovery_info, + expires_in: None, + home_server: Some(services().globals.server_name().to_owned()), + refresh_token: None, + }) +} + +/// # `POST /_matrix/client/v3/logout` +/// +/// Log out the current device. 
+/// +/// - Invalidates access token +/// - Deletes device metadata (device id, device display name, last seen ip, +/// last seen ts) +/// - Forgets to-device events +/// - Triggers device list updates +pub async fn logout_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + services().users.remove_device(sender_user, sender_device)?; + + // send device list update for user after logout + services().users.mark_device_key_update(sender_user)?; + + Ok(logout::v3::Response::new()) +} + +/// # `POST /_matrix/client/r0/logout/all` +/// +/// Log out all devices of this user. +/// +/// - Invalidates all access tokens +/// - Deletes all device metadata (device id, device display name, last seen ip, +/// last seen ts) +/// - Forgets all to-device events +/// - Triggers device list updates +/// +/// Note: This is equivalent to calling [`GET +/// /_matrix/client/r0/logout`](fn.logout_route.html) from each device of this +/// user. 
+pub async fn logout_all_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + for device_id in services().users.all_device_ids(sender_user).flatten() { + services().users.remove_device(sender_user, &device_id)?; + } + + // send device list update for user after logout + services().users.mark_device_key_update(sender_user)?; + + Ok(logout_all::v3::Response::new()) +} diff --git a/src/api/client_server/space.rs b/src/api/client_server/space.rs new file mode 100644 index 00000000..acce261e --- /dev/null +++ b/src/api/client_server/space.rs @@ -0,0 +1,54 @@ +use std::str::FromStr; + +use ruma::{ + api::client::{error::ErrorKind, space::get_hierarchy}, + UInt, +}; + +use crate::{service::rooms::spaces::PagnationToken, services, Error, Result, Ruma}; + +/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy` +/// +/// Paginates over the space tree in a depth-first manner to locate child rooms +/// of a given space. +pub async fn get_hierarchy_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let limit = body + .limit + .unwrap_or_else(|| UInt::from(10_u32)) + .min(UInt::from(100_u32)); + + let max_depth = body + .max_depth + .unwrap_or_else(|| UInt::from(3_u32)) + .min(UInt::from(10_u32)); + + let key = body + .from + .as_ref() + .and_then(|s| PagnationToken::from_str(s).ok()); + + // Should prevent unexpeded behaviour in (bad) clients + if let Some(ref token) = key { + if token.suggested_only != body.suggested_only || token.max_depth != max_depth { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "suggested_only and max_depth cannot change on paginated requests", + )); + } + } + + services() + .rooms + .spaces + .get_client_hierarchy( + sender_user, + &body.room_id, + u64::from(limit) as usize, + key.map_or(0, |token| u64::from(token.skip) as usize), + u64::from(max_depth) as usize, + body.suggested_only, + ) + .await +} diff --git 
a/src/api/client_server/state.rs b/src/api/client_server/state.rs new file mode 100644 index 00000000..f9e910af --- /dev/null +++ b/src/api/client_server/state.rs @@ -0,0 +1,312 @@ +use std::sync::Arc; + +use ruma::{ + api::client::{ + error::ErrorKind, + state::{get_state_events, get_state_events_for_key, send_state_event}, + }, + events::{ + room::{ + canonical_alias::RoomCanonicalAliasEventContent, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + }, + AnyStateEventContent, StateEventType, + }, + serde::Raw, + EventId, RoomId, UserId, +}; +use tracing::{error, log::warn}; + +use crate::{ + service::{self, pdu::PduBuilder}, + services, Error, Result, Ruma, RumaResponse, +}; + +/// # `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}/{stateKey}` +/// +/// Sends a state event into the room. +/// +/// - The only requirement for the content is that it has to be valid json +/// - Tries to send the event into the room, auth rules will determine if it is +/// allowed +/// - If event is new `canonical_alias`: Rejects if alias is incorrect +pub async fn send_state_event_for_key_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event_id = send_state_event_for_key_helper( + sender_user, + &body.room_id, + &body.event_type, + &body.body.body, // Yes, I hate it too + body.state_key.clone(), + ) + .await?; + + let event_id = (*event_id).to_owned(); + Ok(send_state_event::v3::Response { + event_id, + }) +} + +/// # `PUT /_matrix/client/*/rooms/{roomId}/state/{eventType}` +/// +/// Sends a state event into the room. 
+/// +/// - The only requirement for the content is that it has to be valid json +/// - Tries to send the event into the room, auth rules will determine if it is +/// allowed +/// - If event is new `canonical_alias`: Rejects if alias is incorrect +pub async fn send_state_event_for_empty_key_route( + body: Ruma, +) -> Result> { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event_id = send_state_event_for_key_helper( + sender_user, + &body.room_id, + &body.event_type.to_string().into(), + &body.body.body, + body.state_key.clone(), + ) + .await?; + + let event_id = (*event_id).to_owned(); + Ok(send_state_event::v3::Response { + event_id, + } + .into()) +} + +/// # `GET /_matrix/client/v3/rooms/{roomid}/state` +/// +/// Get all state events for a room. +/// +/// - If not joined: Only works if current room history visibility is world +/// readable +pub async fn get_state_events_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services() + .rooms + .state_accessor + .user_can_see_state_events(sender_user, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view the room state.", + )); + } + + Ok(get_state_events::v3::Response { + room_state: services() + .rooms + .state_accessor + .room_state_full(&body.room_id) + .await? + .values() + .map(|pdu| pdu.to_state_event()) + .collect(), + }) +} + +/// # `GET /_matrix/client/v3/rooms/{roomid}/state/{eventType}/{stateKey}` +/// +/// Get single state event of a room with the specified state key. 
+/// The optional query parameter `?format=event|content` allows returning the +/// full room state event or just the state event's content (default behaviour) +/// +/// - If not joined: Only works if current room history visibility is world +/// readable +pub async fn get_state_events_for_key_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services() + .rooms + .state_accessor + .user_can_see_state_events(sender_user, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view the room state.", + )); + } + + let event = services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &body.event_type, &body.state_key)? + .ok_or_else(|| { + warn!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id); + Error::BadRequest(ErrorKind::NotFound, "State event not found.") + })?; + if body + .format + .as_ref() + .is_some_and(|f| f.to_lowercase().eq("event")) + { + Ok(get_state_events_for_key::v3::Response { + content: None, + event: serde_json::from_str(event.to_state_event().json().get()).map_err(|e| { + error!("Invalid room state event in database: {}", e); + Error::bad_database("Invalid room state event in database") + })?, + }) + } else { + Ok(get_state_events_for_key::v3::Response { + content: Some(serde_json::from_str(event.content.get()).map_err(|e| { + error!("Invalid room state event content in database: {}", e); + Error::bad_database("Invalid room state event content in database") + })?), + event: None, + }) + } +} + +/// # `GET /_matrix/client/v3/rooms/{roomid}/state/{eventType}` +/// +/// Get single state event of a room. 
+/// The optional query parameter `?format=event|content` allows returning the +/// full room state event or just the state event's content (default behaviour) +/// +/// - If not joined: Only works if current room history visibility is world +/// readable +pub async fn get_state_events_for_empty_key_route( + body: Ruma, +) -> Result> { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services() + .rooms + .state_accessor + .user_can_see_state_events(sender_user, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You don't have permission to view the room state.", + )); + } + + let event = services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &body.event_type, "")? + .ok_or_else(|| { + warn!("State event {:?} not found in room {:?}", &body.event_type, &body.room_id); + Error::BadRequest(ErrorKind::NotFound, "State event not found.") + })?; + + if body + .format + .as_ref() + .is_some_and(|f| f.to_lowercase().eq("event")) + { + Ok(get_state_events_for_key::v3::Response { + content: None, + event: serde_json::from_str(event.to_state_event().json().get()).map_err(|e| { + error!("Invalid room state event in database: {}", e); + Error::bad_database("Invalid room state event in database") + })?, + } + .into()) + } else { + Ok(get_state_events_for_key::v3::Response { + content: Some(serde_json::from_str(event.content.get()).map_err(|e| { + error!("Invalid room state event content in database: {}", e); + Error::bad_database("Invalid room state event content in database") + })?), + event: None, + } + .into()) + } +} + +async fn send_state_event_for_key_helper( + sender: &UserId, room_id: &RoomId, event_type: &StateEventType, json: &Raw, state_key: String, +) -> Result> { + match *event_type { + // Forbid m.room.encryption if encryption is disabled + StateEventType::RoomEncryption => { + if !services().globals.allow_encryption() { + return Err(Error::BadRequest(ErrorKind::forbidden(), 
"Encryption has been disabled")); + } + }, + // admin room is a sensitive room, it should not ever be made public + StateEventType::RoomJoinRules => { + if let Some(admin_room_id) = service::admin::Service::get_admin_room()? { + if admin_room_id == room_id { + if let Ok(join_rule) = serde_json::from_str::(json.json().get()) { + if join_rule.join_rule == JoinRule::Public { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Admin room is not allowed to be public.", + )); + } + } + } + } + }, + // TODO: allow alias if it previously existed + StateEventType::RoomCanonicalAlias => { + if let Ok(canonical_alias) = serde_json::from_str::(json.json().get()) { + let mut aliases = canonical_alias.alt_aliases.clone(); + + if let Some(alias) = canonical_alias.alias { + aliases.push(alias); + } + + for alias in aliases { + if alias.server_name() != services().globals.server_name() + || services() + .rooms + .alias + .resolve_local_alias(&alias)? + .filter(|room| room == room_id) // Make sure it's the right room + .is_none() + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "You are only allowed to send canonical_alias events when its aliases already exist", + )); + } + } + } + }, + _ => {}, + } + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: event_type.to_string().into(), + content: serde_json::from_str(json.json().get()).expect("content is valid json"), + unsigned: None, + state_key: Some(state_key), + redacts: None, + }, + sender, + room_id, + &state_lock, + ) + .await?; + + Ok(event_id) +} diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs new file mode 100644 index 00000000..ef891621 --- /dev/null +++ b/src/api/client_server/sync.rs @@ -0,0 +1,1724 @@ +use std::{ + 
cmp::Ordering, + collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, + sync::Arc, + time::Duration, +}; + +use ruma::{ + api::client::{ + filter::{FilterDefinition, LazyLoadOptions}, + sync::sync_events::{ + self, + v3::{ + Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, Presence, + RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, + }, + v4::SlidingOp, + DeviceLists, UnreadNotificationsCount, + }, + uiaa::UiaaResponse, + }, + events::{ + presence::PresenceEvent, + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, TimelineEventType, + }, + serde::Raw, + uint, DeviceId, EventId, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, +}; +use tokio::sync::watch::Sender; +use tracing::{debug, error}; + +use crate::{ + service::{pdu::EventHash, rooms::timeline::PduCount}, + services, utils, Error, PduEvent, Result, Ruma, RumaResponse, +}; + +/// # `GET /_matrix/client/r0/sync` +/// +/// Synchronize the client's state with the latest state on the server. +/// +/// - This endpoint takes a `since` parameter which should be the `next_batch` +/// value from a +/// previous request for incremental syncs. 
+/// +/// Calling this endpoint without a `since` parameter returns: +/// - Some of the most recent events of each timeline +/// - Notification counts for each room +/// - Joined and invited member counts, heroes +/// - All state events +/// +/// Calling this endpoint with a `since` parameter from a previous `next_batch` +/// returns: For joined rooms: +/// - Some of the most recent events of each timeline that happened after since +/// - If user joined the room after since: All state events (unless lazy loading +/// is activated) and +/// all device list updates in that room +/// - If the user was already in the room: A list of all events that are in the +/// state now, but were +/// not in the state at `since` +/// - If the state we send contains a member event: Joined and invited member +/// counts, heroes +/// - Device list updates that happened after `since` +/// - If there are events in the timeline we send or the user send updated his +/// read mark: Notification counts +/// - EDUs that are active now (read receipts, typing updates, presence) +/// - TODO: Allow multiple sync streams to support Pantalaimon +/// +/// For invited rooms: +/// - If the user was invited after `since`: A subset of the state of the room +/// at the point of the invite +/// +/// For left rooms: +/// - If the user left after `since`: `prev_batch` token, empty state (TODO: +/// subset of the state at the point of the leave) +/// +/// - Sync is handled in an async task, multiple requests from the same device +/// with the same +/// `since` will be cached +pub async fn sync_events_route( + body: Ruma, +) -> Result> { + let sender_user = body.sender_user.expect("user is authenticated"); + let sender_device = body.sender_device.expect("user is authenticated"); + let body = body.body; + + let mut rx = match services() + .globals + .sync_receivers + .write() + .await + .entry((sender_user.clone(), sender_device.clone())) + { + Entry::Vacant(v) => { + let (tx, rx) = 
tokio::sync::watch::channel(None); + + v.insert((body.since.clone(), rx.clone())); + + tokio::spawn(sync_helper_wrapper(sender_user.clone(), sender_device.clone(), body, tx)); + + rx + }, + Entry::Occupied(mut o) => { + if o.get().0 != body.since { + let (tx, rx) = tokio::sync::watch::channel(None); + + o.insert((body.since.clone(), rx.clone())); + + debug!("Sync started for {sender_user}"); + + tokio::spawn(sync_helper_wrapper(sender_user.clone(), sender_device.clone(), body, tx)); + + rx + } else { + o.get().1.clone() + } + }, + }; + + let we_have_to_wait = rx.borrow().is_none(); + if we_have_to_wait { + if let Err(e) = rx.changed().await { + error!("Error waiting for sync: {}", e); + } + } + + let result = match rx + .borrow() + .as_ref() + .expect("When sync channel changes it's always set to some") + { + Ok(response) => Ok(response.clone()), + Err(error) => Err(error.to_response()), + }; + + result +} + +async fn sync_helper_wrapper( + sender_user: OwnedUserId, sender_device: OwnedDeviceId, body: sync_events::v3::Request, + tx: Sender>>, +) { + let since = body.since.clone(); + + let r = sync_helper(sender_user.clone(), sender_device.clone(), body).await; + + if let Ok((_, caching_allowed)) = r { + if !caching_allowed { + match services() + .globals + .sync_receivers + .write() + .await + .entry((sender_user, sender_device)) + { + Entry::Occupied(o) => { + // Only remove if the device didn't start a different /sync already + if o.get().0 == since { + o.remove(); + } + }, + Entry::Vacant(_) => {}, + } + } + } + + _ = tx.send(Some(r.map(|(r, _)| r))); +} + +async fn sync_helper( + sender_user: OwnedUserId, + sender_device: OwnedDeviceId, + body: sync_events::v3::Request, + // bool = caching allowed +) -> Result<(sync_events::v3::Response, bool), Error> { + // Presence update + if services().globals.allow_local_presence() { + services() + .presence + .ping_presence(&sender_user, &body.set_presence)?; + } + + // Setup watchers, so if there's no response, we can 
wait for them + let watcher = services().globals.watch(&sender_user, &sender_device); + + let next_batch = services().globals.current_count()?; + let next_batchcount = PduCount::Normal(next_batch); + let next_batch_string = next_batch.to_string(); + + // Load filter + let filter = match body.filter { + None => FilterDefinition::default(), + Some(Filter::FilterDefinition(filter)) => filter, + Some(Filter::FilterId(filter_id)) => services() + .users + .get_filter(&sender_user, &filter_id)? + .unwrap_or_default(), + }; + + let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members: redundant, + } => (true, redundant), + LazyLoadOptions::Disabled => (false, false), + }; + + let full_state = body.full_state; + + let mut joined_rooms = BTreeMap::new(); + let since = body + .since + .as_ref() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); + let sincecount = PduCount::Normal(since); + + let mut presence_updates = HashMap::new(); + let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in + let mut device_list_updates = HashSet::new(); + let mut device_list_left = HashSet::new(); + + // Look for device list updates of this account + device_list_updates.extend( + services() + .users + .keys_changed(sender_user.as_ref(), since, None) + .filter_map(Result::ok), + ); + + if services().globals.allow_local_presence() { + process_presence_updates(&mut presence_updates, since, &sender_user).await?; + } + + let all_joined_rooms = services() + .rooms + .state_cache + .rooms_joined(&sender_user) + .collect::>(); + + // Coalesce database writes for the remainder of this scope. 
+ let _cork = services().globals.db.cork_and_flush()?; + + for room_id in all_joined_rooms { + let room_id = room_id?; + if let Ok(joined_room) = load_joined_room( + &sender_user, + &sender_device, + &room_id, + since, + sincecount, + next_batch, + next_batchcount, + lazy_load_enabled, + lazy_load_send_redundant, + full_state, + &mut device_list_updates, + &mut left_encrypted_users, + ) + .await + { + if !joined_room.is_empty() { + joined_rooms.insert(room_id.clone(), joined_room); + } + } + } + + let mut left_rooms = BTreeMap::new(); + let all_left_rooms: Vec<_> = services() + .rooms + .state_cache + .rooms_left(&sender_user) + .collect(); + for result in all_left_rooms { + let (room_id, _) = result?; + + { + // Get and drop the lock to wait for remaining operations to finish + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .await + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().await; + drop(insert_lock); + }; + + let left_count = services() + .rooms + .state_cache + .get_left_count(&room_id, &sender_user)?; + + // Left before last sync + if Some(since) >= left_count { + continue; + } + + if !services().rooms.metadata.exists(&room_id)? 
{ + // This is just a rejected invite, not a room we know + // Insert a leave event anyways + let event = PduEvent { + event_id: EventId::new(services().globals.server_name()).into(), + sender: sender_user.clone(), + origin: None, + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + kind: TimelineEventType::RoomMember, + content: serde_json::from_str(r#"{"membership":"leave"}"#).expect("this is valid JSON"), + state_key: Some(sender_user.to_string()), + unsigned: None, + // The following keys are dropped on conversion + room_id: room_id.clone(), + prev_events: vec![], + depth: uint!(1), + auth_events: vec![], + redacts: None, + hashes: EventHash { + sha256: String::new(), + }, + signatures: None, + }; + + left_rooms.insert( + room_id, + LeftRoom { + account_data: RoomAccountData { + events: Vec::new(), + }, + timeline: Timeline { + limited: false, + prev_batch: Some(next_batch_string.clone()), + events: Vec::new(), + }, + state: State { + events: vec![event.to_sync_state_event()], + }, + }, + ); + continue; + } + + let mut left_state_events = Vec::new(); + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; + + let since_state_ids = match since_shortstatehash { + Some(s) => services().rooms.state_accessor.state_full_ids(s).await?, + None => HashMap::new(), + }; + + let Some(left_event_id) = services().rooms.state_accessor.room_state_get_id( + &room_id, + &StateEventType::RoomMember, + sender_user.as_str(), + )? + else { + error!("Left room but no left state event"); + continue; + }; + + let Some(left_shortstatehash) = services() + .rooms + .state_accessor + .pdu_shortstatehash(&left_event_id)? 
+ else { + error!("Leave event has no state"); + continue; + }; + + let mut left_state_ids = services() + .rooms + .state_accessor + .state_full_ids(left_shortstatehash) + .await?; + + let leave_shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str())?; + + left_state_ids.insert(leave_shortstatekey, left_event_id); + + let mut i = 0; + for (key, id) in left_state_ids { + if full_state || since_state_ids.get(&key) != Some(&id) { + let (event_type, state_key) = services().rooms.short.get_statekey_from_short(key)?; + + if !lazy_load_enabled + || event_type != StateEventType::RoomMember + || full_state + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || (cfg!(feature = "element_hacks") && *sender_user == state_key) + { + let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else { + error!("Pdu in state not found: {}", id); + continue; + }; + + left_state_events.push(pdu.to_sync_state_event()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + } + + left_rooms.insert( + room_id.clone(), + LeftRoom { + account_data: RoomAccountData { + events: Vec::new(), + }, + timeline: Timeline { + limited: false, + prev_batch: Some(next_batch_string.clone()), + events: Vec::new(), + }, + state: State { + events: left_state_events, + }, + }, + ); + } + + let mut invited_rooms = BTreeMap::new(); + let all_invited_rooms: Vec<_> = services() + .rooms + .state_cache + .rooms_invited(&sender_user) + .collect(); + for result in all_invited_rooms { + let (room_id, invite_state_events) = result?; + + { + // Get and drop the lock to wait for remaining operations to finish + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .await + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().await; + drop(insert_lock); + }; + + let invite_count = services() + 
.rooms + .state_cache + .get_invite_count(&room_id, &sender_user)?; + + // Invited before last sync + if Some(since) >= invite_count { + continue; + } + + invited_rooms.insert( + room_id.clone(), + InvitedRoom { + invite_state: InviteState { + events: invite_state_events, + }, + }, + ); + } + + for user_id in left_encrypted_users { + let dont_share_encrypted_room = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? + .filter_map(Result::ok) + .filter_map(|other_room_id| { + Some( + services() + .rooms + .state_accessor + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + .ok()? + .is_some(), + ) + }) + .all(|encrypted| !encrypted); + // If the user doesn't share an encrypted room with the target anymore, we need + // to tell them + if dont_share_encrypted_room { + device_list_left.insert(user_id); + } + } + + // Remove all to-device events the device received *last time* + services() + .users + .remove_to_device_events(&sender_user, &sender_device, since)?; + + let response = sync_events::v3::Response { + next_batch: next_batch_string, + rooms: Rooms { + leave: left_rooms, + join: joined_rooms, + invite: invited_rooms, + knock: BTreeMap::new(), // TODO + }, + presence: Presence { + events: presence_updates + .into_values() + .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully")) + .collect(), + }, + account_data: GlobalAccountData { + events: services() + .account_data + .changes_since(None, &sender_user, since)? 
+ .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() + }) + .collect(), + }, + device_lists: DeviceLists { + changed: device_list_updates.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, + device_one_time_keys_count: services() + .users + .count_one_time_keys(&sender_user, &sender_device)?, + to_device: ToDevice { + events: services() + .users + .get_to_device_events(&sender_user, &sender_device)?, + }, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, + }; + + // TODO: Retry the endpoint instead of returning + if !full_state + && response.rooms.is_empty() + && response.presence.is_empty() + && response.account_data.is_empty() + && response.device_lists.is_empty() + && response.to_device.is_empty() + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let mut duration = body.timeout.unwrap_or_default(); + if duration.as_secs() > 30 { + duration = Duration::from_secs(30); + } + _ = tokio::time::timeout(duration, watcher).await; + Ok((response, false)) + } else { + Ok((response, since != next_batch)) // Only cache if we made progress + } +} + +async fn process_presence_updates( + presence_updates: &mut HashMap, since: u64, syncing_user: &OwnedUserId, +) -> Result<()> { + // Take presence updates + for (user_id, _, presence_bytes) in services().presence.presence_since(since) { + if !services() + .rooms + .state_cache + .user_sees_user(syncing_user, &user_id)? 
+ { + continue; + } + + use crate::service::presence::Presence; + let presence_event = Presence::from_json_bytes_to_event(&presence_bytes, &user_id)?; + match presence_updates.entry(user_id) { + Entry::Vacant(slot) => { + slot.insert(presence_event); + }, + Entry::Occupied(mut slot) => { + let curr_event = slot.get_mut(); + let curr_content = &mut curr_event.content; + let new_content = presence_event.content; + + // Update existing presence event with more info + curr_content.presence = new_content.presence; + curr_content.status_msg = new_content + .status_msg + .or_else(|| curr_content.status_msg.take()); + curr_content.last_active_ago = new_content.last_active_ago.or(curr_content.last_active_ago); + curr_content.displayname = new_content + .displayname + .or_else(|| curr_content.displayname.take()); + curr_content.avatar_url = new_content + .avatar_url + .or_else(|| curr_content.avatar_url.take()); + curr_content.currently_active = new_content + .currently_active + .or(curr_content.currently_active); + }, + } + } + + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +async fn load_joined_room( + sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, since: u64, sincecount: PduCount, + next_batch: u64, next_batchcount: PduCount, lazy_load_enabled: bool, lazy_load_send_redundant: bool, + full_state: bool, device_list_updates: &mut HashSet, left_encrypted_users: &mut HashSet, +) -> Result { + { + // Get and drop the lock to wait for remaining operations to finish + // This will make sure the we have all events until next_batch + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().await; + drop(insert_lock); + }; + + let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10)?; + + let send_notification_counts = !timeline_pdus.is_empty() + || services() + .rooms + .user + 
.last_notification_read(sender_user, room_id)? + > since; + + let mut timeline_users = HashSet::new(); + for (_, event) in &timeline_pdus { + timeline_users.insert(event.sender.as_str().to_owned()); + } + + services() + .rooms + .lazy_loading + .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount) + .await?; + + // Database queries: + + let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? else { + error!("Room {} has no state", room_id); + return Err(Error::BadDatabase("Room has no state")); + }; + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(room_id, since)?; + + let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = + if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { + // No state changes + (Vec::new(), None, None, false, Vec::new()) + } else { + // Calculates joined_member_count, invited_member_count and heroes + let calculate_counts = || { + let joined_member_count = services() + .rooms + .state_cache + .room_joined_count(room_id)? + .unwrap_or(0); + let invited_member_count = services() + .rooms + .state_cache + .room_invited_count(room_id)? + .unwrap_or(0); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still + // joined or invited until we have 5 or we reach the end + + for hero in services() + .rooms + .timeline + .all_pdus(sender_user, room_id)? 
+ .filter_map(Result::ok) // Ignore all broken pdus + .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) + .map(|(_, pdu)| { + let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + // The membership was and still is invite or join + if matches!(content.membership, MembershipState::Join | MembershipState::Invite) + && (services().rooms.state_cache.is_joined(&user_id, room_id)? + || services().rooms.state_cache.is_invited(&user_id, room_id)?) + { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + // Filter out buggy users + .filter_map(Result::ok) + // Filter for possible heroes + .flatten() + { + if heroes.contains(&hero) || hero == sender_user.as_str() { + continue; + } + + heroes.push(hero); + } + } + + Ok::<_, Error>((Some(joined_member_count), Some(invited_member_count), heroes)) + }; + + let since_sender_member: Option = since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get(shortstatehash, &StateEventType::RoomMember, sender_user.as_str()) + .transpose() + }) + .transpose()? 
+ .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); + + let joined_since_last_sync = + since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + + if since_shortstatehash.is_none() || joined_since_last_sync { + // Probably since = 0, we will do an initial sync + + let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; + + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + let mut i = 0; + for (shortstatekey, id) in current_state_ids { + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; + + if event_type != StateEventType::RoomMember { + let Some(pdu) = services().rooms.timeline.get_pdu(&id)? else { + error!("Pdu in state not found: {}", id); + continue; + }; + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } else if !lazy_load_enabled + || full_state + || timeline_users.contains(&state_key) + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || (cfg!(feature = "element_hacks") && *sender_user == state_key) + { + let Some(pdu) = services().rooms.timeline.get_pdu(&id)? 
else { + error!("Pdu in state not found: {}", id); + continue; + }; + + // This check is in case a bad user ID made it into the database + if let Ok(uid) = UserId::parse(&state_key) { + lazy_loaded.insert(uid); + } + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + + // Reset lazy loading because this is an initial sync + services() + .rooms + .lazy_loading + .lazy_load_reset(sender_user, sender_device, room_id)?; + + // The state_events above should contain all timeline_users, let's mark them as + // lazy loaded. + services() + .rooms + .lazy_loading + .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount) + .await; + + (heroes, joined_member_count, invited_member_count, true, state_events) + } else { + // Incremental /sync + let since_shortstatehash = since_shortstatehash.unwrap(); + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + if since_shortstatehash != current_shortstatehash { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if full_state || since_state_ids.get(&key) != Some(&id) { + let Some(pdu) = services().rooms.timeline.get_pdu(&id)? 
else { + error!("Pdu in state not found: {}", id); + continue; + }; + + if pdu.kind == TimelineEventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + }, + Err(e) => error!("Invalid state key for member event: {}", e), + } + } + + state_events.push(pdu); + tokio::task::yield_now().await; + } + } + } + + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + room_id, + &event.sender, + )? || lazy_load_send_redundant + { + if let Some(member_event) = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomMember, + event.sender.as_str(), + )? { + lazy_loaded.insert(event.sender.clone()); + state_events.push(member_event); + } + } + } + + services() + .rooms + .lazy_loading + .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount) + .await; + + let encrypted_room = services() + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? 
+ .is_some(); + + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + // Calculations: + let new_encrypted_room = encrypted_room && since_encryption.is_none(); + + let send_member_count = state_events + .iter() + .any(|event| event.kind == TimelineEventType::RoomMember); + + if encrypted_room { + for state_event in &state_events { + if state_event.kind != TimelineEventType::RoomMember { + continue; + } + + if let Some(state_key) = &state_event.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + if user_id == sender_user { + continue; + } + + let new_membership = + serde_json::from_str::(state_event.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database."))? + .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(sender_user, &user_id, room_id)? { + device_list_updates.insert(user_id); + } + }, + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + }, + _ => {}, + } + } + } + } + + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend( + services() + .rooms + .state_cache + .room_members(room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to the sender + sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target + // already + !share_encrypted_room(sender_user, user_id, room_id).unwrap_or(false) + }), + ); + } + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + calculate_counts()? 
+ } else { + (None, None, Vec::new()) + }; + + ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) + } + }; + + // Look for device list updates in this room + device_list_updates.extend( + services() + .users + .keys_changed(room_id.as_ref(), since, None) + .filter_map(Result::ok), + ); + + let notification_count = if send_notification_counts { + Some( + services() + .rooms + .user + .notification_count(sender_user, room_id)? + .try_into() + .expect("notification count can't go that high"), + ) + } else { + None + }; + + let highlight_count = if send_notification_counts { + Some( + services() + .rooms + .user + .highlight_count(sender_user, room_id)? + .try_into() + .expect("highlight count can't go that high"), + ) + } else { + None + }; + + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { + Ok(Some(match pdu_count { + PduCount::Backfilled(_) => { + error!("timeline in backfill state?!"); + "0".to_owned() + }, + PduCount::Normal(c) => c.to_string(), + })) + })?; + + let room_events: Vec<_> = timeline_pdus + .iter() + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect(); + + let mut edus: Vec<_> = services() + .rooms + .read_receipt + .readreceipts_since(room_id, since) + .filter_map(Result::ok) // Filter out buggy events + .map(|(_, _, v)| v) + .collect(); + + if services().rooms.typing.last_typing_update(room_id).await? > since { + edus.push( + serde_json::from_str( + &serde_json::to_string(&services().rooms.typing.typings_all(room_id).await?) 
+ .expect("event is valid, we just created it"), + ) + .expect("event is valid, we just created it"), + ); + } + + // Save the state after this sync so we can send the correct state diff next + // sync + services() + .rooms + .user + .associate_token_shortstatehash(room_id, next_batch, current_shortstatehash)?; + + Ok(JoinedRoom { + account_data: RoomAccountData { + events: services() + .account_data + .changes_since(Some(room_id), sender_user, since)? + .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() + }) + .collect(), + }, + summary: RoomSummary { + heroes, + joined_member_count: joined_member_count.map(|n| (n as u32).into()), + invited_member_count: invited_member_count.map(|n| (n as u32).into()), + }, + unread_notifications: UnreadNotificationsCount { + highlight_count, + notification_count, + }, + timeline: Timeline { + limited: limited || joined_since_last_sync, + prev_batch, + events: room_events, + }, + state: State { + events: state_events + .iter() + .map(|pdu| pdu.to_sync_state_event()) + .collect(), + }, + ephemeral: Ephemeral { + events: edus, + }, + unread_thread_notifications: BTreeMap::new(), + }) +} + +fn load_timeline( + sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64, +) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { + let timeline_pdus; + let limited; + if services() + .rooms + .timeline + .last_timeline_count(sender_user, room_id)? + > roomsincecount + { + let mut non_timeline_pdus = services() + .rooms + .timeline + .pdus_until(sender_user, room_id, PduCount::max())? 
+ .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .take_while(|(pducount, _)| pducount > &roomsincecount); + + // Take the last events for the timeline + timeline_pdus = non_timeline_pdus + .by_ref() + .take(limit as usize) + .collect::>() + .into_iter() + .rev() + .collect::>(); + + // They /sync response doesn't always return all messages, so we say the output + // is limited unless there are events in non_timeline_pdus + limited = non_timeline_pdus.next().is_some(); + } else { + timeline_pdus = Vec::new(); + limited = false; + } + Ok((timeline_pdus, limited)) +} + +fn share_encrypted_room(sender_user: &UserId, user_id: &UserId, ignore_room: &RoomId) -> Result { + Ok(services() + .rooms + .user + .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? + .filter_map(Result::ok) + .filter(|room_id| room_id != ignore_room) + .filter_map(|other_room_id| { + Some( + services() + .rooms + .state_accessor + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + .ok()? 
+ .is_some(), + ) + }) + .any(|encrypted| encrypted)) +} + +/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` +/// +/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`) +pub async fn sync_events_v4_route( + body: Ruma, +) -> Result> { + let sender_user = body.sender_user.expect("user is authenticated"); + let sender_device = body.sender_device.expect("user is authenticated"); + let mut body = body.body; + // Setup watchers, so if there's no response, we can wait for them + let watcher = services().globals.watch(&sender_user, &sender_device); + + let next_batch = services().globals.next_count()?; + + let globalsince = body + .pos + .as_ref() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); + + if globalsince == 0 { + if let Some(conn_id) = &body.conn_id { + services().users.forget_sync_request_connection( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ); + } + } + + // Get sticky parameters from cache + let known_rooms = + services() + .users + .update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body); + + let all_joined_rooms = services() + .rooms + .state_cache + .rooms_joined(&sender_user) + .filter_map(Result::ok) + .collect::>(); + + if body.extensions.to_device.enabled.unwrap_or(false) { + services() + .users + .remove_to_device_events(&sender_user, &sender_device, globalsince)?; + } + + let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in + let mut device_list_changes = HashSet::new(); + let mut device_list_left = HashSet::new(); + + if body.extensions.e2ee.enabled.unwrap_or(false) { + // Look for device list updates of this account + device_list_changes.extend( + services() + .users + .keys_changed(sender_user.as_ref(), globalsince, None) + .filter_map(Result::ok), + ); + + for room_id in &all_joined_rooms { + let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? 
else { + error!("Room {} has no state", room_id); + continue; + }; + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(room_id, globalsince)?; + + let since_sender_member: Option = since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get(shortstatehash, &StateEventType::RoomMember, sender_user.as_str()) + .transpose() + }) + .transpose()? + .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); + + let encrypted_room = services() + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .is_some(); + + if let Some(since_shortstatehash) = since_shortstatehash { + // Skip if there are only timeline changes + if since_shortstatehash == current_shortstatehash { + continue; + } + + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + let joined_since_last_sync = + since_sender_member.map_or(true, |member| member.membership != MembershipState::Join); + + let new_encrypted_room = encrypted_room && since_encryption.is_none(); + if encrypted_room { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if since_state_ids.get(&key) != Some(&id) { + let Some(pdu) = services().rooms.timeline.get_pdu(&id)? 
else { + error!("Pdu in state not found: {}", id); + continue; + }; + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + if user_id == sender_user { + continue; + } + + let new_membership = + serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database."))? + .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(&sender_user, &user_id, room_id)? { + device_list_changes.insert(user_id); + } + }, + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + }, + _ => {}, + } + } + } + } + } + if joined_since_last_sync || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_changes.extend( + services() + .rooms + .state_cache + .room_members(room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to the sender + &sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target + // already + !share_encrypted_room(&sender_user, user_id, room_id).unwrap_or(false) + }), + ); + } + } + } + // Look for device list updates in this room + device_list_changes.extend( + services() + .users + .keys_changed(room_id.as_ref(), globalsince, None) + .filter_map(Result::ok), + ); + } + for user_id in left_encrypted_users { + let dont_share_encrypted_room = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? + .filter_map(Result::ok) + .filter_map(|other_room_id| { + Some( + services() + .rooms + .state_accessor + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + .ok()? 
+ .is_some(), + ) + }) + .all(|encrypted| !encrypted); + // If the user doesn't share an encrypted room with the target anymore, we need + // to tell them + if dont_share_encrypted_room { + device_list_left.insert(user_id); + } + } + } + + let mut lists = BTreeMap::new(); + let mut todo_rooms = BTreeMap::new(); // and required state + + for (list_id, list) in body.lists { + if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { + continue; + } + + let mut new_known_rooms = BTreeSet::new(); + + lists.insert( + list_id.clone(), + sync_events::v4::SyncList { + ops: list + .ranges + .into_iter() + .map(|mut r| { + r.0 = + r.0.clamp(uint!(0), UInt::from(all_joined_rooms.len() as u32 - 1)); + r.1 = + r.1.clamp(r.0, UInt::from(all_joined_rooms.len() as u32 - 1)); + let room_ids = all_joined_rooms[(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)].to_vec(); + new_known_rooms.extend(room_ids.iter().cloned()); + for room_id in &room_ids { + let todo_room = todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0, u64::MAX)); + let limit = list + .room_details + .timeline_limit + .map_or(10, u64::from) + .min(100); + todo_room + .0 + .extend(list.room_details.required_state.iter().cloned()); + todo_room.1 = todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get(&list_id) + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + } + sync_events::v4::SyncOp { + op: SlidingOp::Sync, + range: Some(r), + index: None, + room_ids, + room_id: None, + } + }) + .collect(), + count: UInt::from(all_joined_rooms.len() as u32), + }, + ); + + if let Some(conn_id) = &body.conn_id { + services().users.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + list_id, + new_known_rooms, + globalsince, + ); + } + } + + let mut known_subscription_rooms = BTreeSet::new(); + for (room_id, room) in &body.room_subscriptions { + if 
!services().rooms.metadata.exists(room_id)? { + continue; + } + let todo_room = todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0, u64::MAX)); + let limit = room.timeline_limit.map_or(10, u64::from).min(100); + todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.1 = todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get("subscriptions") + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + known_subscription_rooms.insert(room_id.clone()); + } + + for r in body.unsubscribe_rooms { + known_subscription_rooms.remove(&r); + body.room_subscriptions.remove(&r); + } + + if let Some(conn_id) = &body.conn_id { + services().users.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + "subscriptions".to_owned(), + known_subscription_rooms, + globalsince, + ); + } + + if let Some(conn_id) = &body.conn_id { + services().users.update_sync_subscriptions( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + body.room_subscriptions, + ); + } + + let mut rooms = BTreeMap::new(); + for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms { + let roomsincecount = PduCount::Normal(*roomsince); + + let (timeline_pdus, limited) = load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?; + + if roomsince != &0 && timeline_pdus.is_empty() { + continue; + } + + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { + Ok(Some(match pdu_count { + PduCount::Backfilled(_) => { + error!("timeline in backfill state?!"); + "0".to_owned() + }, + PduCount::Normal(c) => c.to_string(), + })) + })? 
+ .or_else(|| { + if roomsince != &0 { + Some(roomsince.to_string()) + } else { + None + } + }); + + let room_events: Vec<_> = timeline_pdus + .iter() + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect(); + + let required_state = required_state_request + .iter() + .map(|state| { + services() + .rooms + .state_accessor + .room_state_get(room_id, &state.0, &state.1) + }) + .filter_map(Result::ok) + .flatten() + .map(|state| state.to_sync_state_event()) + .collect(); + + // Heroes + let heroes = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .filter(|member| member != &sender_user) + .map(|member| { + Ok::<_, Error>( + services() + .rooms + .state_accessor + .get_member(room_id, &member)? + .map(|memberevent| { + ( + memberevent + .displayname + .unwrap_or_else(|| member.to_string()), + memberevent.avatar_url, + ) + }), + ) + }) + .filter_map(Result::ok) + .flatten() + .take(5) + .collect::>(); + let name = match heroes.len().cmp(&(1_usize)) { + Ordering::Greater => { + let last = heroes[0].0.clone(); + Some( + heroes[1..] + .iter() + .map(|h| h.0.clone()) + .collect::>() + .join(", ") + " and " + &last, + ) + }, + Ordering::Equal => Some(heroes[0].0.clone()), + Ordering::Less => None, + }; + + let heroes_avatar = if heroes.len() == 1 { + heroes[0].1.clone() + } else { + None + }; + + rooms.insert( + room_id.clone(), + sync_events::v4::SlidingSyncRoom { + name: services().rooms.state_accessor.get_name(room_id)?.or(name), + avatar: if let Some(heroes_avatar) = heroes_avatar { + ruma::JsOption::Some(heroes_avatar) + } else { + match services().rooms.state_accessor.get_avatar(room_id)? 
{ + ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), + ruma::JsOption::Null => ruma::JsOption::Null, + ruma::JsOption::Undefined => ruma::JsOption::Undefined, + } + }, + initial: Some(roomsince == &0), + is_dm: None, + invite_state: None, + unread_notifications: UnreadNotificationsCount { + highlight_count: Some( + services() + .rooms + .user + .highlight_count(&sender_user, room_id)? + .try_into() + .expect("notification count can't go that high"), + ), + notification_count: Some( + services() + .rooms + .user + .notification_count(&sender_user, room_id)? + .try_into() + .expect("notification count can't go that high"), + ), + }, + timeline: room_events, + required_state, + prev_batch, + limited, + joined_count: Some( + (services() + .rooms + .state_cache + .room_joined_count(room_id)? + .unwrap_or(0) as u32) + .into(), + ), + invited_count: Some( + (services() + .rooms + .state_cache + .room_invited_count(room_id)? + .unwrap_or(0) as u32) + .into(), + ), + num_live: None, // Count events in timeline greater than global sync counter + timestamp: None, + }, + ); + } + + if rooms + .iter() + .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let mut duration = body.timeout.unwrap_or(Duration::from_secs(30)); + if duration.as_secs() > 30 { + duration = Duration::from_secs(30); + } + _ = tokio::time::timeout(duration, watcher).await; + } + + Ok(sync_events::v4::Response { + initial: globalsince == 0, + txn_id: body.txn_id.clone(), + pos: next_batch.to_string(), + lists, + rooms, + extensions: sync_events::v4::Extensions { + to_device: if body.extensions.to_device.enabled.unwrap_or(false) { + Some(sync_events::v4::ToDevice { + events: services() + .users + .get_to_device_events(&sender_user, &sender_device)?, + next_batch: next_batch.to_string(), + }) + } else { + None + }, + e2ee: sync_events::v4::E2EE { + device_lists: DeviceLists { 
+ changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, + device_one_time_keys_count: services() + .users + .count_one_time_keys(&sender_user, &sender_device)?, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, + }, + account_data: sync_events::v4::AccountData { + global: if body.extensions.account_data.enabled.unwrap_or(false) { + services() + .account_data + .changes_since(None, &sender_user, globalsince)? + .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() + }) + .collect() + } else { + Vec::new() + }, + rooms: BTreeMap::new(), + }, + receipts: sync_events::v4::Receipts { + rooms: BTreeMap::new(), + }, + typing: sync_events::v4::Typing { + rooms: BTreeMap::new(), + }, + }, + delta_token: None, + }) +} diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs new file mode 100644 index 00000000..eb00e9fd --- /dev/null +++ b/src/api/client_server/tag.rs @@ -0,0 +1,112 @@ +use std::collections::BTreeMap; + +use ruma::{ + api::client::tag::{create_tag, delete_tag, get_tags}, + events::{ + tag::{TagEvent, TagEventContent}, + RoomAccountDataEventType, + }, +}; + +use crate::{services, Error, Result, Ruma}; + +/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` +/// +/// Adds a tag to the room. +/// +/// - Inserts the tag into the tag event of the room account data. 
+pub async fn update_tag_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = services() + .account_data + .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag)?; + + let mut tags_event = event.map_or_else( + || { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + }, + |e| serde_json::from_str(e.get()).map_err(|_| Error::bad_database("Invalid account data event in db.")), + )?; + + tags_event + .content + .tags + .insert(body.tag.clone().into(), body.tag_info.clone()); + + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + &serde_json::to_value(tags_event).expect("to json value always works"), + )?; + + Ok(create_tag::v3::Response {}) +} + +/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` +/// +/// Deletes a tag from the room. +/// +/// - Removes the tag from the tag event of the room account data. +pub async fn delete_tag_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = services() + .account_data + .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag)?; + + let mut tags_event = event.map_or_else( + || { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + }, + |e| serde_json::from_str(e.get()).map_err(|_| Error::bad_database("Invalid account data event in db.")), + )?; + + tags_event.content.tags.remove(&body.tag.clone().into()); + + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + &serde_json::to_value(tags_event).expect("to json value always works"), + )?; + + Ok(delete_tag::v3::Response {}) +} + +/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` +/// +/// Returns tags on the room. +/// +/// - Gets the tag event of the room account data. 
+pub async fn get_tags_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = services() + .account_data + .get(Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag)?; + + let tags_event = event.map_or_else( + || { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + }, + |e| serde_json::from_str(e.get()).map_err(|_| Error::bad_database("Invalid account data event in db.")), + )?; + + Ok(get_tags::v3::Response { + tags: tags_event.content.tags, + }) +} diff --git a/src/api/client_server/thirdparty.rs b/src/api/client_server/thirdparty.rs new file mode 100644 index 00000000..f5de4c61 --- /dev/null +++ b/src/api/client_server/thirdparty.rs @@ -0,0 +1,15 @@ +use std::collections::BTreeMap; + +use ruma::api::client::thirdparty::get_protocols; + +use crate::{Result, Ruma}; + +/// # `GET /_matrix/client/r0/thirdparty/protocols` +/// +/// TODO: Fetches all metadata about protocols supported by the homeserver. +pub async fn get_protocols_route(_body: Ruma) -> Result { + // TODO + Ok(get_protocols::v3::Response { + protocols: BTreeMap::new(), + }) +} diff --git a/src/api/client_server/threads.rs b/src/api/client_server/threads.rs new file mode 100644 index 00000000..c752d782 --- /dev/null +++ b/src/api/client_server/threads.rs @@ -0,0 +1,47 @@ +use ruma::api::client::{error::ErrorKind, threads::get_threads}; + +use crate::{services, Error, Result, Ruma}; + +/// # `GET /_matrix/client/r0/rooms/{roomId}/threads` +pub async fn get_threads_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + // Use limit or else 10, with maximum 100 + let limit = body + .limit + .and_then(|l| l.try_into().ok()) + .unwrap_or(10) + .min(100); + + let from = if let Some(from) = &body.from { + from.parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, ""))? 
+ } else { + u64::MAX + }; + + let threads = services() + .rooms + .threads + .threads_until(sender_user, &body.room_id, from, &body.include)? + .take(limit) + .filter_map(Result::ok) + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) + .unwrap_or(false) + }) + .collect::>(); + + let next_batch = threads.last().map(|(count, _)| count.to_string()); + + Ok(get_threads::v1::Response { + chunk: threads + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(), + next_batch, + }) +} diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs new file mode 100644 index 00000000..128d3cda --- /dev/null +++ b/src/api/client_server/to_device.rs @@ -0,0 +1,90 @@ +use std::collections::BTreeMap; + +use ruma::{ + api::{ + client::{error::ErrorKind, to_device::send_event_to_device}, + federation::{self, transactions::edu::DirectDeviceContent}, + }, + to_device::DeviceIdOrAllDevices, +}; + +use crate::{services, Error, Result, Ruma}; + +/// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` +/// +/// Send a to-device event to a set of client devices. +pub async fn send_event_to_device_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_deref(); + + // Check if this is a new transaction id + if services() + .transaction_ids + .existing_txnid(sender_user, sender_device, &body.txn_id)? 
+ .is_some() + { + return Ok(send_event_to_device::v3::Response {}); + } + + for (target_user_id, map) in &body.messages { + for (target_device_id_maybe, event) in map { + if target_user_id.server_name() != services().globals.server_name() { + let mut map = BTreeMap::new(); + map.insert(target_device_id_maybe.clone(), event.clone()); + let mut messages = BTreeMap::new(); + messages.insert(target_user_id.clone(), map); + let count = services().globals.next_count()?; + + services().sending.send_edu_server( + target_user_id.server_name(), + serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(DirectDeviceContent { + sender: sender_user.clone(), + ev_type: body.event_type.clone(), + message_id: count.to_string().into(), + messages, + })) + .expect("DirectToDevice EDU can be serialized"), + )?; + + continue; + } + + match target_device_id_maybe { + DeviceIdOrAllDevices::DeviceId(target_device_id) => { + services().users.add_to_device_event( + sender_user, + target_user_id, + target_device_id, + &body.event_type.to_string(), + event + .deserialize_as() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?, + )?; + }, + + DeviceIdOrAllDevices::AllDevices => { + for target_device_id in services().users.all_device_ids(target_user_id) { + services().users.add_to_device_event( + sender_user, + target_user_id, + &target_device_id?, + &body.event_type.to_string(), + event + .deserialize_as() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid"))?, + )?; + } + }, + } + } + } + + // Save transaction id with empty data + services() + .transaction_ids + .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; + + Ok(send_event_to_device::v3::Response {}) +} diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs new file mode 100644 index 00000000..6dfdd97c --- /dev/null +++ b/src/api/client_server/typing.rs @@ -0,0 +1,43 @@ +use ruma::api::client::{error::ErrorKind, 
typing::create_typing_event}; + +use crate::{services, utils, Error, Result, Ruma}; + +/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` +/// +/// Sets the typing state of the sender user. +pub async fn create_typing_event_route( + body: Ruma, +) -> Result { + use create_typing_event::v3::Typing; + + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "You are not in this room.")); + } + + if let Typing::Yes(duration) = body.state { + let duration = utils::clamp( + duration.as_millis() as u64, + services().globals.config.typing_client_timeout_min_s * 1000, + services().globals.config.typing_client_timeout_max_s * 1000, + ); + services() + .rooms + .typing + .typing_add(sender_user, &body.room_id, utils::millis_since_unix_epoch() + duration) + .await?; + } else { + services() + .rooms + .typing + .typing_remove(sender_user, &body.room_id) + .await?; + } + + Ok(create_typing_event::v3::Response {}) +} diff --git a/src/api/client_server/unstable.rs b/src/api/client_server/unstable.rs new file mode 100644 index 00000000..eb9fa0af --- /dev/null +++ b/src/api/client_server/unstable.rs @@ -0,0 +1,45 @@ +use ruma::{ + api::client::{error::ErrorKind, membership::mutual_rooms}, + OwnedRoomId, +}; + +use crate::{services, Error, Result, Ruma}; + +/// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms` +/// +/// Gets all the rooms the sender shares with the specified user. 
+/// +/// TODO: Implement pagination, currently this just returns everything +/// +/// An implementation of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) +pub async fn get_mutual_rooms_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if sender_user == &body.user_id { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "You cannot request rooms in common with yourself.", + )); + } + + if !services().users.exists(&body.user_id)? { + return Ok(mutual_rooms::unstable::Response { + joined: vec![], + next_batch_token: None, + }); + } + + let mutual_rooms: Vec = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? + .filter_map(Result::ok) + .collect(); + + Ok(mutual_rooms::unstable::Response { + joined: mutual_rooms, + next_batch_token: None, + }) +} diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs new file mode 100644 index 00000000..163837b0 --- /dev/null +++ b/src/api/client_server/unversioned.rs @@ -0,0 +1,168 @@ +use std::collections::BTreeMap; + +use axum::{response::IntoResponse, Json}; +use ruma::api::client::{ + discovery::{ + discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, + discover_support::{self, Contact}, + get_supported_versions, + }, + error::ErrorKind, +}; + +use crate::{services, Error, Result, Ruma}; + +/// # `GET /_matrix/client/versions` +/// +/// Get the versions of the specification and unstable features supported by +/// this server. +/// +/// - Versions take the form MAJOR.MINOR.PATCH +/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value +/// - Unstable features are namespaced and may include version information in +/// their name +/// +/// Note: Unstable features are used while developing new features. 
Clients +/// should avoid using unstable features in their stable releases +pub async fn get_supported_versions_route( + _body: Ruma, +) -> Result { + let resp = get_supported_versions::Response { + versions: vec![ + "r0.0.1".to_owned(), + "r0.1.0".to_owned(), + "r0.2.0".to_owned(), + "r0.3.0".to_owned(), + "r0.4.0".to_owned(), + "r0.5.0".to_owned(), + "r0.6.0".to_owned(), + "r0.6.1".to_owned(), + "v1.1".to_owned(), + "v1.2".to_owned(), + "v1.3".to_owned(), + "v1.4".to_owned(), + "v1.5".to_owned(), + ], + unstable_features: BTreeMap::from_iter([ + ("org.matrix.e2e_cross_signing".to_owned(), true), + ("org.matrix.msc2285.stable".to_owned(), true), + ("uk.half-shot.msc2666.query_mutual_rooms".to_owned(), true), + ("org.matrix.msc2836".to_owned(), true), + ("org.matrix.msc2946".to_owned(), true), + ("org.matrix.msc3026.busy_presence".to_owned(), true), + ("org.matrix.msc3827".to_owned(), true), + ]), + }; + + Ok(resp) +} + +/// # `GET /.well-known/matrix/client` +/// +/// Returns the .well-known URL if it is configured, otherwise returns 404. +pub async fn well_known_client(_body: Ruma) -> Result { + let client_url = match services().globals.well_known_client() { + Some(url) => url.to_string(), + None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), + }; + + Ok(discover_homeserver::Response { + homeserver: HomeserverInfo { + base_url: client_url.clone(), + }, + identity_server: None, + sliding_sync_proxy: Some(SlidingSyncProxyInfo { + url: client_url, + }), + tile_server: None, + }) +} + +/// # `GET /.well-known/matrix/support` +/// +/// Server support contact and support page of a homeserver's domain. 
+pub async fn well_known_support(_body: Ruma) -> Result { + let support_page = services() + .globals + .well_known_support_page() + .as_ref() + .map(ToString::to_string); + + let role = services().globals.well_known_support_role().clone(); + + // support page or role must be either defined for this to be valid + if support_page.is_none() && role.is_none() { + return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); + } + + let email_address = services().globals.well_known_support_email().clone(); + let matrix_id = services().globals.well_known_support_mxid().clone(); + + // if a role is specified, an email address or matrix id is required + if role.is_some() && (email_address.is_none() && matrix_id.is_none()) { + return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); + } + + // TOOD: support defining multiple contacts in the config + let mut contacts: Vec = vec![]; + + if let Some(role) = role { + let contact = Contact { + role, + email_address, + matrix_id, + }; + + contacts.push(contact); + } + + // support page or role+contacts must be either defined for this to be valid + if contacts.is_empty() && support_page.is_none() { + return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); + } + + Ok(discover_support::Response { + contacts, + support_page, + }) +} + +/// # `GET /client/server.json` +/// +/// Endpoint provided by sliding sync proxy used by some clients such as Element +/// Web as a non-standard health check. 
+pub async fn syncv3_client_server_json() -> Result { + let server_url = match services().globals.well_known_client() { + Some(url) => url.to_string(), + None => match services().globals.well_known_server() { + Some(url) => url.to_string(), + None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), + }, + }; + + let version = match option_env!("CONDUIT_VERSION_EXTRA") { + Some(extra) => format!("{} ({})", env!("CARGO_PKG_VERSION"), extra), + None => env!("CARGO_PKG_VERSION").to_owned(), + }; + + Ok(Json(serde_json::json!({ + "server": server_url, + "version": version, + }))) +} + +/// # `GET /_conduwuit/server_version` +/// +/// Conduwuit-specific API to get the server version, results akin to +/// `/_matrix/federation/v1/version` +pub async fn conduwuit_server_version() -> Result { + let version = match option_env!("CONDUIT_VERSION_EXTRA") { + Some(extra) => format!("{} ({})", env!("CARGO_PKG_VERSION"), extra), + None => env!("CARGO_PKG_VERSION").to_owned(), + }; + + Ok(Json(serde_json::json!({ + "name": "Conduwuit", + "version": version, + }))) +} diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs new file mode 100644 index 00000000..418ad50c --- /dev/null +++ b/src/api/client_server/user_directory.rs @@ -0,0 +1,102 @@ +use ruma::{ + api::client::user_directory::search_users, + events::{ + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, + StateEventType, + }, +}; + +use crate::{services, Result, Ruma}; + +/// # `POST /_matrix/client/r0/user_directory/search` +/// +/// Searches all known users for a match. +/// +/// - Hides any local users that aren't in any public rooms (i.e. 
those that +/// have the join rule set to public) +/// and don't share a room with the sender +pub async fn search_users_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let limit = u64::from(body.limit) as usize; + + let mut users = services().users.iter().filter_map(|user_id| { + // Filter out buggy users (they should not exist, but you never know...) + let user_id = user_id.ok()?; + + let user = search_users::v3::User { + user_id: user_id.clone(), + display_name: services().users.displayname(&user_id).ok()?, + avatar_url: services().users.avatar_url(&user_id).ok()?, + }; + + let user_id_matches = user + .user_id + .to_string() + .to_lowercase() + .contains(&body.search_term.to_lowercase()); + + let user_displayname_matches = user + .display_name + .as_ref() + .filter(|name| { + name.to_lowercase() + .contains(&body.search_term.to_lowercase()) + }) + .is_some(); + + if !user_id_matches && !user_displayname_matches { + return None; + } + + // It's a matching user, but is the sender allowed to see them? + let mut user_visible = false; + + let user_is_in_public_rooms = services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(Result::ok) + .any(|room| { + services() + .rooms + .state_accessor + .room_state_get(&room, &StateEventType::RoomJoinRules, "") + .map_or(false, |event| { + event.map_or(false, |event| { + serde_json::from_str(event.content.get()) + .map_or(false, |r: RoomJoinRulesEventContent| r.join_rule == JoinRule::Public) + }) + }) + }); + + if user_is_in_public_rooms { + user_visible = true; + } else { + let user_is_in_shared_rooms = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), user_id]) + .ok()? 
+ .next() + .is_some(); + + if user_is_in_shared_rooms { + user_visible = true; + } + } + + if !user_visible { + return None; + } + + Some(user) + }); + + let results = users.by_ref().take(limit).collect(); + let limited = users.next().is_some(); + + Ok(search_users::v3::Response { + results, + limited, + }) +} diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs new file mode 100644 index 00000000..5bd10ea3 --- /dev/null +++ b/src/api/client_server/voip.rs @@ -0,0 +1,49 @@ +use std::time::{Duration, SystemTime}; + +use base64::{engine::general_purpose, Engine as _}; +use hmac::{Hmac, Mac}; +use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; +use sha1::Sha1; + +use crate::{services, Result, Ruma}; + +type HmacSha1 = Hmac; + +/// # `GET /_matrix/client/r0/voip/turnServer` +/// +/// TODO: Returns information about the recommended turn server. +pub async fn turn_server_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let turn_secret = services().globals.turn_secret().clone(); + + let (username, password) = if !turn_secret.is_empty() { + let expiry = SecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()), + ) + .expect("time is valid"); + + let username: String = format!("{}:{}", expiry.get(), sender_user); + + let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes()).expect("HMAC can take key of any size"); + mac.update(username.as_bytes()); + + let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes()); + + (username, password) + } else { + ( + services().globals.turn_username().clone(), + services().globals.turn_password().clone(), + ) + }; + + Ok(get_turn_server_info::v3::Response { + username, + password, + uris: services().globals.turn_uris().to_vec(), + ttl: Duration::from_secs(services().globals.turn_ttl()), + }) +} diff --git a/src/api/mod.rs 
b/src/api/mod.rs index 9ca24e72..5c284757 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,14 +1,3 @@ -#![type_length_limit = "16384"] //TODO: reduce me -#![allow(clippy::toplevel_ref_arg)] - -pub mod client; -pub mod router; -pub mod server; - -extern crate conduwuit_core as conduwuit; -extern crate conduwuit_service as service; - -pub(crate) use self::router::{Ruma, RumaResponse, State}; - -conduwuit::mod_ctor! {} -conduwuit::mod_dtor! {} +pub mod client_server; +pub mod ruma_wrapper; +pub mod server_server; diff --git a/src/api/router.rs b/src/api/router.rs deleted file mode 100644 index 3fbef275..00000000 --- a/src/api/router.rs +++ /dev/null @@ -1,301 +0,0 @@ -mod args; -mod auth; -mod handler; -mod request; -mod response; -pub mod state; - -use std::str::FromStr; - -use axum::{ - Router, - response::{IntoResponse, Redirect}, - routing::{any, get, post}, -}; -use conduwuit::{Server, err}; -use http::{Uri, uri}; - -use self::handler::RouterExt; -pub(super) use self::{args::Args as Ruma, response::RumaResponse, state::State}; -use crate::{client, server}; - -pub fn build(router: Router, server: &Server) -> Router { - let config = &server.config; - let mut router = router - .ruma_route(&client::get_timezone_key_route) - .ruma_route(&client::get_profile_key_route) - .ruma_route(&client::set_profile_key_route) - .ruma_route(&client::delete_profile_key_route) - .ruma_route(&client::set_timezone_key_route) - .ruma_route(&client::delete_timezone_key_route) - .ruma_route(&client::appservice_ping) - .ruma_route(&client::get_supported_versions_route) - .ruma_route(&client::get_register_available_route) - .ruma_route(&client::register_route) - .ruma_route(&client::get_login_types_route) - .ruma_route(&client::login_route) - .ruma_route(&client::login_token_route) - .ruma_route(&client::whoami_route) - .ruma_route(&client::logout_route) - .ruma_route(&client::logout_all_route) - .ruma_route(&client::change_password_route) - .ruma_route(&client::deactivate_route) - 
.ruma_route(&client::third_party_route) - .ruma_route(&client::request_3pid_management_token_via_email_route) - .ruma_route(&client::request_3pid_management_token_via_msisdn_route) - .ruma_route(&client::check_registration_token_validity) - .ruma_route(&client::get_capabilities_route) - .ruma_route(&client::get_pushrules_all_route) - .ruma_route(&client::get_pushrules_global_route) - .ruma_route(&client::set_pushrule_route) - .ruma_route(&client::get_pushrule_route) - .ruma_route(&client::set_pushrule_enabled_route) - .ruma_route(&client::get_pushrule_enabled_route) - .ruma_route(&client::get_pushrule_actions_route) - .ruma_route(&client::set_pushrule_actions_route) - .ruma_route(&client::delete_pushrule_route) - .ruma_route(&client::get_room_event_route) - .ruma_route(&client::get_room_aliases_route) - .ruma_route(&client::get_filter_route) - .ruma_route(&client::create_filter_route) - .ruma_route(&client::create_openid_token_route) - .ruma_route(&client::set_global_account_data_route) - .ruma_route(&client::set_room_account_data_route) - .ruma_route(&client::get_global_account_data_route) - .ruma_route(&client::get_room_account_data_route) - .ruma_route(&client::set_displayname_route) - .ruma_route(&client::get_displayname_route) - .ruma_route(&client::set_avatar_url_route) - .ruma_route(&client::get_avatar_url_route) - .ruma_route(&client::get_profile_route) - .ruma_route(&client::set_presence_route) - .ruma_route(&client::get_presence_route) - .ruma_route(&client::upload_keys_route) - .ruma_route(&client::get_keys_route) - .ruma_route(&client::claim_keys_route) - .ruma_route(&client::create_backup_version_route) - .ruma_route(&client::update_backup_version_route) - .ruma_route(&client::delete_backup_version_route) - .ruma_route(&client::get_latest_backup_info_route) - .ruma_route(&client::get_backup_info_route) - .ruma_route(&client::add_backup_keys_route) - .ruma_route(&client::add_backup_keys_for_room_route) - 
.ruma_route(&client::add_backup_keys_for_session_route) - .ruma_route(&client::delete_backup_keys_for_room_route) - .ruma_route(&client::delete_backup_keys_for_session_route) - .ruma_route(&client::delete_backup_keys_route) - .ruma_route(&client::get_backup_keys_for_room_route) - .ruma_route(&client::get_backup_keys_for_session_route) - .ruma_route(&client::get_backup_keys_route) - .ruma_route(&client::set_read_marker_route) - .ruma_route(&client::create_receipt_route) - .ruma_route(&client::create_typing_event_route) - .ruma_route(&client::create_room_route) - .ruma_route(&client::redact_event_route) - .ruma_route(&client::report_event_route) - .ruma_route(&client::report_room_route) - .ruma_route(&client::create_alias_route) - .ruma_route(&client::delete_alias_route) - .ruma_route(&client::get_alias_route) - .ruma_route(&client::join_room_by_id_route) - .ruma_route(&client::join_room_by_id_or_alias_route) - .ruma_route(&client::joined_members_route) - .ruma_route(&client::knock_room_route) - .ruma_route(&client::leave_room_route) - .ruma_route(&client::forget_room_route) - .ruma_route(&client::joined_rooms_route) - .ruma_route(&client::kick_user_route) - .ruma_route(&client::ban_user_route) - .ruma_route(&client::unban_user_route) - .ruma_route(&client::invite_user_route) - .ruma_route(&client::set_room_visibility_route) - .ruma_route(&client::get_room_visibility_route) - .ruma_route(&client::get_public_rooms_route) - .ruma_route(&client::get_public_rooms_filtered_route) - .ruma_route(&client::search_users_route) - .ruma_route(&client::get_member_events_route) - .ruma_route(&client::get_protocols_route) - .route("/_matrix/client/unstable/thirdparty/protocols", - get(client::get_protocols_route_unstable)) - .ruma_route(&client::send_message_event_route) - .ruma_route(&client::send_state_event_for_key_route) - .ruma_route(&client::get_state_events_route) - .ruma_route(&client::get_state_events_for_key_route) - // Ruma doesn't have support for multiple paths for a 
single endpoint yet, and these routes - // share one Ruma request / response type pair with {get,send}_state_event_for_key_route - .route( - "/_matrix/client/r0/rooms/:room_id/state/:event_type", - get(client::get_state_events_for_empty_key_route) - .put(client::send_state_event_for_empty_key_route), - ) - .route( - "/_matrix/client/v3/rooms/:room_id/state/:event_type", - get(client::get_state_events_for_empty_key_route) - .put(client::send_state_event_for_empty_key_route), - ) - // These two endpoints allow trailing slashes - .route( - "/_matrix/client/r0/rooms/:room_id/state/:event_type/", - get(client::get_state_events_for_empty_key_route) - .put(client::send_state_event_for_empty_key_route), - ) - .route( - "/_matrix/client/v3/rooms/:room_id/state/:event_type/", - get(client::get_state_events_for_empty_key_route) - .put(client::send_state_event_for_empty_key_route), - ) - .ruma_route(&client::sync_events_route) - .ruma_route(&client::sync_events_v4_route) - .ruma_route(&client::sync_events_v5_route) - .ruma_route(&client::get_context_route) - .ruma_route(&client::get_message_events_route) - .ruma_route(&client::search_events_route) - .ruma_route(&client::turn_server_route) - .ruma_route(&client::send_event_to_device_route) - .ruma_route(&client::create_content_route) - .ruma_route(&client::get_content_thumbnail_route) - .ruma_route(&client::get_content_route) - .ruma_route(&client::get_content_as_filename_route) - .ruma_route(&client::get_media_preview_route) - .ruma_route(&client::get_media_config_route) - .ruma_route(&client::get_devices_route) - .ruma_route(&client::get_device_route) - .ruma_route(&client::update_device_route) - .ruma_route(&client::delete_device_route) - .ruma_route(&client::delete_devices_route) - .ruma_route(&client::get_tags_route) - .ruma_route(&client::update_tag_route) - .ruma_route(&client::delete_tag_route) - .ruma_route(&client::upload_signing_keys_route) - .ruma_route(&client::upload_signatures_route) - 
.ruma_route(&client::get_key_changes_route) - .ruma_route(&client::get_pushers_route) - .ruma_route(&client::set_pushers_route) - .ruma_route(&client::upgrade_room_route) - .ruma_route(&client::get_threads_route) - .ruma_route(&client::get_relating_events_with_rel_type_and_event_type_route) - .ruma_route(&client::get_relating_events_with_rel_type_route) - .ruma_route(&client::get_relating_events_route) - .ruma_route(&client::get_hierarchy_route) - .ruma_route(&client::get_mutual_rooms_route) - .ruma_route(&client::get_room_summary) - .route( - "/_matrix/client/unstable/im.nheko.summary/rooms/:room_id_or_alias/summary", - get(client::get_room_summary_legacy) - ) - .ruma_route(&client::well_known_support) - .ruma_route(&client::well_known_client) - .route("/_conduwuit/server_version", get(client::conduwuit_server_version)) - .ruma_route(&client::room_initial_sync_route) - .route("/client/server.json", get(client::syncv3_client_server_json)); - - if config.allow_federation { - router = router - .ruma_route(&server::get_server_version_route) - .route("/_matrix/key/v2/server", get(server::get_server_keys_route)) - .route( - "/_matrix/key/v2/server/:key_id", - get(server::get_server_keys_deprecated_route), - ) - .ruma_route(&server::get_public_rooms_route) - .ruma_route(&server::get_public_rooms_filtered_route) - .ruma_route(&server::send_transaction_message_route) - .ruma_route(&server::get_event_route) - .ruma_route(&server::get_backfill_route) - .ruma_route(&server::get_missing_events_route) - .ruma_route(&server::get_event_authorization_route) - .ruma_route(&server::get_room_state_route) - .ruma_route(&server::get_room_state_ids_route) - .ruma_route(&server::create_leave_event_template_route) - .ruma_route(&server::create_knock_event_template_route) - .ruma_route(&server::create_leave_event_v1_route) - .ruma_route(&server::create_leave_event_v2_route) - .ruma_route(&server::create_knock_event_v1_route) - .ruma_route(&server::create_join_event_template_route) - 
.ruma_route(&server::create_join_event_v1_route) - .ruma_route(&server::create_join_event_v2_route) - .ruma_route(&server::create_invite_route) - .ruma_route(&server::get_devices_route) - .ruma_route(&server::get_room_information_route) - .ruma_route(&server::get_profile_information_route) - .ruma_route(&server::get_keys_route) - .ruma_route(&server::claim_keys_route) - .ruma_route(&server::get_openid_userinfo_route) - .ruma_route(&server::get_hierarchy_route) - .ruma_route(&server::well_known_server) - .ruma_route(&server::get_content_route) - .ruma_route(&server::get_content_thumbnail_route) - .route("/_conduwuit/local_user_count", get(client::conduwuit_local_user_count)); - } else { - router = router - .route("/_matrix/federation/*path", any(federation_disabled)) - .route("/.well-known/matrix/server", any(federation_disabled)) - .route("/_matrix/key/*path", any(federation_disabled)) - .route("/_conduwuit/local_user_count", any(federation_disabled)); - } - - if config.allow_legacy_media { - router = router - .ruma_route(&client::get_media_config_legacy_route) - .ruma_route(&client::get_media_preview_legacy_route) - .ruma_route(&client::get_content_legacy_route) - .ruma_route(&client::get_content_as_filename_legacy_route) - .ruma_route(&client::get_content_thumbnail_legacy_route) - .route("/_matrix/media/v1/config", get(client::get_media_config_legacy_legacy_route)) - .route("/_matrix/media/v1/upload", post(client::create_content_legacy_route)) - .route( - "/_matrix/media/v1/preview_url", - get(client::get_media_preview_legacy_legacy_route), - ) - .route( - "/_matrix/media/v1/download/:server_name/:media_id", - get(client::get_content_legacy_legacy_route), - ) - .route( - "/_matrix/media/v1/download/:server_name/:media_id/:file_name", - get(client::get_content_as_filename_legacy_legacy_route), - ) - .route( - "/_matrix/media/v1/thumbnail/:server_name/:media_id", - get(client::get_content_thumbnail_legacy_legacy_route), - ); - } else { - router = router - 
.route("/_matrix/media/v1/*path", any(legacy_media_disabled)) - .route("/_matrix/media/v3/config", any(legacy_media_disabled)) - .route("/_matrix/media/v3/download/*path", any(legacy_media_disabled)) - .route("/_matrix/media/v3/thumbnail/*path", any(legacy_media_disabled)) - .route("/_matrix/media/v3/preview_url", any(redirect_legacy_preview)) - .route("/_matrix/media/r0/config", any(legacy_media_disabled)) - .route("/_matrix/media/r0/download/*path", any(legacy_media_disabled)) - .route("/_matrix/media/r0/thumbnail/*path", any(legacy_media_disabled)) - .route("/_matrix/media/r0/preview_url", any(redirect_legacy_preview)); - } - - router -} - -async fn redirect_legacy_preview(uri: Uri) -> impl IntoResponse { - let path = "/_matrix/client/v1/media/preview_url"; - let query = uri.query().unwrap_or_default(); - - let path_and_query = format!("{path}?{query}"); - let path_and_query = uri::PathAndQuery::from_str(&path_and_query) - .expect("Failed to build PathAndQuery for media preview redirect URI"); - - let uri = uri::Builder::new() - .path_and_query(path_and_query) - .build() - .expect("Failed to build URI for redirect") - .to_string(); - - Redirect::temporary(&uri) -} - -async fn legacy_media_disabled() -> impl IntoResponse { - err!(Request(Forbidden("Unauthenticated media is disabled."))) -} - -async fn federation_disabled() -> impl IntoResponse { - err!(Request(Forbidden("Federation is disabled."))) -} diff --git a/src/api/router/args.rs b/src/api/router/args.rs deleted file mode 100644 index 26713dcc..00000000 --- a/src/api/router/args.rs +++ /dev/null @@ -1,191 +0,0 @@ -use std::{mem, ops::Deref}; - -use async_trait::async_trait; -use axum::{body::Body, extract::FromRequest}; -use bytes::{BufMut, Bytes, BytesMut}; -use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY}; -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, - OwnedUserId, ServerName, UserId, api::IncomingRequest, -}; -use 
service::Services; - -use super::{auth, auth::Auth, request, request::Request}; -use crate::{State, service::appservice::RegistrationInfo}; - -/// Extractor for Ruma request structs -pub(crate) struct Args { - /// Request struct body - pub(crate) body: T, - - /// Federation server authentication: X-Matrix origin - /// None when not a federation server. - pub(crate) origin: Option, - - /// Local user authentication: user_id. - /// None when not an authenticated local user. - pub(crate) sender_user: Option, - - /// Local user authentication: device_id. - /// None when not an authenticated local user or no device. - pub(crate) sender_device: Option, - - /// Appservice authentication; registration info. - /// None when not an appservice. - pub(crate) appservice_info: Option, - - /// Parsed JSON content. - /// None when body is not a valid string - pub(crate) json_body: Option, -} - -impl Args -where - T: IncomingRequest + Send + Sync + 'static, -{ - #[inline] - pub(crate) fn sender(&self) -> (&UserId, &DeviceId) { - (self.sender_user(), self.sender_device()) - } - - #[inline] - pub(crate) fn sender_user(&self) -> &UserId { - self.sender_user - .as_deref() - .expect("user must be authenticated for this handler") - } - - #[inline] - pub(crate) fn sender_device(&self) -> &DeviceId { - self.sender_device - .as_deref() - .expect("user must be authenticated and device identified") - } - - #[inline] - pub(crate) fn origin(&self) -> &ServerName { - self.origin - .as_deref() - .expect("server must be authenticated for this handler") - } -} - -impl Deref for Args -where - T: IncomingRequest + Send + Sync + 'static, -{ - type Target = T; - - fn deref(&self) -> &Self::Target { &self.body } -} - -#[async_trait] -impl FromRequest for Args -where - T: IncomingRequest + Send + Sync + 'static, -{ - type Rejection = Error; - - async fn from_request( - request: hyper::Request, - services: &State, - ) -> Result { - let mut request = request::from(services, request).await?; - let mut 
json_body = serde_json::from_slice::(&request.body).ok(); - - // while very unusual and really shouldn't be recommended, Synapse accepts POST - // requests with a completely empty body. very old clients, libraries, and some - // appservices still call APIs like /join like this. so let's just default to - // empty object `{}` to copy synapse's behaviour - if json_body.is_none() - && request.parts.method == http::Method::POST - && !request.parts.uri.path().contains("/media/") - { - trace!("json_body from_request: {:?}", json_body.clone()); - debug_warn!( - "received a POST request with an empty body, defaulting/assuming to {{}} like \ - Synapse does" - ); - json_body = Some(CanonicalJsonValue::Object(CanonicalJsonObject::new())); - } - let auth = auth::auth(services, &mut request, json_body.as_ref(), &T::METADATA).await?; - Ok(Self { - body: make_body::(services, &mut request, json_body.as_mut(), &auth)?, - origin: auth.origin, - sender_user: auth.sender_user, - sender_device: auth.sender_device, - appservice_info: auth.appservice_info, - json_body, - }) - } -} - -fn make_body( - services: &Services, - request: &mut Request, - json_body: Option<&mut CanonicalJsonValue>, - auth: &Auth, -) -> Result -where - T: IncomingRequest, -{ - let body = take_body(services, request, json_body, auth); - let http_request = into_http_request(request, body); - T::try_from_http_request(http_request, &request.path) - .map_err(|e| err!(Request(BadJson(debug_warn!("{e}"))))) -} - -fn into_http_request(request: &Request, body: Bytes) -> hyper::Request { - let mut http_request = hyper::Request::builder() - .uri(request.parts.uri.clone()) - .method(request.parts.method.clone()); - - *http_request.headers_mut().expect("mutable http headers") = request.parts.headers.clone(); - - let http_request = http_request.body(body).expect("http request body"); - - let headers = http_request.headers(); - let method = http_request.method(); - let uri = http_request.uri(); - debug!("{method:?} {uri:?} 
{headers:?}"); - - http_request -} - -#[allow(clippy::needless_pass_by_value)] -fn take_body( - services: &Services, - request: &mut Request, - json_body: Option<&mut CanonicalJsonValue>, - auth: &Auth, -) -> Bytes { - let Some(CanonicalJsonValue::Object(json_body)) = json_body else { - return mem::take(&mut request.body); - }; - - let user_id = auth.sender_user.clone().unwrap_or_else(|| { - let server_name = services.globals.server_name(); - UserId::parse_with_server_name(EMPTY, server_name).expect("valid user_id") - }); - - let uiaa_request = json_body - .get("auth") - .and_then(CanonicalJsonValue::as_object) - .and_then(|auth| auth.get("session")) - .and_then(CanonicalJsonValue::as_str) - .and_then(|session| { - services - .uiaa - .get_uiaa_request(&user_id, auth.sender_device.as_deref(), session) - }); - - if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { - for (key, value) in initial_request { - json_body.entry(key).or_insert(value); - } - } - - let mut buf = BytesMut::new().writer(); - serde_json::to_writer(&mut buf, &json_body).expect("value serialization can't fail"); - buf.into_inner().freeze() -} diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs deleted file mode 100644 index 01254c32..00000000 --- a/src/api/router/auth.rs +++ /dev/null @@ -1,344 +0,0 @@ -use axum::RequestPartsExt; -use axum_extra::{ - TypedHeader, - headers::{Authorization, authorization::Bearer}, - typed_header::TypedHeaderRejectionReason, -}; -use conduwuit::{Err, Error, Result, debug_error, err, warn}; -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, - api::{ - AuthScheme, IncomingRequest, Metadata, - client::{ - directory::get_public_rooms, - error::ErrorKind, - profile::{ - get_avatar_url, get_display_name, get_profile, get_profile_key, get_timezone_key, - }, - voip::get_turn_server_info, - }, - federation::{authentication::XMatrix, openid::get_openid_userinfo}, - }, -}; -use service::{ - 
Services, - server_keys::{PubKeyMap, PubKeys}, -}; - -use super::request::Request; -use crate::service::appservice::RegistrationInfo; - -enum Token { - Appservice(Box), - User((OwnedUserId, OwnedDeviceId)), - Invalid, - None, -} - -pub(super) struct Auth { - pub(super) origin: Option, - pub(super) sender_user: Option, - pub(super) sender_device: Option, - pub(super) appservice_info: Option, -} - -pub(super) async fn auth( - services: &Services, - request: &mut Request, - json_body: Option<&CanonicalJsonValue>, - metadata: &Metadata, -) -> Result { - let bearer: Option>> = request.parts.extract().await?; - let token = match &bearer { - | Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), - | None => request.query.access_token.as_deref(), - }; - - let token = if let Some(token) = token { - match services.appservice.find_from_token(token).await { - | Some(reg_info) => Token::Appservice(Box::new(reg_info)), - | _ => match services.users.find_from_token(token).await { - | Ok((user_id, device_id)) => Token::User((user_id, device_id)), - | _ => Token::Invalid, - }, - } - } else { - Token::None - }; - - if metadata.authentication == AuthScheme::None { - match metadata { - | &get_public_rooms::v3::Request::METADATA => { - if !services - .server - .config - .allow_public_room_directory_without_auth - { - match token { - | Token::Appservice(_) | Token::User(_) => { - // we should have validated the token above - // already - }, - | Token::None | Token::Invalid => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing or invalid access token.", - )); - }, - } - } - }, - | &get_profile::v3::Request::METADATA - | &get_profile_key::unstable::Request::METADATA - | &get_display_name::v3::Request::METADATA - | &get_avatar_url::v3::Request::METADATA - | &get_timezone_key::unstable::Request::METADATA => { - if services.server.config.require_auth_for_profile_requests { - match token { - | Token::Appservice(_) | Token::User(_) => { - // we should have 
validated the token above - // already - }, - | Token::None | Token::Invalid => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing or invalid access token.", - )); - }, - } - } - }, - | _ => {}, - } - } - - match (metadata.authentication, token) { - | (AuthScheme::AccessToken, Token::Appservice(info)) => - Ok(auth_appservice(services, request, info).await?), - | ( - AuthScheme::None | AuthScheme::AccessTokenOptional | AuthScheme::AppserviceToken, - Token::Appservice(info), - ) => Ok(Auth { - origin: None, - sender_user: None, - sender_device: None, - appservice_info: Some(*info), - }), - | (AuthScheme::AccessToken, Token::None) => match metadata { - | &get_turn_server_info::v3::Request::METADATA => { - if services.server.config.turn_allow_guests { - Ok(Auth { - origin: None, - sender_user: None, - sender_device: None, - appservice_info: None, - }) - } else { - Err(Error::BadRequest(ErrorKind::MissingToken, "Missing access token.")) - } - }, - | _ => Err(Error::BadRequest(ErrorKind::MissingToken, "Missing access token.")), - }, - | ( - AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None, - Token::User((user_id, device_id)), - ) => Ok(Auth { - origin: None, - sender_user: Some(user_id), - sender_device: Some(device_id), - appservice_info: None, - }), - | (AuthScheme::ServerSignatures, Token::None) => - Ok(auth_server(services, request, json_body).await?), - | ( - AuthScheme::None | AuthScheme::AppserviceToken | AuthScheme::AccessTokenOptional, - Token::None, - ) => Ok(Auth { - sender_user: None, - sender_device: None, - origin: None, - appservice_info: None, - }), - | (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => - Err(Error::BadRequest( - ErrorKind::Unauthorized, - "Only server signatures should be used on this endpoint.", - )), - | (AuthScheme::AppserviceToken, Token::User(_)) => Err(Error::BadRequest( - ErrorKind::Unauthorized, - "Only appservice access tokens should be used on this 
endpoint.", - )), - | (AuthScheme::None, Token::Invalid) => { - // OpenID federation endpoint uses a query param with the same name, drop this - // once query params for user auth are removed from the spec. This is - // required to make integration manager work. - if request.query.access_token.is_some() - && metadata == &get_openid_userinfo::v1::Request::METADATA - { - Ok(Auth { - origin: None, - sender_user: None, - sender_device: None, - appservice_info: None, - }) - } else { - Err(Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown access token.", - )) - } - }, - | (_, Token::Invalid) => Err(Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown access token.", - )), - } -} - -async fn auth_appservice( - services: &Services, - request: &Request, - info: Box, -) -> Result { - let user_id_default = || { - UserId::parse_with_server_name( - info.registration.sender_localpart.as_str(), - services.globals.server_name(), - ) - }; - - let Ok(user_id) = request - .query - .user_id - .clone() - .map_or_else(user_id_default, OwnedUserId::parse) - else { - return Err!(Request(InvalidUsername("Username is invalid."))); - }; - - if !info.is_user_match(&user_id) { - return Err!(Request(Exclusive("User is not in namespace."))); - } - - Ok(Auth { - origin: None, - sender_user: Some(user_id), - sender_device: None, - appservice_info: Some(*info), - }) -} - -async fn auth_server( - services: &Services, - request: &mut Request, - body: Option<&CanonicalJsonValue>, -) -> Result { - type Member = (String, CanonicalJsonValue); - type Object = CanonicalJsonObject; - type Value = CanonicalJsonValue; - - let x_matrix = parse_x_matrix(request).await?; - auth_server_checks(services, &x_matrix)?; - - let destination = services.globals.server_name(); - let origin = &x_matrix.origin; - let signature_uri = request - .parts - .uri - .path_and_query() - .expect("all requests have a path") - .to_string(); - - let signature: [Member; 1] = - 
[(x_matrix.key.as_str().into(), Value::String(x_matrix.sig.to_string()))]; - - let signatures: [Member; 1] = [(origin.as_str().into(), Value::Object(signature.into()))]; - - let authorization: Object = if let Some(body) = body.cloned() { - let authorization: [Member; 6] = [ - ("content".into(), body), - ("destination".into(), Value::String(destination.into())), - ("method".into(), Value::String(request.parts.method.as_str().into())), - ("origin".into(), Value::String(origin.as_str().into())), - ("signatures".into(), Value::Object(signatures.into())), - ("uri".into(), Value::String(signature_uri)), - ]; - - authorization.into() - } else { - let authorization: [Member; 5] = [ - ("destination".into(), Value::String(destination.into())), - ("method".into(), Value::String(request.parts.method.as_str().into())), - ("origin".into(), Value::String(origin.as_str().into())), - ("signatures".into(), Value::Object(signatures.into())), - ("uri".into(), Value::String(signature_uri)), - ]; - - authorization.into() - }; - - let key = services - .server_keys - .get_verify_key(origin, &x_matrix.key) - .await - .map_err(|e| err!(Request(Forbidden(warn!("Failed to fetch signing keys: {e}")))))?; - - let keys: PubKeys = [(x_matrix.key.to_string(), key.key)].into(); - let keys: PubKeyMap = [(origin.as_str().into(), keys)].into(); - if let Err(e) = ruma::signatures::verify_json(&keys, authorization) { - debug_error!("Failed to verify federation request from {origin}: {e}"); - if request.parts.uri.to_string().contains('@') { - warn!( - "Request uri contained '@' character. 
Make sure your reverse proxy gives \ - conduwuit the raw uri (apache: use nocanon)" - ); - } - - return Err!(Request(Forbidden("Failed to verify X-Matrix signatures."))); - } - - Ok(Auth { - origin: origin.to_owned().into(), - sender_user: None, - sender_device: None, - appservice_info: None, - }) -} - -fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { - if !services.config.allow_federation { - return Err!(Config("allow_federation", "Federation is disabled.")); - } - - let destination = services.globals.server_name(); - if x_matrix.destination.as_deref() != Some(destination) { - return Err!(Request(Forbidden("Invalid destination."))); - } - - let origin = &x_matrix.origin; - if services.moderation.is_remote_server_forbidden(origin) { - return Err!(Request(Forbidden(debug_warn!( - "Federation requests from {origin} denied." - )))); - } - - Ok(()) -} - -async fn parse_x_matrix(request: &mut Request) -> Result { - let TypedHeader(Authorization(x_matrix)) = request - .parts - .extract::>>() - .await - .map_err(|e| { - let msg = match e.reason() { - | TypedHeaderRejectionReason::Missing => "Missing Authorization header.", - | TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.", - | _ => "Unknown header-related error", - }; - - err!(Request(Forbidden(warn!("{msg}: {e}")))) - })?; - - Ok(x_matrix) -} diff --git a/src/api/router/handler.rs b/src/api/router/handler.rs deleted file mode 100644 index ab013945..00000000 --- a/src/api/router/handler.rs +++ /dev/null @@ -1,79 +0,0 @@ -use axum::{ - Router, - extract::FromRequestParts, - response::IntoResponse, - routing::{MethodFilter, on}, -}; -use conduwuit::Result; -use futures::{Future, TryFutureExt}; -use http::Method; -use ruma::api::IncomingRequest; - -use super::{Ruma, RumaResponse, State}; - -pub(in super::super) trait RumaHandler { - fn add_route(&'static self, router: Router, path: &str) -> Router; - fn add_routes(&'static self, router: Router) -> Router; -} - -pub(in 
super::super) trait RouterExt { - fn ruma_route(self, handler: &'static H) -> Self - where - H: RumaHandler; -} - -impl RouterExt for Router { - fn ruma_route(self, handler: &'static H) -> Self - where - H: RumaHandler, - { - handler.add_routes(self) - } -} - -macro_rules! ruma_handler { - ( $($tx:ident),* $(,)? ) => { - #[allow(non_snake_case)] - impl RumaHandler<($($tx,)* Ruma,)> for Fun - where - Fun: Fn($($tx,)* Ruma,) -> Fut + Send + Sync + 'static, - Fut: Future> + Send, - Req: IncomingRequest + Send + Sync + 'static, - Err: IntoResponse + Send, - ::OutgoingResponse: Send, - $( $tx: FromRequestParts + Send + Sync + 'static, )* - { - fn add_routes(&'static self, router: Router) -> Router { - Req::METADATA - .history - .all_paths() - .fold(router, |router, path| self.add_route(router, path)) - } - - fn add_route(&'static self, router: Router, path: &str) -> Router { - let action = |$($tx,)* req| self($($tx,)* req).map_ok(RumaResponse); - let method = method_to_filter(&Req::METADATA.method); - router.route(path, on(method, action)) - } - } - } -} -ruma_handler!(); -ruma_handler!(T1); -ruma_handler!(T1, T2); -ruma_handler!(T1, T2, T3); -ruma_handler!(T1, T2, T3, T4); - -const fn method_to_filter(method: &Method) -> MethodFilter { - match *method { - | Method::DELETE => MethodFilter::DELETE, - | Method::GET => MethodFilter::GET, - | Method::HEAD => MethodFilter::HEAD, - | Method::OPTIONS => MethodFilter::OPTIONS, - | Method::PATCH => MethodFilter::PATCH, - | Method::POST => MethodFilter::POST, - | Method::PUT => MethodFilter::PUT, - | Method::TRACE => MethodFilter::TRACE, - | _ => panic!("Unsupported HTTP method"), - } -} diff --git a/src/api/router/request.rs b/src/api/router/request.rs deleted file mode 100644 index 3cdc452b..00000000 --- a/src/api/router/request.rs +++ /dev/null @@ -1,42 +0,0 @@ -use std::str; - -use axum::{RequestExt, RequestPartsExt, extract::Path}; -use bytes::Bytes; -use conduwuit::{Result, err}; -use http::request::Parts; -use 
serde::Deserialize; -use service::Services; - -#[derive(Deserialize)] -pub(super) struct QueryParams { - pub(super) access_token: Option, - pub(super) user_id: Option, -} - -pub(super) struct Request { - pub(super) path: Path>, - pub(super) query: QueryParams, - pub(super) body: Bytes, - pub(super) parts: Parts, -} - -pub(super) async fn from( - services: &Services, - request: hyper::Request, -) -> Result { - let limited = request.with_limited_body(); - let (mut parts, body) = limited.into_parts(); - - let path: Path> = parts.extract().await?; - let query = parts.uri.query().unwrap_or_default(); - let query = serde_html_form::from_str(query) - .map_err(|e| err!(Request(Unknown("Failed to read query parameters: {e}"))))?; - - let max_body_size = services.server.config.max_request_size; - - let body = axum::body::to_bytes(body, max_body_size) - .await - .map_err(|e| err!(Request(TooLarge("Request body too large: {e}"))))?; - - Ok(Request { path, query, body, parts }) -} diff --git a/src/api/router/response.rs b/src/api/router/response.rs deleted file mode 100644 index 03c9060e..00000000 --- a/src/api/router/response.rs +++ /dev/null @@ -1,29 +0,0 @@ -use axum::response::{IntoResponse, Response}; -use bytes::BytesMut; -use conduwuit::{Error, error}; -use http::StatusCode; -use http_body_util::Full; -use ruma::api::{OutgoingResponse, client::uiaa::UiaaResponse}; - -pub(crate) struct RumaResponse(pub(crate) T) -where - T: OutgoingResponse; - -impl From for RumaResponse { - fn from(t: Error) -> Self { Self(t.into()) } -} - -impl IntoResponse for RumaResponse -where - T: OutgoingResponse, -{ - fn into_response(self) -> Response { - self.0 - .try_into_http_response::() - .inspect_err(|e| error!("response error: {e}")) - .map_or_else( - |_| StatusCode::INTERNAL_SERVER_ERROR.into_response(), - |r| r.map(BytesMut::freeze).map(Full::new).into_response(), - ) - } -} diff --git a/src/api/router/state.rs b/src/api/router/state.rs deleted file mode 100644 index 57eb94ca..00000000 
--- a/src/api/router/state.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::{ops::Deref, sync::Arc}; - -use conduwuit_service::Services; - -#[derive(Clone, Copy)] -pub struct State { - services: *const Services, -} - -pub struct Guard { - services: Arc, -} - -pub fn create(services: Arc) -> (State, Guard) { - let state = State { - services: Arc::into_raw(services.clone()), - }; - - let guard = Guard { services }; - - (state, guard) -} - -impl Drop for Guard { - fn drop(&mut self) { - let ptr = Arc::as_ptr(&self.services); - // SAFETY: Parity with Arc::into_raw() called in create(). This revivifies the - // Arc lost to State so it can be dropped, otherwise Services will leak. - let arc = unsafe { Arc::from_raw(ptr) }; - debug_assert!( - Arc::strong_count(&arc) > 1, - "Services usually has more than one reference and is not dropped here" - ); - } -} - -impl Deref for State { - type Target = Services; - - fn deref(&self) -> &Self::Target { - deref(&self.services).expect("dereferenced Services pointer in State must not be null") - } -} - -/// SAFETY: State is a thin wrapper containing a raw const pointer to Services -/// in lieu of an Arc. Services is internally threadsafe. If State contains -/// additional fields this notice should be reevaluated. -unsafe impl Send for State {} - -/// SAFETY: State is a thin wrapper containing a raw const pointer to Services -/// in lieu of an Arc. Services is internally threadsafe. If State contains -/// additional fields this notice should be reevaluated. -unsafe impl Sync for State {} - -fn deref(services: &*const Services) -> Option<&Services> { - // SAFETY: We replaced Arc with *const Services in State. This is - // worth about 10 clones (20 reference count updates) for each request handled. - // Though this is not an incredibly large quantity, it's woefully unnecessary - // given the context as explained below; though it is not currently known to be - // a performance bottleneck, the front-line position justifies preempting it. 
- // - // Services is created prior to the axum/tower stack and Router, and prior - // to serving any requests through the handlers in this crate. It is then - // dropped only after all requests have completed, the listening sockets - // have been closed, axum/tower has been dropped. Thus Services is - // expected to live at least as long as any instance of State, making the - // constant updates to the prior Arc unnecessary to keep Services alive. - // - // Nevertheless if it is possible to accomplish this by annotating State - // with a lifetime to hold a reference (and be aware I have made a - // significant effort trying to make this work) this unsafety may not be - // necessary. It is either very difficult or impossible to get a - // lifetime'ed reference through Router / RumaHandler; though it is - // possible to pass a reference through axum's `with_state()` in trivial - // configurations as the only requirement of a State is Clone. - unsafe { services.as_ref() } -} diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs new file mode 100644 index 00000000..3de3b842 --- /dev/null +++ b/src/api/ruma_wrapper/axum.rs @@ -0,0 +1,451 @@ +use std::{collections::BTreeMap, str}; + +use axum::{ + async_trait, + body::{Full, HttpBody}, + extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader}, + headers::{ + authorization::{Bearer, Credentials}, + Authorization, + }, + response::{IntoResponse, Response}, + BoxError, RequestExt, RequestPartsExt, +}; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use http::{uri::PathAndQuery, Request, StatusCode}; +use ruma::{ + api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, + CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, +}; +use serde::Deserialize; +use tracing::{debug, error, trace, warn}; + +use super::{Ruma, RumaResponse}; +use crate::{service::appservice::RegistrationInfo, services, Error, Result}; + +enum Token { + Appservice(Box), 
+ User((OwnedUserId, OwnedDeviceId)), + Invalid, + None, +} + +#[derive(Deserialize)] +struct QueryParams { + access_token: Option, + user_id: Option, +} + +#[async_trait] +impl FromRequest for Ruma +where + T: IncomingRequest, + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into, +{ + type Rejection = Error; + + #[allow(unused_qualifications)] // async traits + async fn from_request(req: Request, _state: &S) -> Result { + let (mut parts, mut body) = match req.with_limited_body() { + Ok(limited_req) => { + let (parts, body) = limited_req.into_parts(); + let body = to_bytes(body) + .await + .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; + (parts, body) + }, + Err(original_req) => { + let (parts, body) = original_req.into_parts(); + let body = to_bytes(body) + .await + .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; + (parts, body) + }, + }; + + let metadata = T::METADATA; + let auth_header: Option>> = parts.extract().await?; + let path_params: Path> = parts.extract().await?; + + let query = parts.uri.query().unwrap_or_default(); + let query_params: QueryParams = match serde_html_form::from_str(query) { + Ok(params) => params, + Err(e) => { + error!(%query, "Failed to deserialize query parameters: {e}"); + return Err(Error::BadRequest(ErrorKind::Unknown, "Failed to read query parameters")); + }, + }; + + let token = match &auth_header { + Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), + None => query_params.access_token.as_deref(), + }; + + let token = if let Some(token) = token { + if let Some(reg_info) = services().appservice.find_from_token(token).await { + Token::Appservice(Box::new(reg_info)) + } else if let Some((user_id, device_id)) = services().users.find_from_token(token)? 
{ + Token::User((user_id, OwnedDeviceId::from(device_id))) + } else { + Token::Invalid + } + } else { + Token::None + }; + + if metadata.authentication == AuthScheme::None { + match parts.uri.path() { + // TODO: can we check this better? + "/_matrix/client/v3/publicRooms" | "/_matrix/client/r0/publicRooms" => { + if !services() + .globals + .config + .allow_public_room_directory_without_auth + { + match token { + Token::Appservice(_) | Token::User(_) => { + // we should have validated the token above + // already + }, + Token::None | Token::Invalid => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing or invalid access token.", + )); + }, + } + } + }, + _ => {}, + }; + } + + let mut json_body = serde_json::from_slice::(&body).ok(); + + let (sender_user, sender_device, sender_servername, appservice_info) = match (metadata.authentication, token) { + (_, Token::Invalid) => { + return Err(Error::BadRequest( + ErrorKind::UnknownToken { + soft_logout: false, + }, + "Unknown access token.", + )) + }, + (AuthScheme::AccessToken | AuthScheme::AccessTokenOptional, Token::Appservice(info)) => { + let user_id = query_params + .user_id + .map_or_else( + || { + UserId::parse_with_server_name( + info.registration.sender_localpart.as_str(), + services().globals.server_name(), + ) + }, + UserId::parse, + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace.")); + } + + if !services().users.exists(&user_id)? 
{ + return Err(Error::BadRequest(ErrorKind::forbidden(), "User does not exist.")); + } + + (Some(user_id), None, None, Some(*info)) + }, + (AuthScheme::None | AuthScheme::AppserviceToken, Token::Appservice(info)) => { + (None, None, None, Some(*info)) + }, + (AuthScheme::AccessToken, Token::None) => { + return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing access token.")); + }, + ( + AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None, + Token::User((user_id, device_id)), + ) => (Some(user_id), Some(device_id), None, None), + (AuthScheme::ServerSignatures, Token::None) => { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let TypedHeader(Authorization(x_matrix)) = parts + .extract::>>() + .await + .map_err(|e| { + warn!("Missing or invalid Authorization header: {e}"); + + let msg = match e.reason() { + TypedHeaderRejectionReason::Missing => "Missing Authorization header.", + TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.", + _ => "Unknown header-related error", + }; + + Error::BadRequest(ErrorKind::forbidden(), msg) + })?; + + let origin_signatures = + BTreeMap::from_iter([(x_matrix.key.clone(), CanonicalJsonValue::String(x_matrix.sig))]); + + let signatures = BTreeMap::from_iter([( + x_matrix.origin.as_str().to_owned(), + CanonicalJsonValue::Object(origin_signatures), + )]); + + let server_destination = services().globals.server_name().as_str().to_owned(); + + if let Some(destination) = x_matrix.destination.as_ref() { + if destination != &server_destination { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Invalid authorization.")); + } + } + + let signature_uri = CanonicalJsonValue::String( + parts + .uri + .path_and_query() + .unwrap_or(&PathAndQuery::from_static("/")) + .to_string(), + ); + + let mut request_map = BTreeMap::from_iter([ + ("method".to_owned(), CanonicalJsonValue::String(parts.method.to_string())), + 
("uri".to_owned(), signature_uri), + ( + "origin".to_owned(), + CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()), + ), + ("destination".to_owned(), CanonicalJsonValue::String(server_destination)), + ("signatures".to_owned(), CanonicalJsonValue::Object(signatures)), + ]); + + if let Some(json_body) = &json_body { + request_map.insert("content".to_owned(), json_body.clone()); + }; + + let keys_result = services() + .rooms + .event_handler + .fetch_signing_keys_for_server(&x_matrix.origin, vec![x_matrix.key.clone()]) + .await; + + let keys = keys_result.map_err(|e| { + warn!("Failed to fetch signing keys: {e}"); + Error::BadRequest(ErrorKind::forbidden(), "Failed to fetch signing keys.") + })?; + + let pub_key_map = BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]); + + match ruma::signatures::verify_json(&pub_key_map, &request_map) { + Ok(()) => (None, None, Some(x_matrix.origin), None), + Err(e) => { + warn!("Failed to verify json request from {}: {e}\n{request_map:?}", x_matrix.origin); + + if parts.uri.to_string().contains('@') { + warn!( + "Request uri contained '@' character. 
Make sure your reverse proxy gives Conduit the \ + raw uri (apache: use nocanon)" + ); + } + + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Failed to verify X-Matrix signatures.", + )); + }, + } + }, + (AuthScheme::None | AuthScheme::AppserviceToken | AuthScheme::AccessTokenOptional, Token::None) => { + (None, None, None, None) + }, + (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => { + return Err(Error::BadRequest( + ErrorKind::Unauthorized, + "Only server signatures should be used on this endpoint.", + )); + }, + (AuthScheme::AppserviceToken, Token::User(_)) => { + return Err(Error::BadRequest( + ErrorKind::Unauthorized, + "Only appservice access tokens should be used on this endpoint.", + )); + }, + }; + + let mut http_request = Request::builder().uri(parts.uri).method(parts.method); + *http_request.headers_mut().unwrap() = parts.headers; + + if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { + let user_id = sender_user.clone().unwrap_or_else(|| { + UserId::parse_with_server_name("", services().globals.server_name()).expect("we know this is valid") + }); + + let uiaa_request = json_body + .get("auth") + .and_then(|auth| auth.as_object()) + .and_then(|auth| auth.get("session")) + .and_then(|session| session.as_str()) + .and_then(|session| { + services().uiaa.get_uiaa_request( + &user_id, + &sender_device.clone().unwrap_or_else(|| "".into()), + session, + ) + }); + + if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { + for (key, value) in initial_request { + json_body.entry(key).or_insert(value); + } + } + + let mut buf = BytesMut::new().writer(); + serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail"); + body = buf.into_inner().freeze(); + } + + let http_request = http_request.body(&*body).unwrap(); + debug!( + "{:?} {:?} {:?}", + http_request.method(), + http_request.uri(), + http_request.headers() + ); + + trace!("{:?} {:?} {:?}", http_request.method(), 
http_request.uri(), json_body); + let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { + warn!("try_from_http_request failed: {e:?}\nPath parameters: {path_params:?}",); + debug!("JSON body: {:?}", json_body); + Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") + })?; + + Ok(Ruma { + body, + sender_user, + sender_device, + sender_servername, + json_body, + appservice_info, + }) + } +} + +struct XMatrix { + origin: OwnedServerName, + destination: Option, + key: String, // KeyName? + sig: String, +} + +impl Credentials for XMatrix { + const SCHEME: &'static str = "X-Matrix"; + + fn decode(value: &http::HeaderValue) -> Option { + debug_assert!( + value.as_bytes().starts_with(b"X-Matrix "), + "HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}", + ); + + let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..]) + .ok()? + .trim_start(); + + let mut origin = None; + let mut destination = None; + let mut key = None; + let mut sig = None; + + for entry in parameters.split_terminator(',') { + let (name, value) = entry.split_once('=')?; + + // It's not at all clear why some fields are quoted and others not in the spec, + // let's simply accept either form for every field. 
+ let value = value + .strip_prefix('"') + .and_then(|rest| rest.strip_suffix('"')) + .unwrap_or(value); + + // FIXME: Catch multiple fields of the same name + match name { + "origin" => origin = Some(value.try_into().ok()?), + "key" => key = Some(value.to_owned()), + "sig" => sig = Some(value.to_owned()), + "destination" => destination = Some(value.to_owned()), + _ => debug!("Unexpected field `{name}` in X-Matrix Authorization header"), + } + } + + Some(Self { + origin: origin?, + key: key?, + sig: sig?, + destination, + }) + } + + fn encode(&self) -> http::HeaderValue { todo!() } +} + +impl IntoResponse for RumaResponse { + fn into_response(self) -> Response { + match self.0.try_into_http_response::() { + Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(), + Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), + } + } +} + +// copied from hyper under the following license: +// Copyright (c) 2014-2021 Sean McArthur + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +pub(crate) async fn to_bytes(body: T) -> Result +where + T: HttpBody, +{ + futures_util::pin_mut!(body); + + // If there's only 1 chunk, we can just return Buf::to_bytes() + let mut first = if let Some(buf) = body.data().await { + buf? + } else { + return Ok(Bytes::new()); + }; + + let second = if let Some(buf) = body.data().await { + buf? + } else { + return Ok(first.copy_to_bytes(first.remaining())); + }; + + // With more than 1 buf, we gotta flatten into a Vec first. + let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize; + let mut vec = Vec::with_capacity(cap); + vec.put(first); + vec.put(second); + + while let Some(buf) = body.data().await { + vec.put(buf?); + } + + Ok(vec.into()) +} diff --git a/src/api/ruma_wrapper/mod.rs b/src/api/ruma_wrapper/mod.rs new file mode 100644 index 00000000..0dd3a617 --- /dev/null +++ b/src/api/ruma_wrapper/mod.rs @@ -0,0 +1,35 @@ +use std::ops::Deref; + +use ruma::{api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId}; + +use crate::{service::appservice::RegistrationInfo, Error}; + +mod axum; + +/// Extractor for Ruma request structs +pub struct Ruma { + pub body: T, + pub sender_user: Option, + pub sender_device: Option, + pub sender_servername: Option, + // This is None when body is not a valid string + pub json_body: Option, + pub appservice_info: Option, +} + +impl Deref for Ruma { + type Target = T; + + fn deref(&self) -> &Self::Target { &self.body } +} + +#[derive(Clone)] +pub struct RumaResponse(pub T); + +impl From for RumaResponse { + fn from(t: T) -> Self { Self(t) } +} + +impl From for RumaResponse { + fn from(t: Error) -> Self { t.to_response() } +} diff --git 
a/src/api/server/backfill.rs b/src/api/server/backfill.rs deleted file mode 100644 index 3cfbcedc..00000000 --- a/src/api/server/backfill.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::cmp; - -use axum::extract::State; -use conduwuit::{ - PduCount, Result, - utils::{IterStream, ReadyExt, stream::TryTools}, -}; -use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill}; - -use super::AccessCheck; -use crate::Ruma; - -/// arbitrary number but synapse's is 100 and we can handle lots of these -/// anyways -const LIMIT_MAX: usize = 150; -/// no spec defined number but we can handle a lot of these -const LIMIT_DEFAULT: usize = 50; - -/// # `GET /_matrix/federation/v1/backfill/` -/// -/// Retrieves events from before the sender joined the room, if the room's -/// history visibility allows. -pub(crate) async fn get_backfill_route( - State(services): State, - ref body: Ruma, -) -> Result { - AccessCheck { - services: &services, - origin: body.origin(), - room_id: &body.room_id, - event_id: None, - } - .check() - .await?; - - let limit = body - .limit - .try_into() - .unwrap_or(LIMIT_DEFAULT) - .min(LIMIT_MAX); - - let from = body - .v - .iter() - .stream() - .filter_map(|event_id| { - services - .rooms - .timeline - .get_pdu_count(event_id) - .map(Result::ok) - }) - .ready_fold(PduCount::min(), cmp::max) - .await; - - Ok(get_backfill::v1::Response { - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - - origin: services.globals.server_name().to_owned(), - - pdus: services - .rooms - .timeline - .pdus_rev(None, &body.room_id, Some(from.saturating_add(1))) - .try_take(limit) - .try_filter_map(|(_, pdu)| async move { - Ok(services - .rooms - .state_accessor - .server_can_see_event(body.origin(), &pdu.room_id, &pdu.event_id) - .await - .then_some(pdu)) - }) - .try_filter_map(|pdu| async move { - Ok(services - .rooms - .timeline - .get_pdu_json(&pdu.event_id) - .await - .ok()) - }) - .and_then(|pdu| { - 
services - .sending - .convert_to_outgoing_federation_event(pdu) - .map(Ok) - }) - .try_collect() - .await?, - }) -} diff --git a/src/api/server/event.rs b/src/api/server/event.rs deleted file mode 100644 index 5846c6d7..00000000 --- a/src/api/server/event.rs +++ /dev/null @@ -1,49 +0,0 @@ -use axum::extract::State; -use conduwuit::{Result, err}; -use ruma::{MilliSecondsSinceUnixEpoch, RoomId, api::federation::event::get_event}; - -use super::AccessCheck; -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/event/{eventId}` -/// -/// Retrieves a single event from the server. -/// -/// - Only works if a user of this server is currently invited or joined the -/// room -pub(crate) async fn get_event_route( - State(services): State, - body: Ruma, -) -> Result { - let event = services - .rooms - .timeline - .get_pdu_json(&body.event_id) - .await - .map_err(|_| err!(Request(NotFound("Event not found."))))?; - - let room_id: &RoomId = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| err!(Database("Invalid event in database.")))? 
- .try_into() - .map_err(|_| err!(Database("Invalid room_id in event in database.")))?; - - AccessCheck { - services: &services, - origin: body.origin(), - room_id, - event_id: Some(&body.event_id), - } - .check() - .await?; - - Ok(get_event::v1::Response { - origin: services.globals.server_name().to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdu: services - .sending - .convert_to_outgoing_federation_event(event) - .await, - }) -} diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs deleted file mode 100644 index c9e210f5..00000000 --- a/src/api/server/event_auth.rs +++ /dev/null @@ -1,58 +0,0 @@ -use std::{borrow::Borrow, iter::once}; - -use axum::extract::State; -use conduwuit::{Error, Result, utils::stream::ReadyExt}; -use futures::StreamExt; -use ruma::{ - RoomId, - api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, -}; - -use super::AccessCheck; -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` -/// -/// Retrieves the auth chain for a given event. 
-/// -/// - This does not include the event itself -pub(crate) async fn get_event_authorization_route( - State(services): State, - body: Ruma, -) -> Result { - AccessCheck { - services: &services, - origin: body.origin(), - room_id: &body.room_id, - event_id: None, - } - .check() - .await?; - - let event = services - .rooms - .timeline - .get_pdu_json(&body.event_id) - .await - .map_err(|_| Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; - - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database."))?; - - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room_id in event in database."))?; - - let auth_chain = services - .rooms - .auth_chain - .event_ids_iter(room_id, once(body.event_id.borrow())) - .ready_filter_map(Result::ok) - .filter_map(|id| async move { services.rooms.timeline.get_pdu_json(&id).await.ok() }) - .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) - .collect() - .await; - - Ok(get_event_authorization::v1::Response { auth_chain }) -} diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs deleted file mode 100644 index 04dc30ed..00000000 --- a/src/api/server/get_missing_events.rs +++ /dev/null @@ -1,90 +0,0 @@ -use axum::extract::State; -use conduwuit::{Result, debug, debug_error, utils::to_canonical_object}; -use ruma::api::federation::event::get_missing_events; - -use super::AccessCheck; -use crate::Ruma; - -/// arbitrary number but synapse's is 20 and we can handle lots of these anyways -const LIMIT_MAX: usize = 50; -/// spec says default is 10 -const LIMIT_DEFAULT: usize = 10; - -/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` -/// -/// Retrieves events that the sender is missing. 
-pub(crate) async fn get_missing_events_route( - State(services): State, - body: Ruma, -) -> Result { - AccessCheck { - services: &services, - origin: body.origin(), - room_id: &body.room_id, - event_id: None, - } - .check() - .await?; - - let limit = body - .limit - .try_into() - .unwrap_or(LIMIT_DEFAULT) - .min(LIMIT_MAX); - - let mut queued_events = body.latest_events.clone(); - // the vec will never have more entries the limit - let mut events = Vec::with_capacity(limit); - - let mut i: usize = 0; - while i < queued_events.len() && events.len() < limit { - let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else { - debug!( - ?body.origin, - "Event {} does not exist locally, skipping", &queued_events[i] - ); - i = i.saturating_add(1); - continue; - }; - - if body.earliest_events.contains(&queued_events[i]) { - i = i.saturating_add(1); - continue; - } - - if !services - .rooms - .state_accessor - .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) - .await - { - debug!( - ?body.origin, - "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id - ); - i = i.saturating_add(1); - continue; - } - - let Ok(event) = to_canonical_object(&pdu) else { - debug_error!( - ?body.origin, - "Failed to convert PDU in database to canonical JSON: {pdu:?}" - ); - i = i.saturating_add(1); - continue; - }; - - let prev_events = pdu.prev_events.iter().map(ToOwned::to_owned); - - let event = services - .sending - .convert_to_outgoing_federation_event(event) - .await; - - queued_events.extend(prev_events); - events.push(event); - } - - Ok(get_missing_events::v1::Response { events }) -} diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs deleted file mode 100644 index 42c348f9..00000000 --- a/src/api/server/hierarchy.rs +++ /dev/null @@ -1,74 +0,0 @@ -use axum::extract::State; -use conduwuit::{ - Err, Result, - utils::stream::{BroadbandExt, IterStream}, -}; -use conduwuit_service::rooms::spaces::{ - Identifier, 
SummaryAccessibility, get_parent_children_via, -}; -use futures::{FutureExt, StreamExt}; -use ruma::api::federation::space::get_hierarchy; - -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/hierarchy/{roomId}` -/// -/// Gets the space tree in a depth-first manner to locate child rooms of a given -/// space. -pub(crate) async fn get_hierarchy_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.rooms.metadata.exists(&body.room_id).await { - return Err!(Request(NotFound("Room does not exist."))); - } - - let room_id = &body.room_id; - let suggested_only = body.suggested_only; - let ref identifier = Identifier::ServerName(body.origin()); - match services - .rooms - .spaces - .get_summary_and_children_local(room_id, identifier) - .await? - { - | None => Err!(Request(NotFound("The requested room was not found"))), - - | Some(SummaryAccessibility::Inaccessible) => { - Err!(Request(NotFound("The requested room is inaccessible"))) - }, - - | Some(SummaryAccessibility::Accessible(room)) => { - let (children, inaccessible_children) = - get_parent_children_via(&room, suggested_only) - .stream() - .broad_filter_map(|(child, _via)| async move { - match services - .rooms - .spaces - .get_summary_and_children_local(&child, identifier) - .await - .ok()? 
- { - | None => None, - - | Some(SummaryAccessibility::Inaccessible) => - Some((None, Some(child))), - - | Some(SummaryAccessibility::Accessible(summary)) => - Some((Some(summary), None)), - } - }) - .unzip() - .map(|(children, inaccessible_children): (Vec<_>, Vec<_>)| { - ( - children.into_iter().flatten().map(Into::into).collect(), - inaccessible_children.into_iter().flatten().collect(), - ) - }) - .await; - - Ok(get_hierarchy::v1::Response { room, children, inaccessible_children }) - }, - } -} diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs deleted file mode 100644 index f53e1a15..00000000 --- a/src/api/server/invite.rs +++ /dev/null @@ -1,166 +0,0 @@ -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use base64::{Engine as _, engine::general_purpose}; -use conduwuit::{ - Err, Error, PduEvent, Result, err, pdu::gen_event_id, utils, utils::hash::sha256, warn, -}; -use ruma::{ - CanonicalJsonValue, OwnedUserId, UserId, - api::{client::error::ErrorKind, federation::membership::create_invite}, - events::room::member::{MembershipState, RoomMemberEventContent}, - serde::JsonObject, -}; - -use crate::Ruma; - -/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` -/// -/// Invites a remote user to a room. 
-#[tracing::instrument(skip_all, fields(%client), name = "invite")] -pub(crate) async fn create_invite_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - // ACL check origin - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - if !services.server.supported_room_version(&body.room_version) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone() }, - "Server does not support this room version.", - )); - } - - if let Some(server) = body.room_id.server_name() { - if services.moderation.is_remote_server_forbidden(server) { - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - } - - if services - .moderation - .is_remote_server_forbidden(body.origin()) - { - warn!( - "Received federated/remote invite from banned server {} for room ID {}. Rejecting.", - body.origin(), - body.room_id - ); - - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - - let mut signed_event = utils::to_canonical_object(&body.event) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; - - let invited_user: OwnedUserId = signed_event - .get("state_key") - .try_into() - .map(UserId::to_owned) - .map_err(|e| err!(Request(InvalidParam("Invalid state_key property: {e}"))))?; - - if !services.globals.server_is_ours(invited_user.server_name()) { - return Err!(Request(InvalidParam("User does not belong to this homeserver."))); - } - - // Make sure we're not ACL'ed from their room. 
- services - .rooms - .event_handler - .acl_check(invited_user.server_name(), &body.room_id) - .await?; - - services - .server_keys - .hash_and_sign_event(&mut signed_event, &body.room_version) - .map_err(|e| err!(Request(InvalidParam("Failed to sign event: {e}"))))?; - - // Generate event id - let event_id = gen_event_id(&signed_event, &body.room_version)?; - - // Add event_id back - signed_event.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.to_string())); - - let sender: &UserId = signed_event - .get("sender") - .try_into() - .map_err(|e| err!(Request(InvalidParam("Invalid sender property: {e}"))))?; - - if services.rooms.metadata.is_banned(&body.room_id).await - && !services.users.is_admin(&invited_user).await - { - return Err!(Request(Forbidden("This room is banned on this homeserver."))); - } - - if services.config.block_non_admin_invites && !services.users.is_admin(&invited_user).await { - return Err!(Request(Forbidden("This server does not allow room invites."))); - } - - let mut invite_state = body.invite_room_state.clone(); - - let mut event: JsonObject = serde_json::from_str(body.event.get()) - .map_err(|e| err!(Request(BadJson("Invalid invite event PDU: {e}"))))?; - - event.insert("event_id".to_owned(), "$placeholder".into()); - - let pdu: PduEvent = serde_json::from_value(event.into()) - .map_err(|e| err!(Request(BadJson("Invalid invite event PDU: {e}"))))?; - - invite_state.push(pdu.to_stripped_state_event()); - - // If we are active in the room, the remote server will notify us about the - // join/invite through /send. If we are not in the room, we need to manually - // record the invited state for client /sync through update_membership(), and - // send the invite PDU to the relevant appservices. 
- if !services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), &body.room_id) - .await - { - services - .rooms - .state_cache - .update_membership( - &body.room_id, - &invited_user, - RoomMemberEventContent::new(MembershipState::Invite), - sender, - Some(invite_state), - body.via.clone(), - true, - ) - .await?; - - for appservice in services.appservice.read().await.values() { - if appservice.is_user_match(&invited_user) { - services - .sending - .send_appservice_request( - appservice.registration.clone(), - ruma::api::appservice::event::push_events::v1::Request { - events: vec![pdu.to_room_event()], - txn_id: general_purpose::URL_SAFE_NO_PAD - .encode(sha256::hash(pdu.event_id.as_bytes())) - .into(), - ephemeral: Vec::new(), - to_device: Vec::new(), - }, - ) - .await?; - } - } - } - - Ok(create_invite::v2::Response { - event: services - .sending - .convert_to_outgoing_federation_event(signed_event) - .await, - }) -} diff --git a/src/api/server/key.rs b/src/api/server/key.rs deleted file mode 100644 index f9bd0926..00000000 --- a/src/api/server/key.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::{ - mem::take, - time::{Duration, SystemTime}, -}; - -use axum::{Json, extract::State, response::IntoResponse}; -use conduwuit::{Result, utils::timepoint_from_now}; -use ruma::{ - MilliSecondsSinceUnixEpoch, Signatures, - api::{ - OutgoingResponse, - federation::discovery::{OldVerifyKey, ServerSigningKeys, get_server_keys}, - }, - serde::Raw, -}; - -/// # `GET /_matrix/key/v2/server` -/// -/// Gets the public signing keys of this server. -/// -/// - Matrix does not support invalidating public keys, so the key returned by -/// this will be valid forever. 
-// Response type for this endpoint is Json because we need to calculate a -// signature for the response -pub(crate) async fn get_server_keys_route( - State(services): State, -) -> Result { - let server_name = services.globals.server_name(); - let active_key_id = services.server_keys.active_key_id(); - let mut all_keys = services.server_keys.verify_keys_for(server_name).await; - - let verify_keys = all_keys - .remove_entry(active_key_id) - .expect("active verify_key is missing"); - - let old_verify_keys = all_keys - .into_iter() - .map(|(id, key)| (id, OldVerifyKey::new(expires_ts(), key.key))) - .collect(); - - let server_key = ServerSigningKeys { - verify_keys: [verify_keys].into(), - old_verify_keys, - server_name: server_name.to_owned(), - valid_until_ts: valid_until_ts(), - signatures: Signatures::new(), - }; - - let server_key = Raw::new(&server_key)?; - let mut response = get_server_keys::v2::Response::new(server_key) - .try_into_http_response::>() - .map(|mut response| take(response.body_mut())) - .and_then(|body| serde_json::from_slice(&body).map_err(Into::into))?; - - services.server_keys.sign_json(&mut response)?; - - Ok(Json(response)) -} - -fn valid_until_ts() -> MilliSecondsSinceUnixEpoch { - let dur = Duration::from_secs(86400 * 7); - let timepoint = timepoint_from_now(dur).expect("SystemTime should not overflow"); - MilliSecondsSinceUnixEpoch::from_system_time(timepoint).expect("UInt should not overflow") -} - -fn expires_ts() -> MilliSecondsSinceUnixEpoch { - let timepoint = SystemTime::now(); - MilliSecondsSinceUnixEpoch::from_system_time(timepoint).expect("UInt should not overflow") -} - -/// # `GET /_matrix/key/v2/server/{keyId}` -/// -/// Gets the public signing keys of this server. -/// -/// - Matrix does not support invalidating public keys, so the key returned by -/// this will be valid forever. 
-pub(crate) async fn get_server_keys_deprecated_route( - State(services): State, -) -> impl IntoResponse { - get_server_keys_route(State(services)).await -} diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs deleted file mode 100644 index 3204c30c..00000000 --- a/src/api/server/make_join.rs +++ /dev/null @@ -1,218 +0,0 @@ -use axum::extract::State; -use conduwuit::{ - Err, Error, Result, debug_info, matrix::pdu::PduBuilder, utils::IterStream, warn, -}; -use conduwuit_service::Services; -use futures::StreamExt; -use ruma::{ - CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, - api::{client::error::ErrorKind, federation::membership::prepare_join_event}, - events::{ - StateEventType, - room::{ - join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - }, - }, -}; -use serde_json::value::to_raw_value; - -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` -/// -/// Creates a join template. -pub(crate) async fn create_join_event_template_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.rooms.metadata.exists(&body.room_id).await { - return Err!(Request(NotFound("Room is unknown to this server."))); - } - - if body.user_id.server_name() != body.origin() { - return Err!(Request(BadJson("Not allowed to join on behalf of another server/user."))); - } - - // ACL check origin server - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - if services - .moderation - .is_remote_server_forbidden(body.origin()) - { - warn!( - "Server {} for remote user {} tried joining room ID {} which has a server name that \ - is globally forbidden. 
Rejecting.", - body.origin(), - &body.user_id, - &body.room_id, - ); - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - - if let Some(server) = body.room_id.server_name() { - if services.moderation.is_remote_server_forbidden(server) { - return Err!(Request(Forbidden(warn!( - "Room ID server name {server} is banned on this homeserver." - )))); - } - } - - let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; - if !body.ver.contains(&room_version_id) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { room_version: room_version_id }, - "Room version not supported.", - )); - } - - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - let join_authorized_via_users_server: Option = { - use RoomVersionId::*; - if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { - // room version does not support restricted join rules - None - } else if user_can_perform_restricted_join( - &services, - &body.user_id, - &body.room_id, - &room_version_id, - ) - .await? - { - let Some(auth_user) = services - .rooms - .state_cache - .local_users_in_room(&body.room_id) - .filter(|user| { - services.rooms.state_accessor.user_can_invite( - &body.room_id, - user, - &body.user_id, - &state_lock, - ) - }) - .boxed() - .next() - .await - .map(ToOwned::to_owned) - else { - return Err!(Request(UnableToGrantJoin( - "No user on this server is able to assist in joining." 
- ))); - }; - Some(auth_user) - } else { - None - } - }; - - let (_pdu, mut pdu_json) = services - .rooms - .timeline - .create_hash_and_sign_event( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { - join_authorized_via_users_server, - ..RoomMemberEventContent::new(MembershipState::Join) - }), - &body.user_id, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - // room v3 and above removed the "event_id" field from remote PDU format - maybe_strip_event_id(&mut pdu_json, &room_version_id)?; - - Ok(prepare_join_event::v1::Response { - room_version: Some(room_version_id), - event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), - }) -} - -/// Checks whether the given user can join the given room via a restricted join. -pub(crate) async fn user_can_perform_restricted_join( - services: &Services, - user_id: &UserId, - room_id: &RoomId, - room_version_id: &RoomVersionId, -) -> Result { - use RoomVersionId::*; - - // restricted rooms are not supported on <=v7 - if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { - return Ok(false); - } - - if services.rooms.state_cache.is_joined(user_id, room_id).await { - // joining user is already joined, there is nothing we need to do - return Ok(false); - } - - let Ok(join_rules_event_content) = services - .rooms - .state_accessor - .room_state_get_content::( - room_id, - &StateEventType::RoomJoinRules, - "", - ) - .await - else { - return Ok(false); - }; - - let (JoinRule::Restricted(r) | JoinRule::KnockRestricted(r)) = - join_rules_event_content.join_rule - else { - return Ok(false); - }; - - if r.allow.is_empty() { - debug_info!("{room_id} is restricted but the allow key is empty"); - return Ok(false); - } - - if r.allow - .iter() - .filter_map(|rule| { - if let AllowRule::RoomMembership(membership) = rule { - Some(membership) - } else { - None - } - }) - .stream() - .any(|m| services.rooms.state_cache.is_joined(user_id, &m.room_id)) - .await - { - 
Ok(true) - } else { - Err!(Request(UnableToAuthorizeJoin( - "Joining user is not known to be in any required room." - ))) - } -} - -pub(crate) fn maybe_strip_event_id( - pdu_json: &mut CanonicalJsonObject, - room_version_id: &RoomVersionId, -) -> Result { - use RoomVersionId::*; - - match room_version_id { - | V1 | V2 => Ok(()), - | _ => { - pdu_json.remove("event_id"); - Ok(()) - }, - } -} diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs deleted file mode 100644 index 423c8e81..00000000 --- a/src/api/server/make_knock.rs +++ /dev/null @@ -1,112 +0,0 @@ -use RoomVersionId::*; -use axum::extract::State; -use conduwuit::{Err, Error, Result, debug_warn, matrix::pdu::PduBuilder, warn}; -use ruma::{ - RoomVersionId, - api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, - events::room::member::{MembershipState, RoomMemberEventContent}, -}; -use serde_json::value::to_raw_value; - -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` -/// -/// Creates a knock template. -pub(crate) async fn create_knock_event_template_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.rooms.metadata.exists(&body.room_id).await { - return Err!(Request(NotFound("Room is unknown to this server."))); - } - - if body.user_id.server_name() != body.origin() { - return Err!(Request(BadJson("Not allowed to knock on behalf of another server/user."))); - } - - // ACL check origin server - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - if services - .moderation - .is_remote_server_forbidden(body.origin()) - { - warn!( - "Server {} for remote user {} tried knocking room ID {} which has a server name \ - that is globally forbidden. 
Rejecting.", - body.origin(), - &body.user_id, - &body.room_id, - ); - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - - if let Some(server) = body.room_id.server_name() { - if services.moderation.is_remote_server_forbidden(server) { - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - } - - let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; - - if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { room_version: room_version_id }, - "Room version does not support knocking.", - )); - } - - if !body.ver.contains(&room_version_id) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { room_version: room_version_id }, - "Your homeserver does not support the features required to knock on this room.", - )); - } - - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - if let Ok(membership) = services - .rooms - .state_accessor - .get_member(&body.room_id, &body.user_id) - .await - { - if membership.membership == MembershipState::Ban { - debug_warn!( - "Remote user {} is banned from {} but attempted to knock", - &body.user_id, - &body.room_id - ); - return Err!(Request(Forbidden("You cannot knock on a room you are banned from."))); - } - } - - let (_pdu, mut pdu_json) = services - .rooms - .timeline - .create_hash_and_sign_event( - PduBuilder::state( - body.user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Knock), - ), - &body.user_id, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - // room v3 and above removed the "event_id" field from remote PDU format - super::maybe_strip_event_id(&mut pdu_json, &room_version_id)?; - - Ok(create_knock_event_template::v1::Response { - room_version: room_version_id, - event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), - }) -} diff --git 
a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs deleted file mode 100644 index cb6bd2fa..00000000 --- a/src/api/server/make_leave.rs +++ /dev/null @@ -1,62 +0,0 @@ -use axum::extract::State; -use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; -use ruma::{ - api::federation::membership::prepare_leave_event, - events::room::member::{MembershipState, RoomMemberEventContent}, -}; -use serde_json::value::to_raw_value; - -use super::make_join::maybe_strip_event_id; -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` -/// -/// Creates a leave template. -pub(crate) async fn create_leave_event_template_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.rooms.metadata.exists(&body.room_id).await { - return Err!(Request(NotFound("Room is unknown to this server."))); - } - - if body.user_id.server_name() != body.origin() { - return Err!(Request(Forbidden( - "Not allowed to leave on behalf of another server/user." - ))); - } - - // ACL check origin - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - let (_pdu, mut pdu_json) = services - .rooms - .timeline - .create_hash_and_sign_event( - PduBuilder::state( - body.user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Leave), - ), - &body.user_id, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - // room v3 and above removed the "event_id" field from remote PDU format - maybe_strip_event_id(&mut pdu_json, &room_version_id)?; - - Ok(prepare_leave_event::v1::Response { - room_version: Some(room_version_id), - event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), - }) -} diff --git a/src/api/server/media.rs b/src/api/server/media.rs deleted file mode 100644 index cbe8595b..00000000 --- 
a/src/api/server/media.rs +++ /dev/null @@ -1,97 +0,0 @@ -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, Result, utils::content_disposition::make_content_disposition}; -use conduwuit_service::media::{Dim, FileMeta}; -use ruma::{ - Mxc, - api::federation::authenticated_media::{ - Content, ContentMetadata, FileOrLocation, get_content, get_content_thumbnail, - }, -}; - -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/media/download/{mediaId}` -/// -/// Load media from our server. -#[tracing::instrument( - name = "media_get", - level = "debug", - skip_all, - fields(%client) -)] -pub(crate) async fn get_content_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let mxc = Mxc { - server_name: services.globals.server_name(), - media_id: &body.media_id, - }; - - let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get(&mxc).await? - else { - return Err!(Request(NotFound("Media not found."))); - }; - - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); - let content = Content { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - }; - - Ok(get_content::v1::Response { - content: FileOrLocation::File(content), - metadata: ContentMetadata::new(), - }) -} - -/// # `GET /_matrix/federation/v1/media/thumbnail/{mediaId}` -/// -/// Load media thumbnail from our server. 
-#[tracing::instrument( - name = "media_thumbnail_get", - level = "debug", - skip_all, - fields(%client) -)] -pub(crate) async fn get_content_thumbnail_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; - let mxc = Mxc { - server_name: services.globals.server_name(), - media_id: &body.media_id, - }; - - let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get_thumbnail(&mxc, &dim).await? - else { - return Err!(Request(NotFound("Media not found."))); - }; - - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); - let content = Content { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - }; - - Ok(get_content_thumbnail::v1::Response { - content: FileOrLocation::File(content), - metadata: ContentMetadata::new(), - }) -} diff --git a/src/api/server/mod.rs b/src/api/server/mod.rs deleted file mode 100644 index 5c1ff3f7..00000000 --- a/src/api/server/mod.rs +++ /dev/null @@ -1,50 +0,0 @@ -pub(super) mod backfill; -pub(super) mod event; -pub(super) mod event_auth; -pub(super) mod get_missing_events; -pub(super) mod hierarchy; -pub(super) mod invite; -pub(super) mod key; -pub(super) mod make_join; -pub(super) mod make_knock; -pub(super) mod make_leave; -pub(super) mod media; -pub(super) mod openid; -pub(super) mod publicrooms; -pub(super) mod query; -pub(super) mod send; -pub(super) mod send_join; -pub(super) mod send_knock; -pub(super) mod send_leave; -pub(super) mod state; -pub(super) mod state_ids; -pub(super) mod user; -pub(super) mod version; -pub(super) mod well_known; - -pub(super) use backfill::*; -pub(super) use event::*; -pub(super) use event_auth::*; -pub(super) use get_missing_events::*; -pub(super) use hierarchy::*; -pub(super) use 
invite::*; -pub(super) use key::*; -pub(super) use make_join::*; -pub(super) use make_knock::*; -pub(super) use make_leave::*; -pub(super) use media::*; -pub(super) use openid::*; -pub(super) use publicrooms::*; -pub(super) use query::*; -pub(super) use send::*; -pub(super) use send_join::*; -pub(super) use send_knock::*; -pub(super) use send_leave::*; -pub(super) use state::*; -pub(super) use state_ids::*; -pub(super) use user::*; -pub(super) use version::*; -pub(super) use well_known::*; - -mod utils; -use utils::AccessCheck; diff --git a/src/api/server/openid.rs b/src/api/server/openid.rs deleted file mode 100644 index a09cd7ad..00000000 --- a/src/api/server/openid.rs +++ /dev/null @@ -1,20 +0,0 @@ -use axum::extract::State; -use conduwuit::Result; -use ruma::api::federation::openid::get_openid_userinfo; - -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/openid/userinfo` -/// -/// Get information about the user that generated the OpenID token. -pub(crate) async fn get_openid_userinfo_route( - State(services): State, - body: Ruma, -) -> Result { - Ok(get_openid_userinfo::v1::Response::new( - services - .users - .find_from_openid_token(&body.access_token) - .await?, - )) -} diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs deleted file mode 100644 index cf66ea71..00000000 --- a/src/api/server/publicrooms.rs +++ /dev/null @@ -1,87 +0,0 @@ -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{Error, Result}; -use ruma::{ - api::{ - client::error::ErrorKind, - federation::directory::{get_public_rooms, get_public_rooms_filtered}, - }, - directory::Filter, -}; - -use crate::Ruma; - -/// # `POST /_matrix/federation/v1/publicRooms` -/// -/// Lists the public rooms on this server. 
-#[tracing::instrument(name = "publicrooms", level = "debug", skip_all, fields(%client))] -pub(crate) async fn get_public_rooms_filtered_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - if !services - .server - .config - .allow_public_room_directory_over_federation - { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Room directory is not public")); - } - - let response = crate::client::get_public_rooms_filtered_helper( - &services, - None, - body.limit, - body.since.as_deref(), - &body.filter, - &body.room_network, - ) - .await - .map_err(|_| { - Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.") - })?; - - Ok(get_public_rooms_filtered::v1::Response { - chunk: response.chunk, - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - }) -} - -/// # `GET /_matrix/federation/v1/publicRooms` -/// -/// Lists the public rooms on this server. 
-#[tracing::instrument(name = "publicrooms", level = "debug", skip_all, fields(%client))] -pub(crate) async fn get_public_rooms_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - if !services - .globals - .allow_public_room_directory_over_federation() - { - return Err(Error::BadRequest(ErrorKind::forbidden(), "Room directory is not public")); - } - - let response = crate::client::get_public_rooms_filtered_helper( - &services, - None, - body.limit, - body.since.as_deref(), - &Filter::default(), - &body.room_network, - ) - .await - .map_err(|_| { - Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.") - })?; - - Ok(get_public_rooms::v1::Response { - chunk: response.chunk, - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - }) -} diff --git a/src/api/server/query.rs b/src/api/server/query.rs deleted file mode 100644 index 9d4fcf73..00000000 --- a/src/api/server/query.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::collections::BTreeMap; - -use axum::extract::State; -use conduwuit::{Error, Result, err}; -use futures::StreamExt; -use get_profile_information::v1::ProfileField; -use rand::seq::SliceRandom; -use ruma::{ - OwnedServerName, - api::{ - client::error::ErrorKind, - federation::query::{get_profile_information, get_room_information}, - }, -}; - -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/query/directory` -/// -/// Resolve a room alias to a room id. 
-pub(crate) async fn get_room_information_route( - State(services): State, - body: Ruma, -) -> Result { - let room_id = services - .rooms - .alias - .resolve_local_alias(&body.room_alias) - .await - .map_err(|_| err!(Request(NotFound("Room alias not found."))))?; - - let mut servers: Vec = services - .rooms - .state_cache - .room_servers(&room_id) - .map(ToOwned::to_owned) - .collect() - .await; - - servers.sort_unstable(); - servers.dedup(); - - servers.shuffle(&mut rand::thread_rng()); - - // insert our server as the very first choice if in list - if let Some(server_index) = servers - .iter() - .position(|server| server == services.globals.server_name()) - { - servers.swap_remove(server_index); - servers.insert(0, services.globals.server_name().to_owned()); - } - - Ok(get_room_information::v1::Response { room_id, servers }) -} - -/// # `GET /_matrix/federation/v1/query/profile` -/// -/// -/// Gets information on a profile. -pub(crate) async fn get_profile_information_route( - State(services): State, - body: Ruma, -) -> Result { - if !services - .server - .config - .allow_inbound_profile_lookup_federation_requests - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Profile lookup over federation is not allowed on this homeserver.", - )); - } - - if !services.globals.server_is_ours(body.user_id.server_name()) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "User does not belong to this server.", - )); - } - - let mut displayname = None; - let mut avatar_url = None; - let mut blurhash = None; - let mut tz = None; - let mut custom_profile_fields = BTreeMap::new(); - - match &body.field { - | Some(ProfileField::DisplayName) => { - displayname = services.users.displayname(&body.user_id).await.ok(); - }, - | Some(ProfileField::AvatarUrl) => { - avatar_url = services.users.avatar_url(&body.user_id).await.ok(); - blurhash = services.users.blurhash(&body.user_id).await.ok(); - }, - | Some(custom_field) => { - if let Ok(value) = services - .users 
- .profile_key(&body.user_id, custom_field.as_str()) - .await - { - custom_profile_fields.insert(custom_field.to_string(), value); - } - }, - | None => { - displayname = services.users.displayname(&body.user_id).await.ok(); - avatar_url = services.users.avatar_url(&body.user_id).await.ok(); - blurhash = services.users.blurhash(&body.user_id).await.ok(); - tz = services.users.timezone(&body.user_id).await.ok(); - custom_profile_fields = services - .users - .all_profile_keys(&body.user_id) - .collect() - .await; - }, - } - - // services.users.timezone will collect the MSC4175 timezone key if it exists - custom_profile_fields.remove("us.cloke.msc4175.tz"); - custom_profile_fields.remove("m.tz"); - - Ok(get_profile_information::v1::Response { - displayname, - avatar_url, - blurhash, - tz, - custom_profile_fields, - }) -} diff --git a/src/api/server/send.rs b/src/api/server/send.rs deleted file mode 100644 index 9c5bfd2b..00000000 --- a/src/api/server/send.rs +++ /dev/null @@ -1,590 +0,0 @@ -use std::{collections::BTreeMap, net::IpAddr, time::Instant}; - -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{ - Err, Error, Result, debug, - debug::INFO_SPAN_LEVEL, - debug_warn, err, error, - result::LogErr, - trace, - utils::{ - IterStream, ReadyExt, millis_since_unix_epoch, - stream::{BroadbandExt, TryBroadbandExt, automatic_width}, - }, - warn, -}; -use conduwuit_service::{ - Services, - sending::{EDU_LIMIT, PDU_LIMIT}, -}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use itertools::Itertools; -use ruma::{ - CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, - api::{ - client::error::ErrorKind, - federation::transactions::{ - edu::{ - DeviceListUpdateContent, DirectDeviceContent, Edu, PresenceContent, - PresenceUpdate, ReceiptContent, ReceiptData, ReceiptMap, SigningKeyUpdateContent, - TypingContent, - }, - send_transaction_message, - }, - }, - 
events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, - serde::Raw, - to_device::DeviceIdOrAllDevices, -}; - -use crate::Ruma; - -type ResolvedMap = BTreeMap; -type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); - -/// # `PUT /_matrix/federation/v1/send/{txnId}` -/// -/// Push EDUs and PDUs to this server. -#[tracing::instrument( - name = "txn", - level = INFO_SPAN_LEVEL, - skip_all, - fields( - %client, - origin = body.origin().as_str() - ), -)] -pub(crate) async fn send_transaction_message_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - if body.origin() != body.body.origin { - return Err!(Request(Forbidden( - "Not allowed to send transactions on behalf of other servers" - ))); - } - - if body.pdus.len() > PDU_LIMIT { - return Err!(Request(Forbidden( - "Not allowed to send more than {PDU_LIMIT} PDUs in one transaction" - ))); - } - - if body.edus.len() > EDU_LIMIT { - return Err!(Request(Forbidden( - "Not allowed to send more than {EDU_LIMIT} EDUs in one transaction" - ))); - } - - let txn_start_time = Instant::now(); - trace!( - pdus = body.pdus.len(), - edus = body.edus.len(), - elapsed = ?txn_start_time.elapsed(), - id = ?body.transaction_id, - origin =?body.origin(), - "Starting txn", - ); - - let pdus = body - .pdus - .iter() - .stream() - .broad_then(|pdu| services.rooms.event_handler.parse_incoming_pdu(pdu)) - .inspect_err(|e| debug_warn!("Could not parse PDU: {e}")) - .ready_filter_map(Result::ok); - - let edus = body - .edus - .iter() - .map(|edu| edu.json().get()) - .map(serde_json::from_str) - .filter_map(Result::ok) - .stream(); - - let results = handle(&services, &client, body.origin(), txn_start_time, pdus, edus).await?; - - debug!( - pdus = body.pdus.len(), - edus = body.edus.len(), - elapsed = ?txn_start_time.elapsed(), - id = ?body.transaction_id, - origin =?body.origin(), - "Finished txn", - ); - for (id, result) in &results { - if let Err(e) = result { - if 
matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { - warn!("Incoming PDU failed {id}: {e:?}"); - } - } - } - - Ok(send_transaction_message::v1::Response { - pdus: results - .into_iter() - .map(|(e, r)| (e, r.map_err(error::sanitized_message))) - .collect(), - }) -} - -async fn handle( - services: &Services, - client: &IpAddr, - origin: &ServerName, - started: Instant, - pdus: impl Stream + Send, - edus: impl Stream + Send, -) -> Result { - // group pdus by room - let pdus = pdus - .collect() - .map(|mut pdus: Vec<_>| { - pdus.sort_by(|(room_a, ..), (room_b, ..)| room_a.cmp(room_b)); - pdus.into_iter() - .into_grouping_map_by(|(room_id, ..)| room_id.clone()) - .collect() - }) - .await; - - // we can evaluate rooms concurrently - let results: ResolvedMap = pdus - .into_iter() - .try_stream() - .broad_and_then(|(room_id, pdus): (_, Vec<_>)| { - handle_room(services, client, origin, started, room_id, pdus.into_iter()) - .map_ok(Vec::into_iter) - .map_ok(IterStream::try_stream) - }) - .try_flatten() - .try_collect() - .boxed() - .await?; - - // evaluate edus after pdus, at least for now. 
- edus.for_each_concurrent(automatic_width(), |edu| handle_edu(services, client, origin, edu)) - .boxed() - .await; - - Ok(results) -} - -async fn handle_room( - services: &Services, - _client: &IpAddr, - origin: &ServerName, - txn_start_time: Instant, - room_id: OwnedRoomId, - pdus: impl Iterator + Send, -) -> Result> { - let _room_lock = services - .rooms - .event_handler - .mutex_federation - .lock(&room_id) - .await; - - let room_id = &room_id; - pdus.try_stream() - .and_then(|(_, event_id, value)| async move { - services.server.check_running()?; - let pdu_start_time = Instant::now(); - let result = services - .rooms - .event_handler - .handle_incoming_pdu(origin, room_id, &event_id, value, true) - .await - .map(|_| ()); - - debug!( - pdu_elapsed = ?pdu_start_time.elapsed(), - txn_elapsed = ?txn_start_time.elapsed(), - "Finished PDU {event_id}", - ); - - Ok((event_id, result)) - }) - .try_collect() - .await -} - -async fn handle_edu(services: &Services, client: &IpAddr, origin: &ServerName, edu: Edu) { - match edu { - | Edu::Presence(presence) if services.server.config.allow_incoming_presence => - handle_edu_presence(services, client, origin, presence).await, - - | Edu::Receipt(receipt) if services.server.config.allow_incoming_read_receipts => - handle_edu_receipt(services, client, origin, receipt).await, - - | Edu::Typing(typing) if services.server.config.allow_incoming_typing => - handle_edu_typing(services, client, origin, typing).await, - - | Edu::DeviceListUpdate(content) => - handle_edu_device_list_update(services, client, origin, content).await, - - | Edu::DirectToDevice(content) => - handle_edu_direct_to_device(services, client, origin, content).await, - - | Edu::SigningKeyUpdate(content) => - handle_edu_signing_key_update(services, client, origin, content).await, - - | Edu::_Custom(ref _custom) => debug_warn!(?edu, "received custom/unknown EDU"), - - | _ => trace!(?edu, "skipped"), - } -} - -async fn handle_edu_presence( - services: &Services, - 
_client: &IpAddr, - origin: &ServerName, - presence: PresenceContent, -) { - presence - .push - .into_iter() - .stream() - .for_each_concurrent(automatic_width(), |update| { - handle_edu_presence_update(services, origin, update) - }) - .await; -} - -async fn handle_edu_presence_update( - services: &Services, - origin: &ServerName, - update: PresenceUpdate, -) { - if update.user_id.server_name() != origin { - debug_warn!( - %update.user_id, %origin, - "received presence EDU for user not belonging to origin" - ); - return; - } - - services - .presence - .set_presence( - &update.user_id, - &update.presence, - Some(update.currently_active), - Some(update.last_active_ago), - update.status_msg.clone(), - ) - .await - .log_err() - .ok(); -} - -async fn handle_edu_receipt( - services: &Services, - _client: &IpAddr, - origin: &ServerName, - receipt: ReceiptContent, -) { - receipt - .receipts - .into_iter() - .stream() - .for_each_concurrent(automatic_width(), |(room_id, room_updates)| { - handle_edu_receipt_room(services, origin, room_id, room_updates) - }) - .await; -} - -async fn handle_edu_receipt_room( - services: &Services, - origin: &ServerName, - room_id: OwnedRoomId, - room_updates: ReceiptMap, -) { - if services - .rooms - .event_handler - .acl_check(origin, &room_id) - .await - .is_err() - { - debug_warn!( - %origin, %room_id, - "received read receipt EDU from ACL'd server" - ); - return; - } - - let room_id = &room_id; - room_updates - .read - .into_iter() - .stream() - .for_each_concurrent(automatic_width(), |(user_id, user_updates)| async move { - handle_edu_receipt_room_user(services, origin, room_id, &user_id, user_updates).await; - }) - .await; -} - -async fn handle_edu_receipt_room_user( - services: &Services, - origin: &ServerName, - room_id: &RoomId, - user_id: &UserId, - user_updates: ReceiptData, -) { - if user_id.server_name() != origin { - debug_warn!( - %user_id, %origin, - "received read receipt EDU for user not belonging to origin" - ); - return; - 
} - - if !services - .rooms - .state_cache - .server_in_room(origin, room_id) - .await - { - debug_warn!( - %user_id, %room_id, %origin, - "received read receipt EDU from server who does not have a member in the room", - ); - return; - } - - let data = &user_updates.data; - user_updates - .event_ids - .into_iter() - .stream() - .for_each_concurrent(automatic_width(), |event_id| async move { - let user_data = [(user_id.to_owned(), data.clone())]; - let receipts = [(ReceiptType::Read, BTreeMap::from(user_data))]; - let content = [(event_id.clone(), BTreeMap::from(receipts))]; - services - .rooms - .read_receipt - .readreceipt_update(user_id, room_id, &ReceiptEvent { - content: ReceiptEventContent(content.into()), - room_id: room_id.to_owned(), - }) - .await; - }) - .await; -} - -async fn handle_edu_typing( - services: &Services, - _client: &IpAddr, - origin: &ServerName, - typing: TypingContent, -) { - if typing.user_id.server_name() != origin { - debug_warn!( - %typing.user_id, %origin, - "received typing EDU for user not belonging to origin" - ); - return; - } - - if services - .rooms - .event_handler - .acl_check(typing.user_id.server_name(), &typing.room_id) - .await - .is_err() - { - debug_warn!( - %typing.user_id, %typing.room_id, %origin, - "received typing EDU for ACL'd user's server" - ); - return; - } - - if !services - .rooms - .state_cache - .is_joined(&typing.user_id, &typing.room_id) - .await - { - debug_warn!( - %typing.user_id, %typing.room_id, %origin, - "received typing EDU for user not in room" - ); - return; - } - - if typing.typing { - let secs = services.server.config.typing_federation_timeout_s; - let timeout = millis_since_unix_epoch().saturating_add(secs.saturating_mul(1000)); - - services - .rooms - .typing - .typing_add(&typing.user_id, &typing.room_id, timeout) - .await - .log_err() - .ok(); - } else { - services - .rooms - .typing - .typing_remove(&typing.user_id, &typing.room_id) - .await - .log_err() - .ok(); - } -} - -async fn 
handle_edu_device_list_update( - services: &Services, - _client: &IpAddr, - origin: &ServerName, - content: DeviceListUpdateContent, -) { - let DeviceListUpdateContent { user_id, .. } = content; - - if user_id.server_name() != origin { - debug_warn!( - %user_id, %origin, - "received device list update EDU for user not belonging to origin" - ); - return; - } - - services.users.mark_device_key_update(&user_id).await; -} - -async fn handle_edu_direct_to_device( - services: &Services, - _client: &IpAddr, - origin: &ServerName, - content: DirectDeviceContent, -) { - let DirectDeviceContent { - ref sender, - ref ev_type, - ref message_id, - messages, - } = content; - - if sender.server_name() != origin { - debug_warn!( - %sender, %origin, - "received direct to device EDU for user not belonging to origin" - ); - return; - } - - // Check if this is a new transaction id - if services - .transaction_ids - .existing_txnid(sender, None, message_id) - .await - .is_ok() - { - return; - } - - // process messages concurrently for different users - let ev_type = ev_type.to_string(); - messages - .into_iter() - .stream() - .for_each_concurrent(automatic_width(), |(target_user_id, map)| { - handle_edu_direct_to_device_user(services, target_user_id, sender, &ev_type, map) - }) - .await; - - // Save transaction id with empty data - services - .transaction_ids - .add_txnid(sender, None, message_id, &[]); -} - -async fn handle_edu_direct_to_device_user( - services: &Services, - target_user_id: OwnedUserId, - sender: &UserId, - ev_type: &str, - map: BTreeMap>, -) { - for (target_device_id_maybe, event) in map { - let Ok(event) = event - .deserialize_as() - .map_err(|e| err!(Request(InvalidParam(error!("To-Device event is invalid: {e}"))))) - else { - continue; - }; - - handle_edu_direct_to_device_event( - services, - &target_user_id, - sender, - target_device_id_maybe, - ev_type, - event, - ) - .await; - } -} - -async fn handle_edu_direct_to_device_event( - services: &Services, - 
target_user_id: &UserId, - sender: &UserId, - target_device_id_maybe: DeviceIdOrAllDevices, - ev_type: &str, - event: serde_json::Value, -) { - match target_device_id_maybe { - | DeviceIdOrAllDevices::DeviceId(ref target_device_id) => { - services - .users - .add_to_device_event(sender, target_user_id, target_device_id, ev_type, event) - .await; - }, - - | DeviceIdOrAllDevices::AllDevices => { - services - .users - .all_device_ids(target_user_id) - .for_each(|target_device_id| { - services.users.add_to_device_event( - sender, - target_user_id, - target_device_id, - ev_type, - event.clone(), - ) - }) - .await; - }, - } -} - -async fn handle_edu_signing_key_update( - services: &Services, - _client: &IpAddr, - origin: &ServerName, - content: SigningKeyUpdateContent, -) { - let SigningKeyUpdateContent { user_id, master_key, self_signing_key } = content; - - if user_id.server_name() != origin { - debug_warn!( - %user_id, %origin, - "received signing key update EDU from server that does not belong to user's server" - ); - return; - } - - services - .users - .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) - .await - .log_err() - .ok(); -} diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs deleted file mode 100644 index 895eca81..00000000 --- a/src/api/server/send_join.rs +++ /dev/null @@ -1,345 +0,0 @@ -#![allow(deprecated)] - -use std::borrow::Borrow; - -use axum::extract::State; -use conduwuit::{ - Err, Result, at, err, - pdu::gen_event_id_canonical_json, - utils::stream::{IterStream, TryBroadbandExt}, - warn, -}; -use conduwuit_service::Services; -use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{ - CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, - ServerName, - api::federation::membership::create_join_event, - events::{ - StateEventType, - room::member::{MembershipState, RoomMemberEventContent}, - }, -}; -use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; 
- -use crate::Ruma; - -/// helper method for /send_join v1 and v2 -async fn create_join_event( - services: &Services, - origin: &ServerName, - room_id: &RoomId, - pdu: &RawJsonValue, -) -> Result { - if !services.rooms.metadata.exists(room_id).await { - return Err!(Request(NotFound("Room is unknown to this server."))); - } - - // ACL check origin server - services - .rooms - .event_handler - .acl_check(origin, room_id) - .await?; - - // We need to return the state prior to joining, let's keep a reference to that - // here - let shortstatehash = services - .rooms - .state - .get_room_shortstatehash(room_id) - .await - .map_err(|e| err!(Request(NotFound(error!("Room has no state: {e}")))))?; - - // We do not add the event_id field to the pdu here because of signature and - // hashes checks - let room_version_id = services.rooms.state.get_room_version(room_id).await?; - - let Ok((event_id, mut value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { - // Event could not be converted to canonical json - return Err!(Request(BadJson("Could not convert event to canonical json."))); - }; - - let event_room_id: OwnedRoomId = serde_json::from_value( - value - .get("room_id") - .ok_or_else(|| err!(Request(BadJson("Event missing room_id property."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson(warn!("room_id field is not a valid room ID: {e}")))))?; - - if event_room_id != room_id { - return Err!(Request(BadJson("Event room_id does not match request path room ID."))); - } - - let event_type: StateEventType = serde_json::from_value( - value - .get("type") - .ok_or_else(|| err!(Request(BadJson("Event missing type property."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson(warn!("Event has invalid state event type: {e}")))))?; - - if event_type != StateEventType::RoomMember { - return Err!(Request(BadJson( - "Not allowed to send non-membership state event to join endpoint." 
- ))); - } - - let content: RoomMemberEventContent = serde_json::from_value( - value - .get("content") - .ok_or_else(|| err!(Request(BadJson("Event missing content property"))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson(warn!("Event content is empty or invalid: {e}")))))?; - - if content.membership != MembershipState::Join { - return Err!(Request(BadJson( - "Not allowed to send a non-join membership event to join endpoint." - ))); - } - - // ACL check sender user server name - let sender: OwnedUserId = serde_json::from_value( - value - .get("sender") - .ok_or_else(|| err!(Request(BadJson("Event missing sender property."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson(warn!("sender property is not a valid user ID: {e}")))))?; - - services - .rooms - .event_handler - .acl_check(sender.server_name(), room_id) - .await?; - - // check if origin server is trying to send for another server - if sender.server_name() != origin { - return Err!(Request(Forbidden("Not allowed to join on behalf of another server."))); - } - - let state_key: OwnedUserId = serde_json::from_value( - value - .get("state_key") - .ok_or_else(|| err!(Request(BadJson("Event missing state_key property."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson(warn!("State key is not a valid user ID: {e}")))))?; - - if state_key != sender { - return Err!(Request(BadJson("State key does not match sender user."))); - } - - if let Some(authorising_user) = content.join_authorized_via_users_server { - use ruma::RoomVersionId::*; - - if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { - return Err!(Request(InvalidParam( - "Room version {room_version_id} does not support restricted rooms but \ - join_authorised_via_users_server ({authorising_user}) was found in the event." 
- ))); - } - - if !services.globals.user_is_local(&authorising_user) { - return Err!(Request(InvalidParam( - "Cannot authorise membership event through {authorising_user} as they do not \ - belong to this homeserver" - ))); - } - - if !services - .rooms - .state_cache - .is_joined(&authorising_user, room_id) - .await - { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} is not in the room you are trying to join, \ - they cannot authorise your join." - ))); - } - - if !super::user_can_perform_restricted_join( - services, - &state_key, - room_id, - &room_version_id, - ) - .await? - { - return Err!(Request(UnableToAuthorizeJoin( - "Joining user did not pass restricted room's rules." - ))); - } - } - - services - .server_keys - .hash_and_sign_event(&mut value, &room_version_id) - .map_err(|e| err!(Request(InvalidParam(warn!("Failed to sign send_join event: {e}")))))?; - - let origin: OwnedServerName = serde_json::from_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event does not have an origin server name."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson("Event has an invalid origin server name: {e}"))))?; - - let mutex_lock = services - .rooms - .event_handler - .mutex_federation - .lock(room_id) - .await; - - let pdu_id = services - .rooms - .event_handler - .handle_incoming_pdu(&origin, room_id, &event_id, value.clone(), true) - .boxed() - .await? 
- .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; - - drop(mutex_lock); - - let state_ids: Vec = services - .rooms - .state_accessor - .state_full_ids(shortstatehash) - .map(at!(1)) - .collect() - .await; - - let state = state_ids - .iter() - .try_stream() - .broad_and_then(|event_id| services.rooms.timeline.get_pdu_json(event_id)) - .broad_and_then(|pdu| { - services - .sending - .convert_to_outgoing_federation_event(pdu) - .map(Ok) - }) - .try_collect() - .boxed() - .await?; - - let starting_events = state_ids.iter().map(Borrow::borrow); - let auth_chain = services - .rooms - .auth_chain - .event_ids_iter(room_id, starting_events) - .broad_and_then(|event_id| async move { - services.rooms.timeline.get_pdu_json(&event_id).await - }) - .broad_and_then(|pdu| { - services - .sending - .convert_to_outgoing_federation_event(pdu) - .map(Ok) - }) - .try_collect() - .boxed() - .await?; - - services.sending.send_pdu_room(room_id, &pdu_id).await?; - - Ok(create_join_event::v1::RoomState { - auth_chain, - state, - event: to_raw_value(&CanonicalJsonValue::Object(value)).ok(), - }) -} - -/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` -/// -/// Submits a signed join event. -pub(crate) async fn create_join_event_v1_route( - State(services): State, - body: Ruma, -) -> Result { - if services - .moderation - .is_remote_server_forbidden(body.origin()) - { - warn!( - "Server {} tried joining room ID {} through us who has a server name that is \ - globally forbidden. Rejecting.", - body.origin(), - &body.room_id, - ); - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - - if let Some(server) = body.room_id.server_name() { - if services.moderation.is_remote_server_forbidden(server) { - warn!( - "Server {} tried joining room ID {} through us which has a server name that is \ - globally forbidden. 
Rejecting.", - body.origin(), - &body.room_id, - ); - return Err!(Request(Forbidden(warn!( - "Room ID server name {server} is banned on this homeserver." - )))); - } - } - - let room_state = create_join_event(&services, body.origin(), &body.room_id, &body.pdu) - .boxed() - .await?; - - Ok(create_join_event::v1::Response { room_state }) -} - -/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` -/// -/// Submits a signed join event. -pub(crate) async fn create_join_event_v2_route( - State(services): State, - body: Ruma, -) -> Result { - if services - .moderation - .is_remote_server_forbidden(body.origin()) - { - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - - if let Some(server) = body.room_id.server_name() { - if services.moderation.is_remote_server_forbidden(server) { - warn!( - "Server {} tried joining room ID {} through us which has a server name that is \ - globally forbidden. Rejecting.", - body.origin(), - &body.room_id, - ); - return Err!(Request(Forbidden(warn!( - "Room ID server name {server} is banned on this homeserver." 
- )))); - } - } - - let create_join_event::v1::RoomState { auth_chain, state, event } = - create_join_event(&services, body.origin(), &body.room_id, &body.pdu) - .boxed() - .await?; - let room_state = create_join_event::v2::RoomState { - members_omitted: false, - auth_chain, - state, - event, - servers_in_room: None, - }; - - Ok(create_join_event::v2::Response { room_state }) -} diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs deleted file mode 100644 index 8d3697d2..00000000 --- a/src/api/server/send_knock.rs +++ /dev/null @@ -1,181 +0,0 @@ -use axum::extract::State; -use conduwuit::{ - Err, Result, err, - matrix::pdu::{PduEvent, gen_event_id_canonical_json}, - warn, -}; -use futures::FutureExt; -use ruma::{ - OwnedServerName, OwnedUserId, - RoomVersionId::*, - api::federation::knock::send_knock, - events::{ - StateEventType, - room::member::{MembershipState, RoomMemberEventContent}, - }, - serde::JsonObject, -}; - -use crate::Ruma; - -/// # `PUT /_matrix/federation/v1/send_knock/{roomId}/{eventId}` -/// -/// Submits a signed knock event. -pub(crate) async fn create_knock_event_v1_route( - State(services): State, - body: Ruma, -) -> Result { - if services - .moderation - .is_remote_server_forbidden(body.origin()) - { - warn!( - "Server {} tried knocking room ID {} who has a server name that is globally \ - forbidden. Rejecting.", - body.origin(), - &body.room_id, - ); - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - - if let Some(server) = body.room_id.server_name() { - if services.moderation.is_remote_server_forbidden(server) { - warn!( - "Server {} tried knocking room ID {} which has a server name that is globally \ - forbidden. 
Rejecting.", - body.origin(), - &body.room_id, - ); - return Err!(Request(Forbidden("Server is banned on this homeserver."))); - } - } - - if !services.rooms.metadata.exists(&body.room_id).await { - return Err!(Request(NotFound("Room is unknown to this server."))); - } - - // ACL check origin server - services - .rooms - .event_handler - .acl_check(body.origin(), &body.room_id) - .await?; - - let room_version_id = services.rooms.state.get_room_version(&body.room_id).await?; - - if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6) { - return Err!(Request(Forbidden("Room version does not support knocking."))); - } - - let Ok((event_id, value)) = gen_event_id_canonical_json(&body.pdu, &room_version_id) else { - // Event could not be converted to canonical json - return Err!(Request(InvalidParam("Could not convert event to canonical json."))); - }; - - let event_type: StateEventType = serde_json::from_value( - value - .get("type") - .ok_or_else(|| err!(Request(InvalidParam("Event has no event type."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(InvalidParam("Event has invalid event type: {e}"))))?; - - if event_type != StateEventType::RoomMember { - return Err!(Request(InvalidParam( - "Not allowed to send non-membership state event to knock endpoint.", - ))); - } - - let content: RoomMemberEventContent = serde_json::from_value( - value - .get("content") - .ok_or_else(|| err!(Request(InvalidParam("Membership event has no content"))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(InvalidParam("Event has invalid membership content: {e}"))))?; - - if content.membership != MembershipState::Knock { - return Err!(Request(InvalidParam( - "Not allowed to send a non-knock membership event to knock endpoint." - ))); - } - - // ACL check sender server name - let sender: OwnedUserId = serde_json::from_value( - value - .get("sender") - .ok_or_else(|| err!(Request(InvalidParam("Event has no sender user ID."))))? 
- .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson("Event sender is not a valid user ID: {e}"))))?; - - services - .rooms - .event_handler - .acl_check(sender.server_name(), &body.room_id) - .await?; - - // check if origin server is trying to send for another server - if sender.server_name() != body.origin() { - return Err!(Request(BadJson("Not allowed to knock on behalf of another server/user."))); - } - - let state_key: OwnedUserId = serde_json::from_value( - value - .get("state_key") - .ok_or_else(|| err!(Request(InvalidParam("Event does not have a state_key"))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson("Event does not have a valid state_key: {e}"))))?; - - if state_key != sender { - return Err!(Request(InvalidParam("state_key does not match sender user of event."))); - } - - let origin: OwnedServerName = serde_json::from_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event does not have an origin server name."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson("Event has an invalid origin server name: {e}"))))?; - - let mut event: JsonObject = serde_json::from_str(body.pdu.get()) - .map_err(|e| err!(Request(InvalidParam("Invalid knock event PDU: {e}"))))?; - - event.insert("event_id".to_owned(), "$placeholder".into()); - - let pdu: PduEvent = serde_json::from_value(event.into()) - .map_err(|e| err!(Request(InvalidParam("Invalid knock event PDU: {e}"))))?; - - let mutex_lock = services - .rooms - .event_handler - .mutex_federation - .lock(&body.room_id) - .await; - - let pdu_id = services - .rooms - .event_handler - .handle_incoming_pdu(&origin, &body.room_id, &event_id, value.clone(), true) - .boxed() - .await? 
- .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; - - drop(mutex_lock); - - services - .sending - .send_pdu_room(&body.room_id, &pdu_id) - .await?; - - let knock_room_state = services.rooms.state.summary_stripped(&pdu).await; - - Ok(send_knock::v1::Response { knock_room_state }) -} diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs deleted file mode 100644 index d3dc994c..00000000 --- a/src/api/server/send_leave.rs +++ /dev/null @@ -1,167 +0,0 @@ -#![allow(deprecated)] - -use axum::extract::State; -use conduwuit::{Err, Result, err, matrix::pdu::gen_event_id_canonical_json}; -use conduwuit_service::Services; -use futures::FutureExt; -use ruma::{ - OwnedRoomId, OwnedUserId, RoomId, ServerName, - api::federation::membership::create_leave_event, - events::{ - StateEventType, - room::member::{MembershipState, RoomMemberEventContent}, - }, -}; -use serde_json::value::RawValue as RawJsonValue; - -use crate::Ruma; - -/// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}` -/// -/// Submits a signed leave event. -pub(crate) async fn create_leave_event_v1_route( - State(services): State, - body: Ruma, -) -> Result { - create_leave_event(&services, body.origin(), &body.room_id, &body.pdu).await?; - - Ok(create_leave_event::v1::Response::new()) -} - -/// # `PUT /_matrix/federation/v2/send_leave/{roomId}/{eventId}` -/// -/// Submits a signed leave event. 
-pub(crate) async fn create_leave_event_v2_route( - State(services): State, - body: Ruma, -) -> Result { - create_leave_event(&services, body.origin(), &body.room_id, &body.pdu).await?; - - Ok(create_leave_event::v2::Response::new()) -} - -async fn create_leave_event( - services: &Services, - origin: &ServerName, - room_id: &RoomId, - pdu: &RawJsonValue, -) -> Result { - if !services.rooms.metadata.exists(room_id).await { - return Err!(Request(NotFound("Room is unknown to this server."))); - } - - // ACL check origin - services - .rooms - .event_handler - .acl_check(origin, room_id) - .await?; - - // We do not add the event_id field to the pdu here because of signature and - // hashes checks - let room_version_id = services.rooms.state.get_room_version(room_id).await?; - let Ok((event_id, value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { - // Event could not be converted to canonical json - return Err!(Request(BadJson("Could not convert event to canonical json."))); - }; - - let event_room_id: OwnedRoomId = serde_json::from_value( - serde_json::to_value( - value - .get("room_id") - .ok_or_else(|| err!(Request(BadJson("Event missing room_id property."))))?, - ) - .expect("CanonicalJson is valid json value"), - ) - .map_err(|e| err!(Request(BadJson(warn!("room_id field is not a valid room ID: {e}")))))?; - - if event_room_id != room_id { - return Err!(Request(BadJson("Event room_id does not match request path room ID."))); - } - - let content: RoomMemberEventContent = serde_json::from_value( - value - .get("content") - .ok_or_else(|| err!(Request(BadJson("Event missing content property."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson(warn!("Event content is empty or invalid: {e}")))))?; - - if content.membership != MembershipState::Leave { - return Err!(Request(BadJson( - "Not allowed to send a non-leave membership event to leave endpoint." 
- ))); - } - - let event_type: StateEventType = serde_json::from_value( - value - .get("type") - .ok_or_else(|| err!(Request(BadJson("Event missing type property."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson(warn!("Event has invalid state event type: {e}")))))?; - - if event_type != StateEventType::RoomMember { - return Err!(Request(BadJson( - "Not allowed to send non-membership state event to leave endpoint." - ))); - } - - // ACL check sender server name - let sender: OwnedUserId = serde_json::from_value( - value - .get("sender") - .ok_or_else(|| err!(Request(BadJson("Event missing sender property."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson(warn!("sender property is not a valid user ID: {e}")))))?; - - services - .rooms - .event_handler - .acl_check(sender.server_name(), room_id) - .await?; - - if sender.server_name() != origin { - return Err!(Request(BadJson("Not allowed to leave on behalf of another server/user."))); - } - - let state_key: OwnedUserId = serde_json::from_value( - value - .get("state_key") - .ok_or_else(|| err!(Request(BadJson("Event missing state_key property."))))? - .clone() - .into(), - ) - .map_err(|e| err!(Request(BadJson(warn!("State key is not a valid user ID: {e}")))))?; - - if state_key != sender { - return Err!(Request(BadJson("State key does not match sender user."))); - } - - let mutex_lock = services - .rooms - .event_handler - .mutex_federation - .lock(room_id) - .await; - - let pdu_id = services - .rooms - .event_handler - .handle_incoming_pdu(origin, room_id, &event_id, value, true) - .boxed() - .await? 
- .ok_or_else(|| err!(Request(InvalidParam("Could not accept as timeline event."))))?; - - drop(mutex_lock); - - services - .sending - .send_pdu_room(room_id, &pdu_id) - .boxed() - .await -} diff --git a/src/api/server/state.rs b/src/api/server/state.rs deleted file mode 100644 index 8c786815..00000000 --- a/src/api/server/state.rs +++ /dev/null @@ -1,70 +0,0 @@ -use std::{borrow::Borrow, iter::once}; - -use axum::extract::State; -use conduwuit::{Result, at, err, utils::IterStream}; -use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{OwnedEventId, api::federation::event::get_room_state}; - -use super::AccessCheck; -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/state/{roomId}` -/// -/// Retrieves a snapshot of a room's state at a given event. -pub(crate) async fn get_room_state_route( - State(services): State, - body: Ruma, -) -> Result { - AccessCheck { - services: &services, - origin: body.origin(), - room_id: &body.room_id, - event_id: None, - } - .check() - .await?; - - let shortstatehash = services - .rooms - .state_accessor - .pdu_shortstatehash(&body.event_id) - .await - .map_err(|_| err!(Request(NotFound("PDU state not found."))))?; - - let state_ids: Vec = services - .rooms - .state_accessor - .state_full_ids(shortstatehash) - .map(at!(1)) - .collect() - .await; - - let pdus = state_ids - .iter() - .try_stream() - .and_then(|id| services.rooms.timeline.get_pdu_json(id)) - .and_then(|pdu| { - services - .sending - .convert_to_outgoing_federation_event(pdu) - .map(Ok) - }) - .try_collect() - .await?; - - let auth_chain = services - .rooms - .auth_chain - .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .and_then(|id| async move { services.rooms.timeline.get_pdu_json(&id).await }) - .and_then(|pdu| { - services - .sending - .convert_to_outgoing_federation_event(pdu) - .map(Ok) - }) - .try_collect() - .await?; - - Ok(get_room_state::v1::Response { auth_chain, pdus }) -} diff --git a/src/api/server/state_ids.rs 
b/src/api/server/state_ids.rs deleted file mode 100644 index 648d4575..00000000 --- a/src/api/server/state_ids.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::{borrow::Borrow, iter::once}; - -use axum::extract::State; -use conduwuit::{Result, at, err}; -use futures::{StreamExt, TryStreamExt}; -use ruma::{OwnedEventId, api::federation::event::get_room_state_ids}; - -use super::AccessCheck; -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/state_ids/{roomId}` -/// -/// Retrieves a snapshot of a room's state at a given event, in the form of -/// event IDs. -pub(crate) async fn get_room_state_ids_route( - State(services): State, - body: Ruma, -) -> Result { - AccessCheck { - services: &services, - origin: body.origin(), - room_id: &body.room_id, - event_id: None, - } - .check() - .await?; - - let shortstatehash = services - .rooms - .state_accessor - .pdu_shortstatehash(&body.event_id) - .await - .map_err(|_| err!(Request(NotFound("Pdu state not found."))))?; - - let pdu_ids: Vec = services - .rooms - .state_accessor - .state_full_ids(shortstatehash) - .map(at!(1)) - .collect() - .await; - - let auth_chain_ids = services - .rooms - .auth_chain - .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .try_collect() - .await?; - - Ok(get_room_state_ids::v1::Response { auth_chain_ids, pdu_ids }) -} diff --git a/src/api/server/user.rs b/src/api/server/user.rs deleted file mode 100644 index 80c353ab..00000000 --- a/src/api/server/user.rs +++ /dev/null @@ -1,130 +0,0 @@ -use axum::extract::State; -use conduwuit::{Error, Result}; -use futures::{FutureExt, StreamExt, TryFutureExt}; -use ruma::api::{ - client::error::ErrorKind, - federation::{ - device::get_devices::{self, v1::UserDevice}, - keys::{claim_keys, get_keys}, - }, -}; - -use crate::{ - Ruma, - client::{claim_keys_helper, get_keys_helper}, -}; - -/// # `GET /_matrix/federation/v1/user/devices/{userId}` -/// -/// Gets information on all devices of the user. 
-pub(crate) async fn get_devices_route( - State(services): State, - body: Ruma, -) -> Result { - if !services.globals.user_is_local(&body.user_id) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to access user from other server.", - )); - } - - let user_id = &body.user_id; - Ok(get_devices::v1::Response { - user_id: user_id.clone(), - stream_id: services - .users - .get_devicelist_version(user_id) - .await - .unwrap_or(0) - .try_into()?, - devices: services - .users - .all_devices_metadata(user_id) - .filter_map(|metadata| async move { - let device_id = metadata.device_id.clone(); - let device_id_clone = device_id.clone(); - let device_id_string = device_id.as_str().to_owned(); - let device_display_name = if services.globals.allow_device_name_federation() { - metadata.display_name.clone() - } else { - Some(device_id_string) - }; - - services - .users - .get_device_keys(user_id, &device_id_clone) - .map_ok(|keys| UserDevice { device_id, keys, device_display_name }) - .map(Result::ok) - .await - }) - .collect() - .await, - master_key: services - .users - .get_master_key(None, &body.user_id, &|u| u.server_name() == body.origin()) - .await - .ok(), - self_signing_key: services - .users - .get_self_signing_key(None, &body.user_id, &|u| u.server_name() == body.origin()) - .await - .ok(), - }) -} - -/// # `POST /_matrix/federation/v1/user/keys/query` -/// -/// Gets devices and identity keys for the given users. 
-pub(crate) async fn get_keys_route( - State(services): State, - body: Ruma, -) -> Result { - if body - .device_keys - .iter() - .any(|(u, _)| !services.globals.user_is_local(u)) - { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "User does not belong to this server.", - )); - } - - let result = get_keys_helper( - &services, - None, - &body.device_keys, - |u| Some(u.server_name()) == body.origin.as_deref(), - services.globals.allow_device_name_federation(), - ) - .await?; - - Ok(get_keys::v1::Response { - device_keys: result.device_keys, - master_keys: result.master_keys, - self_signing_keys: result.self_signing_keys, - }) -} - -/// # `POST /_matrix/federation/v1/user/keys/claim` -/// -/// Claims one-time keys. -pub(crate) async fn claim_keys_route( - State(services): State, - body: Ruma, -) -> Result { - if body - .one_time_keys - .iter() - .any(|(u, _)| !services.globals.user_is_local(u)) - { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to access user from other server.", - )); - } - - let result = claim_keys_helper(&services, &body.one_time_keys).await?; - - Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys }) -} diff --git a/src/api/server/utils.rs b/src/api/server/utils.rs deleted file mode 100644 index 5696e44b..00000000 --- a/src/api/server/utils.rs +++ /dev/null @@ -1,70 +0,0 @@ -use conduwuit::{Err, Result, implement, is_false}; -use conduwuit_service::Services; -use futures::{FutureExt, StreamExt, future::OptionFuture, join}; -use ruma::{EventId, RoomId, ServerName}; - -pub(super) struct AccessCheck<'a> { - pub(super) services: &'a Services, - pub(super) origin: &'a ServerName, - pub(super) room_id: &'a RoomId, - pub(super) event_id: Option<&'a EventId>, -} - -#[implement(AccessCheck, params = "<'_>")] -pub(super) async fn check(&self) -> Result { - let acl_check = self - .services - .rooms - .event_handler - .acl_check(self.origin, self.room_id) - .map(|result| result.is_ok()); - - let world_readable = 
self - .services - .rooms - .state_accessor - .is_world_readable(self.room_id); - - let server_in_room = self - .services - .rooms - .state_cache - .server_in_room(self.origin, self.room_id); - - // if any user on our homeserver is trying to knock this room, we'll need to - // acknowledge bans or leaves - let user_is_knocking = self - .services - .rooms - .state_cache - .room_members_knocked(self.room_id) - .count(); - - let server_can_see: OptionFuture<_> = self - .event_id - .map(|event_id| { - self.services.rooms.state_accessor.server_can_see_event( - self.origin, - self.room_id, - event_id, - ) - }) - .into(); - - let (world_readable, server_in_room, server_can_see, acl_check, user_is_knocking) = - join!(world_readable, server_in_room, server_can_see, acl_check, user_is_knocking); - - if !acl_check { - return Err!(Request(Forbidden("Server access denied."))); - } - - if !world_readable && !server_in_room && user_is_knocking == 0 { - return Err!(Request(Forbidden("Server is not in room."))); - } - - if server_can_see.is_some_and(is_false!()) { - return Err!(Request(Forbidden("Server is not allowed to see event."))); - } - - Ok(()) -} diff --git a/src/api/server/version.rs b/src/api/server/version.rs deleted file mode 100644 index b08ff77a..00000000 --- a/src/api/server/version.rs +++ /dev/null @@ -1,18 +0,0 @@ -use conduwuit::Result; -use ruma::api::federation::discovery::get_server_version; - -use crate::Ruma; - -/// # `GET /_matrix/federation/v1/version` -/// -/// Get version information on this server. 
-pub(crate) async fn get_server_version_route( - _body: Ruma, -) -> Result { - Ok(get_server_version::v1::Response { - server: Some(get_server_version::v1::Server { - name: Some(conduwuit::version::name().into()), - version: Some(conduwuit::version::version().into()), - }), - }) -} diff --git a/src/api/server/well_known.rs b/src/api/server/well_known.rs deleted file mode 100644 index 75c7cf5d..00000000 --- a/src/api/server/well_known.rs +++ /dev/null @@ -1,20 +0,0 @@ -use axum::extract::State; -use conduwuit::{Error, Result}; -use ruma::api::{client::error::ErrorKind, federation::discovery::discover_homeserver}; - -use crate::Ruma; - -/// # `GET /.well-known/matrix/server` -/// -/// Returns the .well-known URL if it is configured, otherwise returns 404. -pub(crate) async fn well_known_server( - State(services): State, - _body: Ruma, -) -> Result { - Ok(discover_homeserver::Response { - server: match services.server.config.well_known.server.as_ref() { - | Some(server_name) => server_name.to_owned(), - | None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), - }, - }) -} diff --git a/src/api/server_server.rs b/src/api/server_server.rs new file mode 100644 index 00000000..12de8d45 --- /dev/null +++ b/src/api/server_server.rs @@ -0,0 +1,1920 @@ +#![allow(deprecated)] +// Conduit implements the older APIs + +use std::{ + collections::BTreeMap, + sync::Arc, + time::{Duration, Instant, SystemTime}, +}; + +use axum::{response::IntoResponse, Json}; +use get_profile_information::v1::ProfileField; +use rand::seq::SliceRandom; +use ruma::{ + api::{ + client::error::ErrorKind, + federation::{ + authorization::get_event_authorization, + backfill::get_backfill, + device::get_devices::{self, v1::UserDevice}, + directory::{get_public_rooms, get_public_rooms_filtered}, + discovery::{discover_homeserver, get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, + event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + 
keys::{claim_keys, get_keys}, + membership::{ + create_invite, create_join_event, create_leave_event, prepare_join_event, prepare_leave_event, + }, + query::{get_profile_information, get_room_information}, + space::get_hierarchy, + transactions::{ + edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, + send_transaction_message, + }, + }, + OutgoingResponse, + }, + directory::Filter, + events::{ + receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, + room::{ + join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + StateEventType, TimelineEventType, + }, + serde::{Base64, JsonObject, Raw}, + to_device::DeviceIdOrAllDevices, + uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, + OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, RoomVersionId, ServerName, +}; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use tokio::sync::RwLock; +use tracing::{debug, error, info, trace, warn}; + +use crate::{ + api::client_server::{self, claim_keys_helper, get_keys_helper}, + service::pdu::{gen_event_id_canonical_json, PduBuilder}, + services, utils, Error, PduEvent, Result, Ruma, +}; + +/// # `GET /_matrix/federation/v1/version` +/// +/// Get version information on this server. +pub async fn get_server_version_route( + _body: Ruma, +) -> Result { + let version = match option_env!("CONDUIT_VERSION_EXTRA") { + Some(extra) => format!("{} ({})", env!("CARGO_PKG_VERSION"), extra), + None => env!("CARGO_PKG_VERSION").to_owned(), + }; + + Ok(get_server_version::v1::Response { + server: Some(get_server_version::v1::Server { + name: Some("Conduwuit".to_owned()), + version: Some(version), + }), + }) +} + +/// # `GET /_matrix/key/v2/server` +/// +/// Gets the public signing keys of this server. 
+/// +/// - Matrix does not support invalidating public keys, so the key returned by +/// this will be valid +/// forever. +// Response type for this endpoint is Json because we need to calculate a +// signature for the response +pub async fn get_server_keys_route() -> Result { + let mut verify_keys: BTreeMap = BTreeMap::new(); + verify_keys.insert( + format!("ed25519:{}", services().globals.keypair().version()) + .try_into() + .expect("found invalid server signing keys in DB"), + VerifyKey { + key: Base64::new(services().globals.keypair().public_key().to_vec()), + }, + ); + let mut response = serde_json::from_slice( + get_server_keys::v2::Response { + server_key: Raw::new(&ServerSigningKeys { + server_name: services().globals.server_name().to_owned(), + verify_keys, + old_verify_keys: BTreeMap::new(), + signatures: BTreeMap::new(), + valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(86400 * 7), + ) + .expect("time is valid"), + }) + .expect("static conversion, no errors"), + } + .try_into_http_response::>() + .unwrap() + .body(), + ) + .unwrap(); + + ruma::signatures::sign_json( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut response, + ) + .unwrap(); + + Ok(Json(response)) +} + +/// # `GET /_matrix/key/v2/server/{keyId}` +/// +/// Gets the public signing keys of this server. +/// +/// - Matrix does not support invalidating public keys, so the key returned by +/// this will be valid +/// forever. +pub async fn get_server_keys_deprecated_route() -> impl IntoResponse { get_server_keys_route().await } + +/// # `POST /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. 
+pub async fn get_public_rooms_filtered_route( + body: Ruma, +) -> Result { + if !services() + .globals + .allow_public_room_directory_over_federation() + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Room directory is not public")); + } + + let response = client_server::get_public_rooms_filtered_helper( + None, + body.limit, + body.since.as_deref(), + &body.filter, + &body.room_network, + ) + .await + .map_err(|e| { + warn!("Failed to return our /publicRooms: {e}"); + Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.") + })?; + + Ok(get_public_rooms_filtered::v1::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }) +} + +/// # `GET /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. +pub async fn get_public_rooms_route( + body: Ruma, +) -> Result { + if !services() + .globals + .allow_public_room_directory_over_federation() + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Room directory is not public")); + } + + let response = client_server::get_public_rooms_filtered_helper( + None, + body.limit, + body.since.as_deref(), + &Filter::default(), + &body.room_network, + ) + .await + .map_err(|e| { + warn!("Failed to return our /publicRooms: {e}"); + Error::BadRequest(ErrorKind::Unknown, "Failed to return this server's public room list.") + })?; + + Ok(get_public_rooms::v1::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }) +} + +pub fn parse_incoming_pdu(pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + warn!("Error parsing incoming event {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") 
+ })?; + + let room_id: OwnedRoomId = value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid room id in pdu"))?; + + let Ok(room_version_id) = services().rooms.state.get_room_version(&room_id) else { + return Err(Error::Error(format!("Server is not in room {room_id}"))); + }; + + let Ok((event_id, value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + }; + + Ok((event_id, value, room_id)) +} + +/// # `PUT /_matrix/federation/v1/send/{txnId}` +/// +/// Push EDUs and PDUs to this server. +pub async fn send_transaction_message_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + // This is all the auth_events that have been recursively fetched so they don't + // have to be deserialized over and over again. + // TODO: make this persist across requests but not in a DB Tree (in globals?) + // TODO: This could potentially also be some sort of trie (suffix tree) like + // structure so that once an auth event is known it would know (using indexes + // maybe) all of the auth events that it references. 
+ // let mut auth_cache = EventMap::new(); + + let txn_start_time = Instant::now(); + let mut parsed_pdus = Vec::with_capacity(body.pdus.len()); + for pdu in &body.pdus { + parsed_pdus.push(match parse_incoming_pdu(pdu) { + Ok(t) => t, + Err(e) => { + warn!("Could not parse PDU: {e}"); + continue; + }, + }); + + // We do not add the event_id field to the pdu here because of signature + // and hashes checks + } + + trace!( + pdus = ?parsed_pdus.len(), + edus = ?body.edus.len(), + elapsed = ?txn_start_time.elapsed(), + id = ?body.transaction_id, + origin =?body.origin, + "Starting txn", + ); + + // We go through all the signatures we see on the PDUs and fetch the + // corresponding signing keys + let pub_key_map = RwLock::new(BTreeMap::new()); + if !parsed_pdus.is_empty() { + services() + .rooms + .event_handler + .fetch_required_signing_keys(parsed_pdus.iter().map(|(_event_id, event, _room_id)| event), &pub_key_map) + .await + .unwrap_or_else(|e| { + warn!("Could not fetch all signatures for PDUs from {}: {:?}", sender_servername, e); + }); + + debug!( + elapsed = ?txn_start_time.elapsed(), + "Fetched signing keys" + ); + } + + let mut resolved_map = BTreeMap::new(); + for (event_id, value, room_id) in parsed_pdus { + let pdu_start_time = Instant::now(); + let mutex = Arc::clone( + services() + .globals + .roomid_mutex_federation + .write() + .await + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + resolved_map.insert( + event_id.clone(), + services() + .rooms + .event_handler + .handle_incoming_pdu(sender_servername, &event_id, &room_id, value, true, &pub_key_map) + .await + .map(|_| ()), + ); + drop(mutex_lock); + + debug!( + pdu_elapsed = ?pdu_start_time.elapsed(), + txn_elapsed = ?txn_start_time.elapsed(), + "Finished PDU {event_id}", + ); + } + + for pdu in &resolved_map { + if let Err(e) = pdu.1 { + if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { + warn!("Incoming PDU failed {:?}", pdu); + } + } + } + + for 
edu in body + .edus + .iter() + .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) + { + match edu { + Edu::Presence(presence) => { + if !services().globals.allow_incoming_presence() { + continue; + } + + for update in presence.push { + services().presence.set_presence( + &update.user_id, + &update.presence, + Some(update.currently_active), + Some(update.last_active_ago), + update.status_msg.clone(), + )?; + } + }, + Edu::Receipt(receipt) => { + if !services().globals.allow_incoming_read_receipts() { + continue; + } + + for (room_id, room_updates) in receipt.receipts { + for (user_id, user_updates) in room_updates.read { + if let Some((event_id, _)) = user_updates + .event_ids + .iter() + .filter_map(|id| { + services() + .rooms + .timeline + .get_pdu_count(id) + .ok() + .flatten() + .map(|r| (id, r)) + }) + .max_by_key(|(_, count)| *count) + { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert(user_id.clone(), user_updates.data); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event_id.to_owned(), receipts); + + let event = ReceiptEvent { + content: ReceiptEventContent(receipt_content), + room_id: room_id.clone(), + }; + services() + .rooms + .read_receipt + .readreceipt_update(&user_id, &room_id, event)?; + } else { + // TODO fetch missing events + debug!("No known event ids in read receipt: {:?}", user_updates); + } + } + } + }, + Edu::Typing(typing) => { + if !services().globals.config.allow_incoming_typing { + continue; + } + + if services() + .rooms + .state_cache + .is_joined(&typing.user_id, &typing.room_id)? 
+ { + if typing.typing { + let timeout = utils::millis_since_unix_epoch() + + services().globals.config.typing_federation_timeout_s * 1000; + services() + .rooms + .typing + .typing_add(&typing.user_id, &typing.room_id, timeout) + .await?; + } else { + services() + .rooms + .typing + .typing_remove(&typing.user_id, &typing.room_id) + .await?; + } + } + }, + Edu::DeviceListUpdate(DeviceListUpdateContent { + user_id, + .. + }) => { + services().users.mark_device_key_update(&user_id)?; + }, + Edu::DirectToDevice(DirectDeviceContent { + sender, + ev_type, + message_id, + messages, + }) => { + // Check if this is a new transaction id + if services() + .transaction_ids + .existing_txnid(&sender, None, &message_id)? + .is_some() + { + continue; + } + + for (target_user_id, map) in &messages { + for (target_device_id_maybe, event) in map { + match target_device_id_maybe { + DeviceIdOrAllDevices::DeviceId(target_device_id) => { + services().users.add_to_device_event( + &sender, + target_user_id, + target_device_id, + &ev_type.to_string(), + event.deserialize_as().map_err(|e| { + warn!("To-Device event is invalid: {event:?} {e}"); + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, + )?; + }, + + DeviceIdOrAllDevices::AllDevices => { + for target_device_id in services().users.all_device_ids(target_user_id) { + services().users.add_to_device_event( + &sender, + target_user_id, + &target_device_id?, + &ev_type.to_string(), + event.deserialize_as().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, + )?; + } + }, + } + } + } + + // Save transaction id with empty data + services() + .transaction_ids + .add_txnid(&sender, None, &message_id, &[])?; + }, + Edu::SigningKeyUpdate(SigningKeyUpdateContent { + user_id, + master_key, + self_signing_key, + }) => { + if user_id.server_name() != sender_servername { + continue; + } + if let Some(master_key) = master_key { + services() + .users + .add_cross_signing_keys(&user_id, 
&master_key, &self_signing_key, &None, true)?; + } + }, + Edu::_Custom(_) => {}, + } + } + + debug!( + pdus = ?body.pdus.len(), + edus = ?body.edus.len(), + elapsed = ?txn_start_time.elapsed(), + id = ?body.transaction_id, + origin =?body.origin, + "Finished txn", + ); + + Ok(send_transaction_message::v1::Response { + pdus: resolved_map + .into_iter() + .map(|(e, r)| (e, r.map_err(|e| e.sanitized_error()))) + .collect(), + }) +} + +/// # `GET /_matrix/federation/v1/event/{eventId}` +/// +/// Retrieves a single event from the server. +/// +/// - Only works if a user of this server is currently invited or joined the +/// room +pub async fn get_event_route(body: Ruma) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let event = services() + .rooms + .timeline + .get_pdu_json(&body.event_id)? + .ok_or_else(|| { + warn!("Event not found, event ID: {:?}", &body.event_id); + Error::BadRequest(ErrorKind::NotFound, "Event not found.") + })?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, room_id)? + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room")); + } + + if !services() + .rooms + .state_accessor + .server_can_see_event(sender_servername, room_id, &body.event_id)? 
+ { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not allowed to see event.")); + } + + Ok(get_event::v1::Response { + origin: services().globals.server_name().to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + pdu: PduEvent::convert_to_outgoing_federation_event(event), + }) +} + +/// # `GET /_matrix/federation/v1/backfill/` +/// +/// Retrieves events from before the sender joined the room, if the room's +/// history visibility allows. +pub async fn get_backfill_route(body: Ruma) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + debug!("Got backfill request from: {}", sender_servername); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let until = body + .v + .iter() + .map(|eventid| services().rooms.timeline.get_pdu_count(eventid)) + .filter_map(|r| r.ok().flatten()) + .max() + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "No known eventid in v"))?; + + let limit = body.limit.min(uint!(100)); + + let all_events = services() + .rooms + .timeline + .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? 
+ .take(limit.try_into().unwrap()); + + let events = all_events + .filter_map(Result::ok) + .filter(|(_, e)| { + matches!( + services() + .rooms + .state_accessor + .server_can_see_event(sender_servername, &e.room_id, &e.event_id,), + Ok(true), + ) + }) + .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id)) + .filter_map(|r| r.ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(); + + Ok(get_backfill::v1::Response { + origin: services().globals.server_name().to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + pdus: events, + }) +} + +/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` +/// +/// Retrieves events that the sender is missing. +pub async fn get_missing_events_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room")); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let mut queued_events = body.latest_events.clone(); + let mut events = Vec::new(); + + let mut i = 0; + while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { + if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? 
{ + let room_id_str = pdu + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let event_room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if event_room_id != body.room_id { + warn!( + "Evil event detected: Event {} found while searching in room {}", + queued_events[i], body.room_id + ); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Evil event detected")); + } + + if body.earliest_events.contains(&queued_events[i]) { + i += 1; + continue; + } + + if !services().rooms.state_accessor.server_can_see_event( + sender_servername, + &body.room_id, + &queued_events[i], + )? { + i += 1; + continue; + } + + queued_events.extend_from_slice( + &serde_json::from_value::>( + serde_json::to_value( + pdu.get("prev_events") + .cloned() + .ok_or_else(|| Error::bad_database("Event in db has no prev_events field."))?, + ) + .expect("canonical json is valid json value"), + ) + .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, + ); + events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); + } + i += 1; + } + + Ok(get_missing_events::v1::Response { + events, + }) +} + +/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` +/// +/// Retrieves the auth chain for a given event. +/// +/// - This does not include the event itself +pub async fn get_event_authorization_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let event = services() + .rooms + .timeline + .get_pdu_json(&body.event_id)? 
+ .ok_or_else(|| { + warn!("Event not found, event ID: {:?}", &body.event_id); + Error::BadRequest(ErrorKind::NotFound, "Event not found.") + })?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + let auth_chain_ids = services() + .rooms + .auth_chain + .event_ids_iter(room_id, vec![Arc::from(&*body.event_id)]) + .await?; + + Ok(get_event_authorization::v1::Response { + auth_chain: auth_chain_ids + .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok()?) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + }) +} + +/// # `GET /_matrix/federation/v1/state/{roomId}` +/// +/// Retrieves the current state of the room. +pub async fn get_room_state_route(body: Ruma) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let shortstatehash = services() + .rooms + .state_accessor + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Pdu state not found."))?; + + let pdus = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await? 
+ .into_values() + .map(|id| { + PduEvent::convert_to_outgoing_federation_event( + services() + .rooms + .timeline + .get_pdu_json(&id) + .unwrap() + .unwrap(), + ) + }) + .collect(); + + let auth_chain_ids = services() + .rooms + .auth_chain + .event_ids_iter(&body.room_id, vec![Arc::from(&*body.event_id)]) + .await?; + + Ok(get_room_state::v1::Response { + auth_chain: auth_chain_ids + .filter_map(|id| { + if let Some(json) = services().rooms.timeline.get_pdu_json(&id).ok()? { + Some(PduEvent::convert_to_outgoing_federation_event(json)) + } else { + error!("Could not find event json for {id} in db."); + None + } + }) + .collect(), + pdus, + }) +} + +/// # `GET /_matrix/federation/v1/state_ids/{roomId}` +/// +/// Retrieves the current state of the room. +pub async fn get_room_state_ids_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Server is not in room.")); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let shortstatehash = services() + .rooms + .state_accessor + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Pdu state not found."))?; + + let pdu_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await? + .into_values() + .map(|id| (*id).to_owned()) + .collect(); + + let auth_chain_ids = services() + .rooms + .auth_chain + .event_ids_iter(&body.room_id, vec![Arc::from(&*body.event_id)]) + .await?; + + Ok(get_room_state_ids::v1::Response { + auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), + pdu_ids, + }) +} + +/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` +/// +/// Creates a join template. 
+pub async fn create_join_event_template_route( + body: Ruma, +) -> Result { + if !services().rooms.metadata.exists(&body.room_id)? { + return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + if services() + .globals + .config + .forbidden_remote_server_names + .contains(sender_servername) + { + warn!( + "Server {sender_servername} for remote user {} tried joining room ID {} which has a server name that is \ + globally forbidden. Rejecting.", + &body.user_id, &body.room_id, + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + + if let Some(server) = body.room_id.server_name() { + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + } + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let join_rules_event = + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + let join_authorized_via_users_server = if let Some(join_rules_event_content) = join_rules_event_content { + if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rules_event_content.join_rule { + if r.allow + .iter() + .filter_map(|rule| { + if 
let AllowRule::RoomMembership(membership) = rule { + Some(membership) + } else { + None + } + }) + .any(|m| { + services() + .rooms + .state_cache + .is_joined(&body.user_id, &m.room_id) + .unwrap_or(false) + }) { + if services() + .rooms + .state_cache + .is_left(&body.user_id, &body.room_id) + .unwrap_or(true) + { + let members: Vec<_> = services() + .rooms + .state_cache + .room_members(&body.room_id) + .filter_map(Result::ok) + .filter(|user| user.server_name() == services().globals.server_name()) + .collect(); + + let mut auth_user = None; + + for user in members { + if services() + .rooms + .state_accessor + .user_can_invite(&body.room_id, &user, &body.user_id, &state_lock) + .await + .unwrap_or(false) + { + auth_user = Some(user); + break; + } + } + if auth_user.is_some() { + auth_user + } else { + return Err(Error::BadRequest( + ErrorKind::UnableToGrantJoin, + "No user on this server is able to assist in joining.", + )); + } + } else { + // If the user has any state other than leave, either: + // - the auth_check will deny them (ban, knock - (until/unless MSC4123 is + // merged)) + // - they are able to join via other methods (invite) + // - they are already in the room (join) + None + } + } else { + return Err(Error::BadRequest( + ErrorKind::UnableToAuthorizeJoin, + "User is not known to be in any required room.", + )); + } + } else { + None + } + } else { + None + }; + + let room_version_id = services().rooms.state.get_room_version(&body.room_id)?; + if !body.ver.contains(&room_version_id) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: room_version_id, + }, + "Room version not supported.", + )); + } + + let content = to_raw_value(&RoomMemberEventContent { + avatar_url: None, + blurhash: None, + displayname: None, + is_direct: None, + membership: MembershipState::Join, + third_party_invite: None, + reason: None, + join_authorized_via_users_server, + }) + .expect("member event is valid value"); + + let (_pdu, mut 
pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &body.user_id, + &body.room_id, + &state_lock, + )?; + + drop(state_lock); + + // room v3 and above removed the "event_id" field from remote PDU format + match room_version_id { + RoomVersionId::V1 | RoomVersionId::V2 => {}, + RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 + | RoomVersionId::V11 => { + pdu_json.remove("event_id"); + }, + _ => { + warn!("Unexpected or unsupported room version {room_version_id}"); + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Unexpected or unsupported room version found", + )); + }, + }; + + Ok(prepare_join_event::v1::Response { + room_version: Some(room_version_id), + event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), + }) +} + +async fn create_join_event( + sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, +) -> Result { + if !services().rooms.metadata.exists(room_id)? { + return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, room_id)?; + + // We need to return the state prior to joining, let's keep a reference to that + // here + let shortstatehash = services() + .rooms + .state + .get_room_shortstatehash(room_id)? 
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Pdu state not found."))?; + + let pub_key_map = RwLock::new(BTreeMap::new()); + // let mut auth_cache = EventMap::new(); + + // We do not add the event_id field to the pdu here because of signature and + // hashes checks + let room_version_id = services().rooms.state.get_room_version(room_id)?; + + let Ok((event_id, mut value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + }; + + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut value, + &room_version_id, + ) + .map_err(|e| { + warn!("Failed to sign event: {e}"); + Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event.") + })?; + + let origin: OwnedServerName = serde_json::from_value( + serde_json::to_value( + value + .get("origin") + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event needs an origin field."))?, + ) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + + services() + .rooms + .event_handler + .fetch_required_signing_keys([&value], &pub_key_map) + .await?; + + let mutex = Arc::clone( + services() + .globals + .roomid_mutex_federation + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let pdu_id: Vec = services() + .rooms + .event_handler + .handle_incoming_pdu(&origin, &event_id, room_id, value.clone(), true, &pub_key_map) + .await? 
+ .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + drop(mutex_lock); + + let state_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .event_ids_iter(room_id, state_ids.values().cloned().collect()) + .await?; + + services().sending.send_pdu_room(room_id, &pdu_id)?; + + Ok(create_join_event::v1::RoomState { + auth_chain: auth_chain_ids + .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + state: state_ids + .iter() + .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + // Event field is required if the room version supports restricted join rules. + event: Some( + to_raw_value(&CanonicalJsonValue::Object(value.clone())) + .expect("To raw json should not fail since only change was adding signature"), + ), + }) +} + +/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. +pub async fn create_join_event_v1_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if services() + .globals + .config + .forbidden_remote_server_names + .contains(sender_servername) + { + warn!( + "Server {sender_servername} tried joining room ID {} who has a server name that is globally forbidden. 
\ + Rejecting.", + &body.room_id, + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + + if let Some(server) = body.room_id.server_name() { + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + { + warn!( + "Server {sender_servername} tried joining room ID {} which has a server name that is globally \ + forbidden. Rejecting.", + &body.room_id, + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + } + + let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; + + Ok(create_join_event::v1::Response { + room_state, + }) +} + +/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. +pub async fn create_join_event_v2_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if services() + .globals + .config + .forbidden_remote_server_names + .contains(sender_servername) + { + warn!( + "Server {sender_servername} tried joining room ID {} who has a server name that is globally forbidden. 
\ + Rejecting.", + &body.room_id, + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + + if let Some(server) = body.room_id.server_name() { + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + } + + let create_join_event::v1::RoomState { + auth_chain, + state, + event, + } = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; + let room_state = create_join_event::v2::RoomState { + members_omitted: false, + auth_chain, + state, + event, + servers_in_room: None, + }; + + Ok(create_join_event::v2::Response { + room_state, + }) +} + +/// # `PUT /_matrix/federation/v1/make_leave/{roomId}/{eventId}` +/// +/// Creates a leave template. +pub async fn create_leave_event_template_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let room_version_id = services().rooms.state.get_room_version(&body.room_id)?; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let content = to_raw_value(&RoomMemberEventContent { + avatar_url: None, + blurhash: None, + displayname: None, + is_direct: None, + membership: MembershipState::Leave, + third_party_invite: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("member event is valid value"); + + let (_pdu, mut pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &body.user_id, 
+ &body.room_id, + &state_lock, + )?; + + drop(state_lock); + + // room v3 and above removed the "event_id" field from remote PDU format + match room_version_id { + RoomVersionId::V1 | RoomVersionId::V2 => {}, + RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 + | RoomVersionId::V11 => { + pdu_json.remove("event_id"); + }, + _ => { + warn!("Unexpected or unsupported room version {room_version_id}"); + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Unexpected or unsupported room version found", + )); + }, + }; + + Ok(prepare_leave_event::v1::Response { + room_version: Some(room_version_id), + event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), + }) +} + +async fn create_leave_event(sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue) -> Result<()> { + if !services().rooms.metadata.exists(room_id)? { + return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server.")); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, room_id)?; + + let pub_key_map = RwLock::new(BTreeMap::new()); + + // We do not add the event_id field to the pdu here because of signature and + // hashes checks + let room_version_id = services().rooms.state.get_room_version(room_id)?; + let Ok((event_id, value)) = gen_event_id_canonical_json(pdu, &room_version_id) else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + }; + + let origin: OwnedServerName = serde_json::from_value( + serde_json::to_value( + value + .get("origin") + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event needs an origin field."))?, + ) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + + let 
mutex = Arc::clone( + services() + .globals + .roomid_mutex_federation + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let pdu_id: Vec = services() + .rooms + .event_handler + .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .await? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + + drop(mutex_lock); + + let servers = services() + .rooms + .state_cache + .room_servers(room_id) + .filter_map(Result::ok) + .filter(|server| &**server != services().globals.server_name()); + + services().sending.send_pdu_servers(servers, &pdu_id)?; + + Ok(()) +} + +/// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}` +/// +/// Submits a signed leave event. +pub async fn create_leave_event_v1_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + create_leave_event(sender_servername, &body.room_id, &body.pdu).await?; + + Ok(create_leave_event::v1::Response::new()) +} + +/// # `PUT /_matrix/federation/v2/send_leave/{roomId}/{eventId}` +/// +/// Submits a signed leave event. +pub async fn create_leave_event_v2_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + create_leave_event(sender_servername, &body.room_id, &body.pdu).await?; + + Ok(create_leave_event::v2::Response::new()) +} + +/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` +/// +/// Invites a remote user to a room. 
+pub async fn create_invite_route(body: Ruma) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + if !services() + .globals + .supported_room_versions() + .contains(&body.room_version) + { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: body.room_version.clone(), + }, + "Server does not support this room version.", + )); + } + + if let Some(server) = body.room_id.server_name() { + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&server.to_owned()) + { + warn!( + "Received federated/remote invite from server {sender_servername} for room ID {} which has a banned \ + server name. Rejecting.", + body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + } + + if services() + .globals + .config + .forbidden_remote_server_names + .contains(&sender_servername.to_owned()) + { + warn!( + "Received federated/remote invite from banned server {sender_servername} for room ID {}. 
Rejecting.", + body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Server is banned on this homeserver.", + )); + } + + if let Some(via) = &body.via { + if via.is_empty() { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "via field must not be empty.")); + } + } + + let mut signed_event = utils::to_canonical_object(&body.event).map_err(|e| { + error!("Failed to convert invite event to canonical JSON: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid.") + })?; + + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut signed_event, + &body.room_version, + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&signed_event, &body.room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + signed_event.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.to_string())); + + let sender: OwnedUserId = serde_json::from_value( + signed_event + .get("sender") + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event had no sender field."))? + .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; + + let invited_user: Box<_> = serde_json::from_value( + signed_event + .get("state_key") + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Event had no state_key field."))? + .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; + + if services().rooms.metadata.is_banned(&body.room_id)? && !services().users.is_admin(&invited_user)? 
{ + info!( + "Received remote invite from server {} for room {} and for user {invited_user}, but room is banned by us.", + &sender_servername, &body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This room is banned on this homeserver.", + )); + } + + if services().globals.block_non_admin_invites() && !services().users.is_admin(&invited_user)? { + info!( + "Received remote invite from server {} for room {} and for user {invited_user} who is not an admin, but \ + \"block_non_admin_invites\" is enabled, rejecting.", + &sender_servername, &body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "This server does not allow room invites.", + )); + } + + let mut invite_state = body.invite_room_state.clone(); + + let mut event: JsonObject = serde_json::from_str(body.event.get()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + + event.insert("event_id".to_owned(), "$placeholder".into()); + + let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| { + warn!("Invalid invite event: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") + })?; + + invite_state.push(pdu.to_stripped_state_event()); + + // If we are active in the room, the remote server will notify us about the join + // via /send + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), &body.room_id)? + { + services().rooms.state_cache.update_membership( + &body.room_id, + &invited_user, + RoomMemberEventContent::new(MembershipState::Invite), + &sender, + Some(invite_state), + body.via.clone(), + true, + )?; + } + + Ok(create_invite::v2::Response { + event: PduEvent::convert_to_outgoing_federation_event(signed_event), + }) +} + +/// # `GET /_matrix/federation/v1/user/devices/{userId}` +/// +/// Gets information on all devices of the user. 
+pub async fn get_devices_route(body: Ruma) -> Result { + if body.user_id.server_name() != services().globals.server_name() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to access user from other server.", + )); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + Ok(get_devices::v1::Response { + user_id: body.user_id.clone(), + stream_id: services() + .users + .get_devicelist_version(&body.user_id)? + .unwrap_or(0) + .try_into() + .expect("version will not grow that large"), + devices: services() + .users + .all_devices_metadata(&body.user_id) + .filter_map(Result::ok) + .filter_map(|metadata| { + let device_id_string = metadata.device_id.as_str().to_owned(); + let device_display_name = if services().globals.allow_device_name_federation() { + metadata.display_name + } else { + Some(device_id_string) + }; + Some(UserDevice { + keys: services() + .users + .get_device_keys(&body.user_id, &metadata.device_id) + .ok()??, + device_id: metadata.device_id, + device_display_name, + }) + }) + .collect(), + master_key: services() + .users + .get_master_key(None, &body.user_id, &|u| u.server_name() == sender_servername)?, + self_signing_key: services() + .users + .get_self_signing_key(None, &body.user_id, &|u| u.server_name() == sender_servername)?, + }) +} + +/// # `GET /_matrix/federation/v1/query/directory` +/// +/// Resolve a room alias to a room id. +pub async fn get_room_information_route( + body: Ruma, +) -> Result { + let room_id = services() + .rooms + .alias + .resolve_local_alias(&body.room_alias)? 
+ .ok_or(Error::BadRequest(ErrorKind::NotFound, "Room alias not found."))?; + + let mut servers: Vec = services() + .rooms + .state_cache + .room_servers(&room_id) + .filter_map(Result::ok) + .collect(); + + servers.sort_unstable(); + servers.dedup(); + + servers.shuffle(&mut rand::thread_rng()); + + // insert our server as the very first choice if in list + if let Some(server_index) = servers + .iter() + .position(|server| server == services().globals.server_name()) + { + servers.remove(server_index); + servers.insert(0, services().globals.server_name().to_owned()); + } + + Ok(get_room_information::v1::Response { + room_id, + servers, + }) +} + +/// # `GET /_matrix/federation/v1/query/profile` +/// +/// +/// Gets information on a profile. +pub async fn get_profile_information_route( + body: Ruma, +) -> Result { + if !services() + .globals + .allow_profile_lookup_federation_requests() + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Profile lookup over federation is not allowed on this homeserver.", + )); + } + + if body.user_id.server_name() != services().globals.server_name() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "User does not belong to this server", + )); + } + + let mut displayname = None; + let mut avatar_url = None; + let mut blurhash = None; + + match &body.field { + Some(ProfileField::DisplayName) => { + displayname = services().users.displayname(&body.user_id)?; + }, + Some(ProfileField::AvatarUrl) => { + avatar_url = services().users.avatar_url(&body.user_id)?; + blurhash = services().users.blurhash(&body.user_id)?; + }, + // TODO: what to do with custom + Some(_) => {}, + None => { + displayname = services().users.displayname(&body.user_id)?; + avatar_url = services().users.avatar_url(&body.user_id)?; + blurhash = services().users.blurhash(&body.user_id)?; + }, + } + + Ok(get_profile_information::v1::Response { + displayname, + avatar_url, + blurhash, + }) +} + +/// # `POST /_matrix/federation/v1/user/keys/query` 
+/// +/// Gets devices and identity keys for the given users. +pub async fn get_keys_route(body: Ruma) -> Result { + if body + .device_keys + .iter() + .any(|(u, _)| u.server_name() != services().globals.server_name()) + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "User does not belong to this server.", + )); + } + + let result = get_keys_helper( + None, + &body.device_keys, + |u| Some(u.server_name()) == body.sender_servername.as_deref(), + services().globals.allow_device_name_federation(), + ) + .await?; + + Ok(get_keys::v1::Response { + device_keys: result.device_keys, + master_keys: result.master_keys, + self_signing_keys: result.self_signing_keys, + }) +} + +/// # `POST /_matrix/federation/v1/user/keys/claim` +/// +/// Claims one-time keys. +pub async fn claim_keys_route(body: Ruma) -> Result { + if body + .one_time_keys + .iter() + .any(|(u, _)| u.server_name() != services().globals.server_name()) + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to access user from other server.", + )); + } + + let result = claim_keys_helper(&body.one_time_keys).await?; + + Ok(claim_keys::v1::Response { + one_time_keys: result.one_time_keys, + }) +} + +/// # `GET /.well-known/matrix/server` +/// +/// Returns the .well-known URL if it is configured, otherwise returns 404. +pub async fn well_known_server(_body: Ruma) -> Result { + Ok(discover_homeserver::Response { + server: match services().globals.well_known_server() { + Some(server_name) => server_name.to_owned(), + None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), + }, + }) +} + +/// # `GET /_matrix/federation/v1/hierarchy/{roomId}` +/// +/// Gets the space tree in a depth-first manner to locate child rooms of a given +/// space. +pub async fn get_hierarchy_route(body: Ruma) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if services().rooms.metadata.exists(&body.room_id)? 
{ + services() + .rooms + .spaces + .get_federation_hierarchy(&body.room_id, sender_servername, body.suggested_only) + .await + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Room does not exist.")) + } +} diff --git a/src/clap.rs b/src/clap.rs new file mode 100644 index 00000000..9bfe475d --- /dev/null +++ b/src/clap.rs @@ -0,0 +1,33 @@ +//! Integration with `clap` + +use std::path::PathBuf; + +use clap::Parser; + +/// Returns the current version of the crate with extra info if supplied +/// +/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to +/// include it in parenthesis after the SemVer version. A common value are git +/// commit hashes. +#[allow(clippy::doc_markdown)] +fn version() -> String { + let cargo_pkg_version = env!("CARGO_PKG_VERSION"); + + match option_env!("CONDUIT_VERSION_EXTRA") { + Some(x) => format!("{} ({})", cargo_pkg_version, x), + None => cargo_pkg_version.to_owned(), + } +} + +/// Commandline arguments +#[derive(Parser, Debug)] +#[clap(version = version(), about, long_about = None)] +pub struct Args { + #[arg(short, long)] + /// Optional argument to the path of a conduwuit config TOML file + pub config: Option, +} + +/// Parse commandline arguments into structured data +#[must_use] +pub fn parse() -> Args { Args::parse() } diff --git a/src/config/check.rs b/src/config/check.rs new file mode 100644 index 00000000..f221cbd5 --- /dev/null +++ b/src/config/check.rs @@ -0,0 +1,166 @@ +#[cfg(unix)] +use std::path::Path; // not unix specific, just only for UNIX sockets stuff and *nix container checks + +use tracing::{debug, error, info, warn}; + +use crate::{utils::error::Error, Config}; + +pub fn check(config: &Config) -> Result<(), Error> { + config.warn_deprecated(); + config.warn_unknown_key(); + + if cfg!(feature = "hardened_malloc") && cfg!(feature = "jemalloc") { + warn!( + "hardened_malloc and jemalloc were built together, this causes neither to be used. 
Conduwuit will still \ + function, but consider rebuilding and pick one as this is now no-op." + ); + } + + if config.unix_socket_path.is_some() && !cfg!(unix) { + return Err(Error::bad_config( + "UNIX socket support is only available on *nix platforms. Please remove \"unix_socket_path\" from your \ + config.", + )); + } + + if config.address.is_loopback() && cfg!(unix) { + debug!( + "Found loopback listening address {}, running checks if we're in a container.", + config.address + ); + + #[cfg(unix)] + if Path::new("/proc/vz").exists() /* Guest */ && !Path::new("/proc/bz").exists() + /* Host */ + { + error!( + "You are detected using OpenVZ with a loopback/localhost listening address of {}. If you are using \ + OpenVZ for containers and you use NAT-based networking to communicate with the host and guest, this \ + will NOT work. Please change this to \"0.0.0.0\". If this is expected, you can ignore.", + config.address + ); + } + + #[cfg(unix)] + if Path::new("/.dockerenv").exists() { + error!( + "You are detected using Docker with a loopback/localhost listening address of {}. If you are using a \ + reverse proxy on the host and require communication to conduwuit in the Docker container via \ + NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". If this is expected, \ + you can ignore.", + config.address + ); + } + + #[cfg(unix)] + if Path::new("/run/.containerenv").exists() { + error!( + "You are detected using Podman with a loopback/localhost listening address of {}. If you are using a \ + reverse proxy on the host and require communication to conduwuit in the Podman container via \ + NAT-based networking, this will NOT work. Please change this to \"0.0.0.0\". If this is expected, \ + you can ignore.", + config.address + ); + } + } + + // rocksdb does not allow max_log_files to be 0 + if config.rocksdb_max_log_files == 0 && cfg!(feature = "rocksdb") { + return Err(Error::bad_config( + "When using RocksDB, rocksdb_max_log_files cannot be 0. 
Please set a value at least 1.", + )); + } + + // yeah, unless the user built a debug build hopefully for local testing only + if config.server_name == "your.server.name" && !cfg!(debug_assertions) { + return Err(Error::bad_config( + "You must specify a valid server name for production usage of conduwuit.", + )); + } + + if cfg!(debug_assertions) { + info!("Note: conduwuit was built without optimisations (i.e. debug build)"); + } + + // check if the user specified a registration token as `""` + if config.registration_token == Some(String::new()) { + return Err(Error::bad_config("Registration token was specified but is empty (\"\")")); + } + + if config.max_request_size < 16384 { + return Err(Error::bad_config("Max request size is less than 16KB. Please increase it.")); + } + + // check if user specified valid IP CIDR ranges on startup + for cidr in &config.ip_range_denylist { + if let Err(e) = ipaddress::IPAddress::parse(cidr) { + error!("Error parsing specified IP CIDR range from string: {e}"); + return Err(Error::bad_config("Error parsing specified IP CIDR ranges from strings")); + } + } + + if config.allow_registration + && !config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse + && config.registration_token.is_none() + { + return Err(Error::bad_config( + "!! You have `allow_registration` enabled without a token configured in your config which means you are \ + allowing ANYONE to register on your conduwuit instance without any 2nd-step (e.g. registration token).\n +If this is not the intended behaviour, please set a registration token with the `registration_token` config option.\n +For security and safety reasons, conduwuit will shut down. 
If you are extra sure this is the desired behaviour you \ + want, please set the following config option to true: +`yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`", + )); + } + + if config.allow_registration + && config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse + && config.registration_token.is_none() + { + warn!( + "Open registration is enabled via setting \ + `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` and `allow_registration` to \ + true without a registration token configured. You are expected to be aware of the risks now.\n + If this is not the desired behaviour, please set a registration token." + ); + } + + if config.allow_outgoing_presence && !config.allow_local_presence { + return Err(Error::bad_config( + "Outgoing presence requires allowing local presence. Please enable \"allow_local_presence\".", + )); + } + + if config + .url_preview_domain_contains_allowlist + .contains(&"*".to_owned()) + { + warn!( + "All URLs are allowed for URL previews via setting \"url_preview_domain_contains_allowlist\" to \"*\". \ + This opens up significant attack surface to your server. You are expected to be aware of the risks by \ + doing this." + ); + } + if config + .url_preview_domain_explicit_allowlist + .contains(&"*".to_owned()) + { + warn!( + "All URLs are allowed for URL previews via setting \"url_preview_domain_explicit_allowlist\" to \"*\". \ + This opens up significant attack surface to your server. You are expected to be aware of the risks by \ + doing this." + ); + } + if config + .url_preview_url_contains_allowlist + .contains(&"*".to_owned()) + { + warn!( + "All URLs are allowed for URL previews via setting \"url_preview_url_contains_allowlist\" to \"*\". This \ + opens up significant attack surface to your server. You are expected to be aware of the risks by doing \ + this." 
+ ); + } + + Ok(()) +} diff --git a/src/config/mod.rs b/src/config/mod.rs new file mode 100644 index 00000000..f6ec335a --- /dev/null +++ b/src/config/mod.rs @@ -0,0 +1,987 @@ +use std::{ + collections::BTreeMap, + fmt::{self, Write as _}, + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::PathBuf, +}; + +use either::{ + Either, + Either::{Left, Right}, +}; +use figment::{ + providers::{Env, Format, Toml}, + Figment, +}; +use itertools::Itertools; +use regex::RegexSet; +use ruma::{ + api::client::discovery::discover_support::ContactRole, OwnedRoomId, OwnedServerName, OwnedUserId, RoomVersionId, +}; +use serde::{de::IgnoredAny, Deserialize}; +use tracing::{debug, error, warn}; +use url::Url; + +use self::proxy::ProxyConfig; +use crate::utils::error::Error; + +mod check; +mod proxy; + +#[derive(Deserialize, Clone, Debug)] +#[serde(transparent)] +pub struct ListeningPort { + #[serde(with = "either::serde_untagged")] + pub ports: Either>, +} + +/// all the config options for conduwuit +#[derive(Clone, Debug, Deserialize)] +#[allow(clippy::struct_excessive_bools)] +pub struct Config { + /// [`IpAddr`] conduwuit will listen on (can be IPv4 or IPv6) + #[serde(default = "default_address")] + pub address: IpAddr, + /// default TCP port(s) conduwuit will listen on + #[serde(default = "default_port")] + pub port: ListeningPort, + pub tls: Option, + pub unix_socket_path: Option, + #[serde(default = "default_unix_socket_perms")] + pub unix_socket_perms: u32, + pub server_name: OwnedServerName, + #[serde(default = "default_database_backend")] + pub database_backend: String, + pub database_path: PathBuf, + pub database_backup_path: Option, + #[serde(default = "default_database_backups_to_keep")] + pub database_backups_to_keep: i16, + #[serde(default = "default_db_cache_capacity_mb")] + pub db_cache_capacity_mb: f64, + #[serde(default = "default_new_user_displayname_suffix")] + pub new_user_displayname_suffix: String, + #[serde(default)] + pub allow_check_for_updates: bool, + + 
#[serde(default = "default_pdu_cache_capacity")] + pub pdu_cache_capacity: u32, + #[serde(default = "default_conduit_cache_capacity_modifier")] + pub conduit_cache_capacity_modifier: f64, + #[serde(default = "default_auth_chain_cache_capacity")] + pub auth_chain_cache_capacity: u32, + #[serde(default = "default_shorteventid_cache_capacity")] + pub shorteventid_cache_capacity: u32, + #[serde(default = "default_eventidshort_cache_capacity")] + pub eventidshort_cache_capacity: u32, + #[serde(default = "default_shortstatekey_cache_capacity")] + pub shortstatekey_cache_capacity: u32, + #[serde(default = "default_statekeyshort_cache_capacity")] + pub statekeyshort_cache_capacity: u32, + #[serde(default = "default_server_visibility_cache_capacity")] + pub server_visibility_cache_capacity: u32, + #[serde(default = "default_user_visibility_cache_capacity")] + pub user_visibility_cache_capacity: u32, + #[serde(default = "default_stateinfo_cache_capacity")] + pub stateinfo_cache_capacity: u32, + #[serde(default = "default_roomid_spacehierarchy_cache_capacity")] + pub roomid_spacehierarchy_cache_capacity: u32, + + #[serde(default = "default_cleanup_second_interval")] + pub cleanup_second_interval: u32, + #[serde(default = "default_dns_cache_entries")] + pub dns_cache_entries: u32, + #[serde(default = "default_dns_min_ttl")] + pub dns_min_ttl: u64, + #[serde(default = "default_dns_min_ttl_nxdomain")] + pub dns_min_ttl_nxdomain: u64, + #[serde(default = "default_dns_attempts")] + pub dns_attempts: u16, + #[serde(default = "default_dns_timeout")] + pub dns_timeout: u64, + #[serde(default = "true_fn")] + pub dns_tcp_fallback: bool, + #[serde(default = "true_fn")] + pub query_all_nameservers: bool, + #[serde(default = "default_max_request_size")] + pub max_request_size: u32, + #[serde(default = "default_max_concurrent_requests")] + pub max_concurrent_requests: u16, + #[serde(default = "default_max_fetch_prev_events")] + pub max_fetch_prev_events: u16, + #[serde(default = 
"default_request_conn_timeout")] + pub request_conn_timeout: u64, + #[serde(default = "default_request_timeout")] + pub request_timeout: u64, + #[serde(default = "default_request_idle_per_host")] + pub request_idle_per_host: u16, + #[serde(default = "default_request_idle_timeout")] + pub request_idle_timeout: u64, + #[serde(default = "default_well_known_conn_timeout")] + pub well_known_conn_timeout: u64, + #[serde(default = "default_well_known_timeout")] + pub well_known_timeout: u64, + #[serde(default = "default_federation_timeout")] + pub federation_timeout: u64, + #[serde(default = "default_federation_idle_per_host")] + pub federation_idle_per_host: u16, + #[serde(default = "default_federation_idle_timeout")] + pub federation_idle_timeout: u64, + #[serde(default = "default_sender_timeout")] + pub sender_timeout: u64, + #[serde(default = "default_sender_idle_timeout")] + pub sender_idle_timeout: u64, + #[serde(default = "default_appservice_timeout")] + pub appservice_timeout: u64, + #[serde(default = "default_appservice_idle_timeout")] + pub appservice_idle_timeout: u64, + #[serde(default = "default_pusher_idle_timeout")] + pub pusher_idle_timeout: u64, + #[serde(default)] + pub allow_registration: bool, + #[serde(default)] + pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool, + pub registration_token: Option, + #[serde(default = "true_fn")] + pub allow_encryption: bool, + #[serde(default = "true_fn")] + pub allow_federation: bool, + #[serde(default)] + pub allow_public_room_directory_over_federation: bool, + #[serde(default)] + pub allow_public_room_directory_without_auth: bool, + #[serde(default)] + pub lockdown_public_room_directory: bool, + #[serde(default)] + pub allow_device_name_federation: bool, + #[serde(default = "true_fn")] + pub allow_profile_lookup_federation_requests: bool, + #[serde(default = "true_fn")] + pub allow_room_creation: bool, + #[serde(default = "true_fn")] + pub allow_unstable_room_versions: bool, + 
#[serde(default = "default_default_room_version")] + pub default_room_version: RoomVersionId, + #[serde(default)] + pub well_known: WellKnownConfig, + #[serde(default)] + pub allow_jaeger: bool, + #[serde(default)] + pub tracing_flame: bool, + #[serde(default)] + pub proxy: ProxyConfig, + pub jwt_secret: Option, + #[serde(default = "default_trusted_servers")] + pub trusted_servers: Vec, + #[serde(default = "true_fn")] + pub query_trusted_key_servers_first: bool, + #[serde(default = "default_log")] + pub log: String, + #[serde(default)] + pub turn_username: String, + #[serde(default)] + pub turn_password: String, + #[serde(default = "Vec::new")] + pub turn_uris: Vec, + #[serde(default)] + pub turn_secret: String, + #[serde(default = "default_turn_ttl")] + pub turn_ttl: u64, + + #[serde(default = "Vec::new")] + pub auto_join_rooms: Vec, + + #[serde(default = "default_rocksdb_log_level")] + pub rocksdb_log_level: String, + #[serde(default)] + pub rocksdb_log_stderr: bool, + #[serde(default = "default_rocksdb_max_log_file_size")] + pub rocksdb_max_log_file_size: usize, + #[serde(default = "default_rocksdb_log_time_to_roll")] + pub rocksdb_log_time_to_roll: usize, + #[serde(default)] + pub rocksdb_optimize_for_spinning_disks: bool, + #[serde(default = "default_rocksdb_parallelism_threads")] + pub rocksdb_parallelism_threads: usize, + #[serde(default = "default_rocksdb_max_log_files")] + pub rocksdb_max_log_files: usize, + #[serde(default = "default_rocksdb_compression_algo")] + pub rocksdb_compression_algo: String, + #[serde(default = "default_rocksdb_compression_level")] + pub rocksdb_compression_level: i32, + #[serde(default = "default_rocksdb_bottommost_compression_level")] + pub rocksdb_bottommost_compression_level: i32, + #[serde(default)] + pub rocksdb_bottommost_compression: bool, + #[serde(default = "default_rocksdb_recovery_mode")] + pub rocksdb_recovery_mode: u8, + #[serde(default)] + pub rocksdb_repair: bool, + #[serde(default)] + pub rocksdb_read_only: bool, 
+ #[serde(default)] + pub rocksdb_periodic_cleanup: bool, + + pub emergency_password: Option, + + #[serde(default = "default_notification_push_path")] + pub notification_push_path: String, + + #[serde(default = "true_fn")] + pub allow_local_presence: bool, + #[serde(default = "true_fn")] + pub allow_incoming_presence: bool, + #[serde(default = "true_fn")] + pub allow_outgoing_presence: bool, + #[serde(default = "default_presence_idle_timeout_s")] + pub presence_idle_timeout_s: u64, + #[serde(default = "default_presence_offline_timeout_s")] + pub presence_offline_timeout_s: u64, + #[serde(default = "true_fn")] + pub presence_timeout_remote_users: bool, + + #[serde(default = "true_fn")] + pub allow_incoming_read_receipts: bool, + #[serde(default = "true_fn")] + pub allow_outgoing_read_receipts: bool, + + #[serde(default = "true_fn")] + pub allow_outgoing_typing: bool, + #[serde(default = "true_fn")] + pub allow_incoming_typing: bool, + #[serde(default = "default_typing_federation_timeout_s")] + pub typing_federation_timeout_s: u64, + #[serde(default = "default_typing_client_timeout_min_s")] + pub typing_client_timeout_min_s: u64, + #[serde(default = "default_typing_client_timeout_max_s")] + pub typing_client_timeout_max_s: u64, + + #[serde(default)] + pub zstd_compression: bool, + #[serde(default)] + pub gzip_compression: bool, + #[serde(default)] + pub brotli_compression: bool, + + #[serde(default)] + pub allow_guest_registration: bool, + #[serde(default)] + pub log_guest_registrations: bool, + #[serde(default)] + pub allow_guests_auto_join_rooms: bool, + + #[serde(default = "Vec::new")] + pub prevent_media_downloads_from: Vec, + #[serde(default = "Vec::new")] + pub forbidden_remote_server_names: Vec, + #[serde(default = "Vec::new")] + pub forbidden_remote_room_directory_server_names: Vec, + + #[serde(default = "default_ip_range_denylist")] + pub ip_range_denylist: Vec, + + #[serde(default = "Vec::new")] + pub url_preview_domain_contains_allowlist: Vec, + 
#[serde(default = "Vec::new")] + pub url_preview_domain_explicit_allowlist: Vec, + #[serde(default = "Vec::new")] + pub url_preview_domain_explicit_denylist: Vec, + #[serde(default = "Vec::new")] + pub url_preview_url_contains_allowlist: Vec, + #[serde(default = "default_url_preview_max_spider_size")] + pub url_preview_max_spider_size: usize, + #[serde(default)] + pub url_preview_check_root_domain: bool, + + #[serde(default = "RegexSet::empty")] + #[serde(with = "serde_regex")] + pub forbidden_alias_names: RegexSet, + + #[serde(default = "RegexSet::empty")] + #[serde(with = "serde_regex")] + pub forbidden_usernames: RegexSet, + + #[serde(default = "true_fn")] + pub startup_netburst: bool, + #[serde(default = "default_startup_netburst_keep")] + pub startup_netburst_keep: i64, + + #[serde(default)] + pub block_non_admin_invites: bool, + + #[serde(default)] + pub sentry: bool, + #[serde(default)] + pub sentry_send_server_name: bool, + #[serde(default = "default_sentry_traces_sample_rate")] + pub sentry_traces_sample_rate: f32, + + #[serde(flatten)] + #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime + pub catchall: BTreeMap, +} + +#[derive(Clone, Debug, Deserialize)] +pub struct TlsConfig { + pub certs: String, + pub key: String, + #[serde(default)] + /// Whether to listen and allow for HTTP and HTTPS connections (insecure!) 
+ /// Only works / does something if the `axum_dual_protocol` feature flag was + /// built + pub dual_protocol: bool, +} + +#[derive(Clone, Debug, Deserialize, Default)] +pub struct WellKnownConfig { + pub client: Option, + pub server: Option, + pub support_page: Option, + pub support_role: Option, + pub support_email: Option, + pub support_mxid: Option, +} + +const DEPRECATED_KEYS: &[&str] = &[ + "cache_capacity", + "well_known_client", + "well_known_server", + "well_known_support_page", + "well_known_support_role", + "well_known_support_email", + "well_known_support_mxid", +]; + +impl Config { + /// Initialize config + pub fn new(path: Option) -> Result { + let raw_config = if let Some(config_file_env) = Env::var("CONDUIT_CONFIG") { + Figment::new() + .merge(Toml::file(config_file_env).nested()) + .merge(Env::prefixed("CONDUIT_").global()) + } else if let Some(config_file_arg) = path { + Figment::new() + .merge(Toml::file(config_file_arg).nested()) + .merge(Env::prefixed("CONDUIT_").global()) + } else { + Figment::new().merge(Env::prefixed("CONDUIT_").global()) + }; + + let config = match raw_config.extract::() { + Err(e) => return Err(Error::BadConfig(format!("{e}"))), + Ok(config) => config, + }; + + check::check(&config)?; + + // don't start if we're listening on both UNIX sockets and TCP at same time + if config.is_dual_listening(&raw_config) { + return Err(Error::bad_config("dual listening on UNIX and TCP sockets not allowed.")); + }; + + Ok(config) + } + + /// Iterates over all the keys in the config file and warns if there is a + /// deprecated key specified + pub fn warn_deprecated(&self) { + debug!("Checking for deprecated config keys"); + let mut was_deprecated = false; + for key in self + .catchall + .keys() + .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) + { + warn!("Config parameter \"{}\" is deprecated, ignoring.", key); + was_deprecated = true; + } + + if was_deprecated { + warn!( + "Read conduwuit config documentation at 
https://conduwuit.puppyirl.gay/configuration.html and check \ + your configuration if any new configuration parameters should be adjusted" + ); + } + } + + /// iterates over all the catchall keys (unknown config options) and warns + /// if there are any. + pub fn warn_unknown_key(&self) { + debug!("Checking for unknown config keys"); + for key in self + .catchall + .keys() + .filter(|key| "config".to_owned().ne(key.to_owned()) /* "config" is expected */) + { + warn!("Config parameter \"{}\" is unknown to conduwuit, ignoring.", key); + } + } + + /// Checks the presence of the `address` and `unix_socket_path` keys in the + /// raw_config, exiting the process if both keys were detected. + fn is_dual_listening(&self, raw_config: &Figment) -> bool { + let check_address = raw_config.find_value("address"); + let check_unix_socket = raw_config.find_value("unix_socket_path"); + + // are the check_address and check_unix_socket keys both Ok (specified) at the + // same time? + if check_address.is_ok() && check_unix_socket.is_ok() { + error!("TOML keys \"address\" and \"unix_socket_path\" were both defined. 
Please specify only one option."); + return true; + } + + false + } + + #[must_use] + pub fn get_bind_addrs(&self) -> Vec { + match &self.port.ports { + Left(port) => { + // Left is only 1 value, so make a vec with 1 value only + let port_vec = [port]; + + port_vec + .iter() + .copied() + .map(|port| SocketAddr::from((self.address, *port))) + .collect::>() + }, + Right(ports) => ports + .iter() + .copied() + .map(|port| SocketAddr::from((self.address, port))) + .collect::>(), + } + } +} + +impl fmt::Display for Config { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Prepare a list of config values to show + let lines = [ + ("Server name", self.server_name.host()), + ("Database backend", &self.database_backend), + ("Database path", &self.database_path.to_string_lossy()), + ( + "Database backup path", + match &self.database_backup_path { + Some(path) => path.to_str().unwrap(), + None => "", + }, + ), + ("Database backups to keep", &self.database_backups_to_keep.to_string()), + ("Database cache capacity (MB)", &self.db_cache_capacity_mb.to_string()), + ("Cache capacity modifier", &self.conduit_cache_capacity_modifier.to_string()), + ("PDU cache capacity", &self.pdu_cache_capacity.to_string()), + ("Auth chain cache capacity", &self.auth_chain_cache_capacity.to_string()), + ("Short eventid cache capacity", &self.shorteventid_cache_capacity.to_string()), + ("Eventid short cache capacity", &self.eventidshort_cache_capacity.to_string()), + ("Short statekey cache capacity", &self.shortstatekey_cache_capacity.to_string()), + ("Statekey short cache capacity", &self.statekeyshort_cache_capacity.to_string()), + ( + "Server visibility cache capacity", + &self.server_visibility_cache_capacity.to_string(), + ), + ( + "User visibility cache capacity", + &self.user_visibility_cache_capacity.to_string(), + ), + ("Stateinfo cache capacity", &self.stateinfo_cache_capacity.to_string()), + ( + "Roomid space hierarchy cache capacity", + 
&self.roomid_spacehierarchy_cache_capacity.to_string(), + ), + ("Cleanup interval in seconds", &self.cleanup_second_interval.to_string()), + ("DNS cache entry limit", &self.dns_cache_entries.to_string()), + ("DNS minimum ttl", &self.dns_min_ttl.to_string()), + ("DNS minimum nxdomain ttl", &self.dns_min_ttl_nxdomain.to_string()), + ("DNS attempts", &self.dns_attempts.to_string()), + ("DNS timeout", &self.dns_timeout.to_string()), + ("DNS fallback to TCP", &self.dns_tcp_fallback.to_string()), + ("Query all nameservers", &self.query_all_nameservers.to_string()), + ("Maximum request size (bytes)", &self.max_request_size.to_string()), + ("Maximum concurrent requests", &self.max_concurrent_requests.to_string()), + ("Request connect timeout", &self.request_conn_timeout.to_string()), + ("Request timeout", &self.request_timeout.to_string()), + ("Idle connections per host", &self.request_idle_per_host.to_string()), + ("Request pool idle timeout", &self.request_idle_timeout.to_string()), + ("Well_known connect timeout", &self.well_known_conn_timeout.to_string()), + ("Well_known timeout", &self.well_known_timeout.to_string()), + ("Federation timeout", &self.federation_timeout.to_string()), + ("Federation pool idle per host", &self.federation_idle_per_host.to_string()), + ("Federation pool idle timeout", &self.federation_idle_timeout.to_string()), + ("Sender timeout", &self.sender_timeout.to_string()), + ("Sender pool idle timeout", &self.sender_idle_timeout.to_string()), + ("Appservice timeout", &self.appservice_timeout.to_string()), + ("Appservice pool idle timeout", &self.appservice_idle_timeout.to_string()), + ("Pusher pool idle timeout", &self.pusher_idle_timeout.to_string()), + ("Allow registration", &self.allow_registration.to_string()), + ( + "Registration token", + match self.registration_token { + Some(_) => "set", + None => "not set (open registration!)", + }, + ), + ( + "Allow guest registration (inherently false if allow registration is false)", + 
&self.allow_guest_registration.to_string(), + ), + ( + "Log guest registrations in admin room", + &self.log_guest_registrations.to_string(), + ), + ( + "Allow guests to auto join rooms", + &self.allow_guests_auto_join_rooms.to_string(), + ), + ("New user display name suffix", &self.new_user_displayname_suffix), + ("Allow encryption", &self.allow_encryption.to_string()), + ("Allow federation", &self.allow_federation.to_string()), + ( + "Allow incoming federated presence requests (updates)", + &self.allow_incoming_presence.to_string(), + ), + ( + "Allow outgoing federated presence requests (updates)", + &self.allow_outgoing_presence.to_string(), + ), + ( + "Allow local presence requests (updates)", + &self.allow_local_presence.to_string(), + ), + ( + "Allow incoming remote read receipts", + &self.allow_incoming_read_receipts.to_string(), + ), + ( + "Allow outgoing remote read receipts", + &self.allow_outgoing_read_receipts.to_string(), + ), + ( + "Block non-admin room invites (local and remote, admins can still send and receive invites)", + &self.block_non_admin_invites.to_string(), + ), + ("Allow outgoing federated typing", &self.allow_outgoing_typing.to_string()), + ("Allow incoming federated typing", &self.allow_incoming_typing.to_string()), + ( + "Incoming federated typing timeout", + &self.typing_federation_timeout_s.to_string(), + ), + ("Client typing timeout minimum", &self.typing_client_timeout_min_s.to_string()), + ("Client typing timeout maxmimum", &self.typing_client_timeout_max_s.to_string()), + ("Allow device name federation", &self.allow_device_name_federation.to_string()), + ( + "Allow incoming profile lookup federation requests", + &self.allow_profile_lookup_federation_requests.to_string(), + ), + ("Notification push path", &self.notification_push_path), + ("Allow room creation", &self.allow_room_creation.to_string()), + ( + "Allow public room directory over federation", + &self.allow_public_room_directory_over_federation.to_string(), + ), + ( + 
"Allow public room directory without authentication", + &self.allow_public_room_directory_without_auth.to_string(), + ), + ( + "Lockdown public room directory (only allow admins to publish)", + &self.lockdown_public_room_directory.to_string(), + ), + ( + "JWT secret", + match self.jwt_secret { + Some(_) => "set", + None => "not set", + }, + ), + ("Trusted key servers", { + let mut lst = vec![]; + for server in &self.trusted_servers { + lst.push(server.host()); + } + &lst.join(", ") + }), + ( + "Query Trusted Key Servers First", + &self.query_trusted_key_servers_first.to_string(), + ), + ( + "TURN username", + if self.turn_username.is_empty() { + "not set" + } else { + &self.turn_username + }, + ), + ("TURN password", { + if self.turn_password.is_empty() { + "not set" + } else { + "set" + } + }), + ("TURN secret", { + if self.turn_secret.is_empty() { + "not set" + } else { + "set" + } + }), + ("Turn TTL", &self.turn_ttl.to_string()), + ("Turn URIs", { + let mut lst = vec![]; + for item in self.turn_uris.iter().cloned().enumerate() { + let (_, uri): (usize, String) = item; + lst.push(uri); + } + &lst.join(", ") + }), + ("Auto Join Rooms", { + let mut lst = vec![]; + for room in &self.auto_join_rooms { + lst.push(room); + } + &lst.into_iter().join(", ") + }), + #[cfg(feature = "zstd_compression")] + ("Zstd HTTP Compression", &self.zstd_compression.to_string()), + #[cfg(feature = "gzip_compression")] + ("Gzip HTTP Compression", &self.gzip_compression.to_string()), + #[cfg(feature = "brotli_compression")] + ("Brotli HTTP Compression", &self.brotli_compression.to_string()), + #[cfg(feature = "rocksdb")] + ("RocksDB database LOG level", &self.rocksdb_log_level), + #[cfg(feature = "rocksdb")] + ("RocksDB database LOG to stderr", &self.rocksdb_log_stderr.to_string()), + #[cfg(feature = "rocksdb")] + ("RocksDB database LOG time-to-roll", &self.rocksdb_log_time_to_roll.to_string()), + #[cfg(feature = "rocksdb")] + ("RocksDB Max LOG Files", 
&self.rocksdb_max_log_files.to_string()), + #[cfg(feature = "rocksdb")] + ( + "RocksDB database max LOG file size", + &self.rocksdb_max_log_file_size.to_string(), + ), + #[cfg(feature = "rocksdb")] + ( + "RocksDB database optimize for spinning disks", + &self.rocksdb_optimize_for_spinning_disks.to_string(), + ), + #[cfg(feature = "rocksdb")] + ("RocksDB Parallelism Threads", &self.rocksdb_parallelism_threads.to_string()), + #[cfg(feature = "rocksdb")] + ("RocksDB Compression Algorithm", &self.rocksdb_compression_algo), + #[cfg(feature = "rocksdb")] + ("RocksDB Compression Level", &self.rocksdb_compression_level.to_string()), + #[cfg(feature = "rocksdb")] + ( + "RocksDB Bottommost Compression Level", + &self.rocksdb_bottommost_compression_level.to_string(), + ), + #[cfg(feature = "rocksdb")] + ( + "RocksDB Bottommost Level Compression", + &self.rocksdb_bottommost_compression.to_string(), + ), + #[cfg(feature = "rocksdb")] + ("RocksDB Recovery Mode", &self.rocksdb_recovery_mode.to_string()), + ("RocksDB Repair Mode", &self.rocksdb_repair.to_string()), + ("RocksDB Read-only Mode", &self.rocksdb_read_only.to_string()), + ("RocksDB Periodic Cleanup", &self.rocksdb_periodic_cleanup.to_string()), + ("Prevent Media Downloads From", { + let mut lst = vec![]; + for domain in &self.prevent_media_downloads_from { + lst.push(domain.host()); + } + &lst.join(", ") + }), + ("Forbidden Remote Server Names (\"Global\" ACLs)", { + let mut lst = vec![]; + for domain in &self.forbidden_remote_server_names { + lst.push(domain.host()); + } + &lst.join(", ") + }), + ("Forbidden Remote Room Directory Server Names", { + let mut lst = vec![]; + for domain in &self.forbidden_remote_room_directory_server_names { + lst.push(domain.host()); + } + &lst.join(", ") + }), + ("Outbound Request IP Range Denylist", { + let mut lst = vec![]; + for item in self.ip_range_denylist.iter().cloned().enumerate() { + let (_, ip): (usize, String) = item; + lst.push(ip); + } + &lst.join(", ") + }), + ("Forbidden 
usernames", { + &self.forbidden_usernames.patterns().iter().join(", ") + }), + ("Forbidden room aliases", { + &self.forbidden_alias_names.patterns().iter().join(", ") + }), + ( + "URL preview domain contains allowlist", + &self.url_preview_domain_contains_allowlist.join(", "), + ), + ( + "URL preview domain explicit allowlist", + &self.url_preview_domain_explicit_allowlist.join(", "), + ), + ( + "URL preview domain explicit denylist", + &self.url_preview_domain_explicit_denylist.join(", "), + ), + ( + "URL preview URL contains allowlist", + &self.url_preview_url_contains_allowlist.join(", "), + ), + ("URL preview maximum spider size", &self.url_preview_max_spider_size.to_string()), + ("URL preview check root domain", &self.url_preview_check_root_domain.to_string()), + ( + "Allow check for updates / announcements check", + &self.allow_check_for_updates.to_string(), + ), + ("Enable netburst on startup", &self.startup_netburst.to_string()), + #[cfg(feature = "sentry_telemetry")] + ("Sentry.io reporting and tracing", &self.sentry.to_string()), + #[cfg(feature = "sentry_telemetry")] + ("Sentry.io send server_name in logs", &self.sentry_send_server_name.to_string()), + #[cfg(feature = "sentry_telemetry")] + ("Sentry.io tracing sample rate", &self.sentry_traces_sample_rate.to_string()), + ( + "Well-known server name", + &if let Some(server) = &self.well_known.server { + server.to_string() + } else { + String::new() + }, + ), + ( + "Well-known support email", + &if let Some(support_email) = &self.well_known.support_email { + support_email.to_string() + } else { + String::new() + }, + ), + ( + "Well-known support Matrix ID", + &if let Some(support_mxid) = &self.well_known.support_mxid { + support_mxid.to_string() + } else { + String::new() + }, + ), + ( + "Well-known support role", + &if let Some(support_role) = &self.well_known.support_role { + support_role.to_string() + } else { + String::new() + }, + ), + ( + "Well-known support page/URL", + &if let Some(support_page) = 
&self.well_known.support_page { + support_page.to_string() + } else { + String::new() + }, + ), + ]; + + let mut msg: String = "Active config values:\n\n".to_owned(); + + for line in lines.into_iter().enumerate() { + let _ = writeln!(msg, "{}: {}", line.1 .0, line.1 .1); + } + + write!(f, "{msg}") + } +} + +fn true_fn() -> bool { true } + +fn default_address() -> IpAddr { Ipv4Addr::LOCALHOST.into() } + +fn default_port() -> ListeningPort { + ListeningPort { + ports: Left(8008), + } +} + +fn default_unix_socket_perms() -> u32 { 660 } + +fn default_database_backups_to_keep() -> i16 { 1 } + +fn default_database_backend() -> String { "rocksdb".to_owned() } + +fn default_db_cache_capacity_mb() -> f64 { 256.0 } + +fn default_pdu_cache_capacity() -> u32 { 150_000 } + +fn default_conduit_cache_capacity_modifier() -> f64 { 1.0 } + +fn default_auth_chain_cache_capacity() -> u32 { 100_000 } + +fn default_shorteventid_cache_capacity() -> u32 { 500_000 } + +fn default_eventidshort_cache_capacity() -> u32 { 100_000 } + +fn default_shortstatekey_cache_capacity() -> u32 { 100_000 } + +fn default_statekeyshort_cache_capacity() -> u32 { 100_000 } + +fn default_server_visibility_cache_capacity() -> u32 { 100 } + +fn default_user_visibility_cache_capacity() -> u32 { 100 } + +fn default_stateinfo_cache_capacity() -> u32 { 100 } + +fn default_roomid_spacehierarchy_cache_capacity() -> u32 { 100 } + +fn default_cleanup_second_interval() -> u32 { + 1800 // every 30 minutes +} + +fn default_dns_cache_entries() -> u32 { 12288 } + +fn default_dns_min_ttl() -> u64 { 60 * 180 } + +fn default_dns_min_ttl_nxdomain() -> u64 { 60 * 60 * 24 } + +fn default_dns_attempts() -> u16 { 10 } + +fn default_dns_timeout() -> u64 { 10 } + +fn default_max_request_size() -> u32 { + 20 * 1024 * 1024 // Default to 20 MB +} + +fn default_max_concurrent_requests() -> u16 { 500 } + +fn default_request_conn_timeout() -> u64 { 10 } + +fn default_request_timeout() -> u64 { 35 } + +fn default_request_idle_per_host() -> 
u16 { 1 } + +fn default_request_idle_timeout() -> u64 { 5 } + +fn default_well_known_conn_timeout() -> u64 { 6 } + +fn default_well_known_timeout() -> u64 { 10 } + +fn default_federation_timeout() -> u64 { 300 } + +fn default_federation_idle_per_host() -> u16 { 1 } + +fn default_federation_idle_timeout() -> u64 { 25 } + +fn default_sender_timeout() -> u64 { 180 } + +fn default_sender_idle_timeout() -> u64 { 180 } + +fn default_appservice_timeout() -> u64 { 120 } + +fn default_appservice_idle_timeout() -> u64 { 300 } + +fn default_pusher_idle_timeout() -> u64 { 15 } + +fn default_max_fetch_prev_events() -> u16 { 100_u16 } + +fn default_trusted_servers() -> Vec { vec![OwnedServerName::try_from("matrix.org").unwrap()] } + +fn default_log() -> String { + // do debug logging by default for debug builds + if cfg!(debug_assertions) { + "debug".to_owned() + } else { + "warn,ruma_state_res=warn".to_owned() + } +} + +fn default_notification_push_path() -> String { "/_matrix/push/v1/notify".to_owned() } + +fn default_turn_ttl() -> u64 { 60 * 60 * 24 } + +fn default_presence_idle_timeout_s() -> u64 { 5 * 60 } + +fn default_presence_offline_timeout_s() -> u64 { 30 * 60 } + +fn default_typing_federation_timeout_s() -> u64 { 30 } + +fn default_typing_client_timeout_min_s() -> u64 { 15 } + +fn default_typing_client_timeout_max_s() -> u64 { 45 } + +fn default_rocksdb_recovery_mode() -> u8 { 1 } + +fn default_rocksdb_log_level() -> String { "error".to_owned() } + +fn default_rocksdb_log_time_to_roll() -> usize { 0 } + +fn default_rocksdb_max_log_files() -> usize { 3 } + +fn default_rocksdb_max_log_file_size() -> usize { + // 4 megabytes + 4 * 1024 * 1024 +} + +fn default_rocksdb_parallelism_threads() -> usize { 0 } + +fn default_rocksdb_compression_algo() -> String { "zstd".to_owned() } + +/// Default RocksDB compression level is 32767, which is internally read by +/// RocksDB as the default magic number and translated to the library's default +/// compression level as they all 
differ. See their `kDefaultCompressionLevel`. +#[allow(clippy::doc_markdown)] +fn default_rocksdb_compression_level() -> i32 { 32767 } + +/// Default RocksDB compression level is 32767, which is internally read by +/// RocksDB as the default magic number and translated to the library's default +/// compression level as they all differ. See their `kDefaultCompressionLevel`. +#[allow(clippy::doc_markdown)] +fn default_rocksdb_bottommost_compression_level() -> i32 { 32767 } + +// I know, it's a great name +pub(crate) fn default_default_room_version() -> RoomVersionId { RoomVersionId::V10 } + +fn default_ip_range_denylist() -> Vec { + vec![ + "127.0.0.0/8".to_owned(), + "10.0.0.0/8".to_owned(), + "172.16.0.0/12".to_owned(), + "192.168.0.0/16".to_owned(), + "100.64.0.0/10".to_owned(), + "192.0.0.0/24".to_owned(), + "169.254.0.0/16".to_owned(), + "192.88.99.0/24".to_owned(), + "198.18.0.0/15".to_owned(), + "192.0.2.0/24".to_owned(), + "198.51.100.0/24".to_owned(), + "203.0.113.0/24".to_owned(), + "224.0.0.0/4".to_owned(), + "::1/128".to_owned(), + "fe80::/10".to_owned(), + "fc00::/7".to_owned(), + "2001:db8::/32".to_owned(), + "ff00::/8".to_owned(), + "fec0::/10".to_owned(), + ] +} + +fn default_url_preview_max_spider_size() -> usize { + 384_000 // 384KB +} + +fn default_new_user_displayname_suffix() -> String { "🏳️‍⚧️".to_owned() } + +fn default_sentry_traces_sample_rate() -> f32 { 0.15 } + +fn default_startup_netburst_keep() -> i64 { 50 } diff --git a/src/core/config/proxy.rs b/src/config/proxy.rs similarity index 71% rename from src/core/config/proxy.rs rename to src/config/proxy.rs index ea388f24..bf9a672d 100644 --- a/src/core/config/proxy.rs +++ b/src/config/proxy.rs @@ -42,11 +42,13 @@ pub enum ProxyConfig { impl ProxyConfig { pub fn to_proxy(&self) -> Result> { Ok(match self.clone() { - | Self::None => None, - | Self::Global { url } => Some(Proxy::all(url)?), - | Self::ByDomain(proxies) => Some(Proxy::custom(move |url| { - // first matching proxy - 
proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() + ProxyConfig::None => None, + ProxyConfig::Global { + url, + } => Some(Proxy::all(url)?), + ProxyConfig::ByDomain(proxies) => Some(Proxy::custom(move |url| { + proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() // first matching + // proxy })), }) } @@ -62,7 +64,6 @@ pub struct PartialProxyConfig { exclude: Vec, } impl PartialProxyConfig { - #[must_use] pub fn for_url(&self, url: &Url) -> Option<&Url> { let domain = url.domain()?; let mut included_because = None; // most specific reason it was included @@ -74,26 +75,24 @@ impl PartialProxyConfig { for wc_domain in &self.include { if wc_domain.matches(domain) { match included_because { - | Some(prev) if !wc_domain.more_specific_than(prev) => (), - | _ => included_because = Some(wc_domain), + Some(prev) if !wc_domain.more_specific_than(prev) => (), + _ => included_because = Some(wc_domain), } } } for wc_domain in &self.exclude { if wc_domain.matches(domain) { match excluded_because { - | Some(prev) if !wc_domain.more_specific_than(prev) => (), - | _ => excluded_because = Some(wc_domain), + Some(prev) if !wc_domain.more_specific_than(prev) => (), + _ => excluded_because = Some(wc_domain), } } } match (included_because, excluded_because) { - | (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), /* included for - * a more specific - * reason */ + (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), /* included for a more specific reason */ // than excluded - | (Some(_), None) => Some(&self.url), - | _ => None, + (Some(_), None) => Some(&self.url), + _ => None, } } } @@ -108,34 +107,33 @@ enum WildCardedDomain { impl WildCardedDomain { fn matches(&self, domain: &str) -> bool { match self { - | Self::WildCard => true, - | Self::WildCarded(d) => domain.ends_with(d), - | Self::Exact(d) => domain == d, + WildCardedDomain::WildCard => true, + WildCardedDomain::WildCarded(d) => domain.ends_with(d), + WildCardedDomain::Exact(d) => 
domain == d, } } fn more_specific_than(&self, other: &Self) -> bool { match (self, other) { - | (Self::WildCard, Self::WildCard) => false, - | (_, Self::WildCard) => true, - | (Self::Exact(a), Self::WildCarded(_)) => other.matches(a), - | (Self::WildCarded(a), Self::WildCarded(b)) => a != b && a.ends_with(b), - | _ => false, + (WildCardedDomain::WildCard, WildCardedDomain::WildCard) => false, + (_, WildCardedDomain::WildCard) => true, + (WildCardedDomain::Exact(a), WildCardedDomain::WildCarded(_)) => other.matches(a), + (WildCardedDomain::WildCarded(a), WildCardedDomain::WildCarded(b)) => a != b && a.ends_with(b), + _ => false, } } } impl std::str::FromStr for WildCardedDomain { type Err = std::convert::Infallible; - #[allow(clippy::string_slice)] fn from_str(s: &str) -> Result { // maybe do some domain validation? Ok(if s.starts_with("*.") { - Self::WildCarded(s[1..].to_owned()) + WildCardedDomain::WildCarded(s[1..].to_owned()) } else if s == "*" { - Self::WildCarded(String::new()) + WildCardedDomain::WildCarded(String::new()) } else { - Self::Exact(s.to_owned()) + WildCardedDomain::Exact(s.to_owned()) }) } } diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml deleted file mode 100644 index f42b049b..00000000 --- a/src/core/Cargo.toml +++ /dev/null @@ -1,124 +0,0 @@ -[package] -name = "conduwuit_core" -categories.workspace = true -description.workspace = true -edition.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version.workspace = true - -[lib] -path = "mod.rs" -crate-type = [ - "rlib", -# "dylib", -] - -[features] -brotli_compression = [ - "reqwest/brotli", -] -conduwuit_mods = [ - "dep:libloading" -] -gzip_compression = [ - "reqwest/gzip", -] -hardened_malloc = [ - "dep:hardened_malloc-rs" -] -jemalloc = [ - "dep:tikv-jemalloc-sys", - "dep:tikv-jemalloc-ctl", - "dep:tikv-jemallocator", -] -jemalloc_conf = [] -jemalloc_prof = [ - "tikv-jemalloc-sys/profiling", -] -jemalloc_stats = 
[ - "tikv-jemalloc-sys/stats", - "tikv-jemalloc-ctl/stats", - "tikv-jemallocator/stats", -] -perf_measurements = [] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", -] -sentry_telemetry = [] -zstd_compression = [ - "reqwest/zstd", -] - -[dependencies] -argon2.workspace = true -arrayvec.workspace = true -axum.workspace = true -axum-extra.workspace = true -bytes.workspace = true -bytesize.workspace = true -cargo_toml.workspace = true -checked_ops.workspace = true -chrono.workspace = true -clap.workspace = true -conduwuit-macros.workspace = true -const-str.workspace = true -core_affinity.workspace = true -ctor.workspace = true -cyborgtime.workspace = true -either.workspace = true -figment.workspace = true -futures.workspace = true -http-body-util.workspace = true -http.workspace = true -ipaddress.workspace = true -itertools.workspace = true -libc.workspace = true -libloading.workspace = true -libloading.optional = true -log.workspace = true -num-traits.workspace = true -rand.workspace = true -regex.workspace = true -reqwest.workspace = true -ring.workspace = true -ruma.workspace = true -sanitize-filename.workspace = true -serde_json.workspace = true -serde_regex.workspace = true -serde_yaml.workspace = true -serde.workspace = true -smallvec.workspace = true -smallstr.workspace = true -thiserror.workspace = true -tikv-jemallocator.optional = true -tikv-jemallocator.workspace = true -tikv-jemalloc-ctl.optional = true -tikv-jemalloc-ctl.workspace = true -tikv-jemalloc-sys.optional = true -tikv-jemalloc-sys.workspace = true -tokio.workspace = true -tokio-metrics.workspace = true -toml.workspace = true -tracing-core.workspace = true -tracing-subscriber.workspace = true -tracing.workspace = true -url.workspace = true - -[target.'cfg(unix)'.dependencies] -nix.workspace = true - -[target.'cfg(all(not(target_env = "msvc"), target_os = "linux"))'.dependencies] 
-hardened_malloc-rs.workspace = true -hardened_malloc-rs.optional = true - -[dev-dependencies] -maplit.workspace = true - -[lints] -workspace = true diff --git a/src/core/alloc/default.rs b/src/core/alloc/default.rs deleted file mode 100644 index 65354b7d..00000000 --- a/src/core/alloc/default.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! Default allocator with no special features - -/// Always returns Ok -pub fn trim>>(_: I) -> crate::Result { Ok(()) } - -/// Always returns None -#[must_use] -pub fn memory_stats(_opts: &str) -> Option { None } - -/// Always returns None -#[must_use] -pub fn memory_usage() -> Option { None } diff --git a/src/core/alloc/hardened.rs b/src/core/alloc/hardened.rs deleted file mode 100644 index 5f850673..00000000 --- a/src/core/alloc/hardened.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! hmalloc allocator - -#[global_allocator] -static HMALLOC: hardened_malloc_rs::HardenedMalloc = hardened_malloc_rs::HardenedMalloc; - -pub fn trim>>(_: I) -> crate::Result { Ok(()) } - -#[must_use] -//TODO: get usage -pub fn memory_usage() -> Option { None } - -#[must_use] -pub fn memory_stats(_opts: &str) -> Option { - Some("Extended statistics are not available from hardened_malloc.".to_owned()) -} diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs deleted file mode 100644 index 2424e99c..00000000 --- a/src/core/alloc/je.rs +++ /dev/null @@ -1,391 +0,0 @@ -//! 
jemalloc allocator - -use std::{ - cell::OnceCell, - ffi::{CStr, c_char, c_void}, - fmt::Debug, - sync::RwLock, -}; - -use arrayvec::ArrayVec; -use tikv_jemalloc_ctl as mallctl; -use tikv_jemalloc_sys as ffi; -use tikv_jemallocator as jemalloc; - -use crate::{ - Result, err, is_equal_to, is_nonzero, - utils::{math, math::Tried}, -}; - -#[cfg(feature = "jemalloc_conf")] -#[unsafe(no_mangle)] -pub static malloc_conf: &[u8] = const_str::concat_bytes!( - "lg_extent_max_active_fit:4", - ",oversize_threshold:16777216", - ",tcache_max:2097152", - ",dirty_decay_ms:16000", - ",muzzy_decay_ms:144000", - ",percpu_arena:percpu", - ",metadata_thp:always", - ",background_thread:true", - ",max_background_threads:-1", - MALLOC_CONF_PROF, - 0 -); - -#[cfg(all(feature = "jemalloc_conf", feature = "jemalloc_prof"))] -const MALLOC_CONF_PROF: &str = ",prof_active:false"; -#[cfg(all(feature = "jemalloc_conf", not(feature = "jemalloc_prof")))] -const MALLOC_CONF_PROF: &str = ""; - -#[global_allocator] -static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc; -static CONTROL: RwLock<()> = RwLock::new(()); - -type Name = ArrayVec; -type Key = ArrayVec; - -const NAME_MAX: usize = 128; -const KEY_SEGS: usize = 8; - -#[crate::ctor] -fn _static_initialization() { - acq_epoch().expect("pre-initialization of jemalloc failed"); - acq_epoch().expect("pre-initialization of jemalloc failed"); -} - -#[must_use] -#[cfg(feature = "jemalloc_stats")] -pub fn memory_usage() -> Option { - use mallctl::stats; - - let mibs = |input: Result| { - let input = input.unwrap_or_default(); - let kibs = input / 1024; - let kibs = u32::try_from(kibs).unwrap_or_default(); - let kibs = f64::from(kibs); - kibs / 1024.0 - }; - - // Acquire the epoch; ensure latest stats are pulled in - acq_epoch().ok()?; - - let allocated = mibs(stats::allocated::read()); - let active = mibs(stats::active::read()); - let mapped = mibs(stats::mapped::read()); - let metadata = mibs(stats::metadata::read()); - let resident = 
mibs(stats::resident::read()); - let retained = mibs(stats::retained::read()); - Some(format!( - "allocated: {allocated:.2} MiB\nactive: {active:.2} MiB\nmapped: {mapped:.2} \ - MiB\nmetadata: {metadata:.2} MiB\nresident: {resident:.2} MiB\nretained: {retained:.2} \ - MiB\n" - )) -} - -#[must_use] -#[cfg(not(feature = "jemalloc_stats"))] -pub fn memory_usage() -> Option { None } - -pub fn memory_stats(opts: &str) -> Option { - const MAX_LENGTH: usize = 1_048_576; - - let mut str = String::new(); - let opaque = std::ptr::from_mut(&mut str).cast::(); - let opts_p: *const c_char = std::ffi::CString::new(opts) - .expect("cstring") - .into_raw() - .cast_const(); - - // Acquire the epoch; ensure latest stats are pulled in - acq_epoch().ok()?; - - // SAFETY: calls malloc_stats_print() with our string instance which must remain - // in this frame. https://docs.rs/tikv-jemalloc-sys/latest/tikv_jemalloc_sys/fn.malloc_stats_print.html - unsafe { ffi::malloc_stats_print(Some(malloc_stats_cb), opaque, opts_p) }; - - str.truncate(MAX_LENGTH); - - Some(str) -} - -unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { - // SAFETY: we have to trust the opaque points to our String - let res: &mut String = unsafe { - opaque - .cast::() - .as_mut() - .expect("failed to cast void* to &mut String") - }; - - // SAFETY: we have to trust the string is null terminated. - let msg = unsafe { CStr::from_ptr(msg) }; - - let msg = String::from_utf8_lossy(msg.to_bytes()); - res.push_str(msg.as_ref()); -} - -macro_rules! mallctl { - ($name:expr_2021) => {{ - thread_local! { - static KEY: OnceCell = OnceCell::default(); - }; - - KEY.with(|once| { - once.get_or_init(move || key($name).expect("failed to translate name into mib key")) - .clone() - }) - }}; -} - -pub mod this_thread { - use super::{Debug, Key, OnceCell, Result, is_nonzero, key, math}; - - thread_local! 
{ - static ALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; - static DEALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; - } - - pub fn trim() -> Result { decay().and_then(|()| purge()) } - - pub fn purge() -> Result { notify(mallctl!("arena.0.purge")) } - - pub fn decay() -> Result { notify(mallctl!("arena.0.decay")) } - - pub fn idle() -> Result { super::notify(&mallctl!("thread.idle")) } - - pub fn flush() -> Result { super::notify(&mallctl!("thread.tcache.flush")) } - - pub fn set_muzzy_decay(decay_ms: isize) -> Result { - set(mallctl!("arena.0.muzzy_decay_ms"), decay_ms) - } - - pub fn get_muzzy_decay() -> Result { get(mallctl!("arena.0.muzzy_decay_ms")) } - - pub fn set_dirty_decay(decay_ms: isize) -> Result { - set(mallctl!("arena.0.dirty_decay_ms"), decay_ms) - } - - pub fn get_dirty_decay() -> Result { get(mallctl!("arena.0.dirty_decay_ms")) } - - pub fn cache_enable(enable: bool) -> Result { - super::set::(&mallctl!("thread.tcache.enabled"), enable.into()).map(is_nonzero!()) - } - - pub fn is_cache_enabled() -> Result { - super::get::(&mallctl!("thread.tcache.enabled")).map(is_nonzero!()) - } - - pub fn set_arena(id: usize) -> Result { - super::set::(&mallctl!("thread.arena"), id.try_into()?).and_then(math::try_into) - } - - pub fn arena_id() -> Result { - super::get::(&mallctl!("thread.arena")).and_then(math::try_into) - } - - pub fn prof_enable(enable: bool) -> Result { - super::set::(&mallctl!("thread.prof.active"), enable.into()).map(is_nonzero!()) - } - - pub fn is_prof_enabled() -> Result { - super::get::(&mallctl!("thread.prof.active")).map(is_nonzero!()) - } - - pub fn reset_peak() -> Result { super::notify(&mallctl!("thread.peak.reset")) } - - pub fn peak() -> Result { super::get(&mallctl!("thread.peak.read")) } - - #[inline] - #[must_use] - pub fn allocated() -> u64 { - *ALLOCATED_BYTES.with(|once| init_tls_cell(once, "thread.allocatedp")) - } - - #[inline] - #[must_use] - pub fn deallocated() -> u64 { - 
*DEALLOCATED_BYTES.with(|once| init_tls_cell(once, "thread.deallocatedp")) - } - - fn notify(key: Key) -> Result { super::notify_by_arena(Some(arena_id()?), key) } - - fn set(key: Key, val: T) -> Result - where - T: Copy + Debug, - { - super::set_by_arena(Some(arena_id()?), key, val) - } - - fn get(key: Key) -> Result - where - T: Copy + Debug, - { - super::get_by_arena(Some(arena_id()?), key) - } - - fn init_tls_cell(cell: &OnceCell<&'static u64>, name: &str) -> &'static u64 { - cell.get_or_init(|| { - let ptr: *const u64 = super::get(&mallctl!(name)).expect("failed to obtain pointer"); - - // SAFETY: ptr points directly to the internal state of jemalloc for this thread - unsafe { ptr.as_ref() }.expect("pointer must not be null") - }) - } -} - -pub fn stats_reset() -> Result { notify(&mallctl!("stats.mutexes.reset")) } - -pub fn prof_reset() -> Result { notify(&mallctl!("prof.reset")) } - -pub fn prof_enable(enable: bool) -> Result { - set::(&mallctl!("prof.active"), enable.into()).map(is_nonzero!()) -} - -pub fn is_prof_enabled() -> Result { - get::(&mallctl!("prof.active")).map(is_nonzero!()) -} - -pub fn trim> + Copy>(arena: I) -> Result { - decay(arena).and_then(|()| purge(arena)) -} - -pub fn purge>>(arena: I) -> Result { - notify_by_arena(arena.into(), mallctl!("arena.4096.purge")) -} - -pub fn decay>>(arena: I) -> Result { - notify_by_arena(arena.into(), mallctl!("arena.4096.decay")) -} - -pub fn set_muzzy_decay>>(arena: I, decay_ms: isize) -> Result { - match arena.into() { - | Some(arena) => - set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms), - | _ => set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms), - } -} - -pub fn set_dirty_decay>>(arena: I, decay_ms: isize) -> Result { - match arena.into() { - | Some(arena) => - set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms), - | _ => set(&mallctl!("arenas.dirty_decay_ms"), decay_ms), - } -} - -#[inline] -#[must_use] -pub fn is_affine_arena() -> bool { 
is_percpu_arena() || is_phycpu_arena() } - -#[inline] -#[must_use] -pub fn is_percpu_arena() -> bool { percpu_arenas().is_ok_and(is_equal_to!("percpu")) } - -#[inline] -#[must_use] -pub fn is_phycpu_arena() -> bool { percpu_arenas().is_ok_and(is_equal_to!("phycpu")) } - -pub fn percpu_arenas() -> Result<&'static str> { - let ptr = get::<*const c_char>(&mallctl!("opt.percpu_arena"))?; - //SAFETY: ptr points to a null-terminated string returned for opt.percpu_arena. - let cstr = unsafe { CStr::from_ptr(ptr) }; - cstr.to_str().map_err(Into::into) -} - -pub fn arenas() -> Result { - get::(&mallctl!("arenas.narenas")).and_then(math::try_into) -} - -pub fn inc_epoch() -> Result { xchg(&mallctl!("epoch"), 1_u64) } - -pub fn acq_epoch() -> Result { xchg(&mallctl!("epoch"), 0_u64) } - -fn notify_by_arena(id: Option, mut key: Key) -> Result { - key[1] = id.unwrap_or(4096); - notify(&key) -} - -fn set_by_arena(id: Option, mut key: Key, val: T) -> Result -where - T: Copy + Debug, -{ - key[1] = id.unwrap_or(4096); - set(&key, val) -} - -fn get_by_arena(id: Option, mut key: Key) -> Result -where - T: Copy + Debug, -{ - key[1] = id.unwrap_or(4096); - get(&key) -} - -fn notify(key: &Key) -> Result { xchg(key, ()) } - -fn set(key: &Key, val: T) -> Result -where - T: Copy + Debug, -{ - let _lock = CONTROL.write()?; - let res = xchg(key, val)?; - inc_epoch()?; - - Ok(res) -} - -#[tracing::instrument( - name = "get", - level = "trace" - skip_all, - fields(?key) -)] -fn get(key: &Key) -> Result -where - T: Copy + Debug, -{ - acq_epoch()?; - acq_epoch()?; - - // SAFETY: T must be perfectly valid to receive value. - unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err) -} - -#[tracing::instrument( - name = "xchg", - level = "trace" - skip_all, - fields(?key, ?val) -)] -fn xchg(key: &Key, val: T) -> Result -where - T: Copy + Debug, -{ - // SAFETY: T must be the exact expected type. 
- unsafe { mallctl::raw::update_mib(key.as_slice(), val) }.map_err(map_err) -} - -fn key(name: &str) -> Result { - // tikv asserts the output buffer length is tight to the number of required mibs - // so we slice that down here. - let segs = name.chars().filter(is_equal_to!(&'.')).count().try_add(1)?; - - let name = self::name(name)?; - let mut buf = [0_usize; KEY_SEGS]; - mallctl::raw::name_to_mib(name.as_slice(), &mut buf[0..segs]) - .map_err(map_err) - .map(move |()| buf.into_iter().take(segs).collect()) -} - -fn name(name: &str) -> Result { - let mut buf = Name::new(); - buf.try_extend_from_slice(name.as_bytes())?; - buf.try_extend_from_slice(b"\0")?; - - Ok(buf) -} - -fn map_err(error: tikv_jemalloc_ctl::Error) -> crate::Error { - err!("mallctl: {}", error.to_string()) -} diff --git a/src/core/alloc/mod.rs b/src/core/alloc/mod.rs deleted file mode 100644 index 0ed1b1a6..00000000 --- a/src/core/alloc/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -//! Integration with allocators - -// jemalloc -#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -pub mod je; -#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -pub use je::{memory_stats, memory_usage, trim}; - -#[cfg(all(not(target_env = "msvc"), feature = "hardened_malloc", not(feature = "jemalloc")))] -pub mod hardened; -#[cfg(all( - not(target_env = "msvc"), - feature = "hardened_malloc", - not(feature = "jemalloc") -))] -pub use hardened::{memory_stats, memory_usage, trim}; - -#[cfg(any( - target_env = "msvc", - all(not(feature = "hardened_malloc"), not(feature = "jemalloc")) -))] -pub mod default; -#[cfg(any( - target_env = "msvc", - all(not(feature = "hardened_malloc"), not(feature = "jemalloc")) -))] -pub use default::{memory_stats, memory_usage, trim}; diff --git a/src/core/config/check.rs b/src/core/config/check.rs deleted file mode 100644 index f9d51eeb..00000000 --- a/src/core/config/check.rs +++ /dev/null @@ -1,325 +0,0 @@ -use std::env::consts::OS; - -use either::Either; -use 
figment::Figment; - -use super::DEPRECATED_KEYS; -use crate::{Config, Err, Result, Server, debug, debug_info, debug_warn, error, warn}; - -/// Performs check() with additional checks specific to reloading old config -/// with new config. -pub fn reload(old: &Config, new: &Config) -> Result { - check(new)?; - - if new.server_name != old.server_name { - return Err!(Config( - "server_name", - "You can't change the server's name from {:?}.", - old.server_name - )); - } - - Ok(()) -} - -#[allow(clippy::cognitive_complexity)] -pub fn check(config: &Config) -> Result { - if cfg!(debug_assertions) { - warn!("Note: conduwuit was built without optimisations (i.e. debug build)"); - } - - if config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure { - warn!("\n\nWARNING: \n\nTLS CERTIFICATE VALIDATION IS DISABLED, THIS IS HIGHLY INSECURE AND SHOULD NOT BE USED IN PRODUCTION.\n\n"); - } - - warn_deprecated(config); - warn_unknown_key(config); - - if config.sentry && config.sentry_endpoint.is_none() { - return Err!(Config( - "sentry_endpoint", - "Sentry cannot be enabled without an endpoint set" - )); - } - - if cfg!(all(feature = "hardened_malloc", feature = "jemalloc", not(target_env = "msvc"))) { - debug_warn!( - "hardened_malloc and jemalloc compile-time features are both enabled, this causes \ - jemalloc to be used." - ); - } - - if cfg!(not(unix)) && config.unix_socket_path.is_some() { - return Err!(Config( - "unix_socket_path", - "UNIX socket support is only available on *nix platforms. Please remove \ - 'unix_socket_path' from your config." 
- )); - } - - if config.unix_socket_path.is_none() && config.get_bind_hosts().is_empty() { - return Err!(Config("address", "No TCP addresses were specified to listen on")); - } - - if config.unix_socket_path.is_none() && config.get_bind_ports().is_empty() { - return Err!(Config("port", "No ports were specified to listen on")); - } - - if config.unix_socket_path.is_none() { - config.get_bind_addrs().iter().for_each(|addr| { - use std::path::Path; - - if addr.ip().is_loopback() { - debug_info!( - "Found loopback listening address {addr}, running checks if we're in a \ - container." - ); - - if Path::new("/proc/vz").exists() /* Guest */ && !Path::new("/proc/bz").exists() - /* Host */ - { - error!( - "You are detected using OpenVZ with a loopback/localhost listening \ - address of {addr}. If you are using OpenVZ for containers and you use \ - NAT-based networking to communicate with the host and guest, this will \ - NOT work. Please change this to \"0.0.0.0\". If this is expected, you \ - can ignore.", - ); - } else if Path::new("/.dockerenv").exists() { - error!( - "You are detected using Docker with a loopback/localhost listening \ - address of {addr}. If you are using a reverse proxy on the host and \ - require communication to conduwuit in the Docker container via \ - NAT-based networking, this will NOT work. Please change this to \ - \"0.0.0.0\". If this is expected, you can ignore.", - ); - } else if Path::new("/run/.containerenv").exists() { - error!( - "You are detected using Podman with a loopback/localhost listening \ - address of {addr}. If you are using a reverse proxy on the host and \ - require communication to conduwuit in the Podman container via \ - NAT-based networking, this will NOT work. Please change this to \ - \"0.0.0.0\". 
If this is expected, you can ignore.", - ); - } - } - }); - } - - // rocksdb does not allow max_log_files to be 0 - if config.rocksdb_max_log_files == 0 { - return Err!(Config( - "max_log_files", - "rocksdb_max_log_files cannot be 0. Please set a value at least 1." - )); - } - - // yeah, unless the user built a debug build hopefully for local testing only - if cfg!(not(debug_assertions)) && config.server_name == "your.server.name" { - return Err!(Config( - "server_name", - "You must specify a valid server name for production usage of conduwuit." - )); - } - - if config.emergency_password == Some(String::from("F670$2CP@Hw8mG7RY1$%!#Ic7YA")) { - return Err!(Config( - "emergency_password", - "The public example emergency password is being used, this is insecure. Please \ - change this." - )); - } - - if config.emergency_password == Some(String::new()) { - return Err!(Config( - "emergency_password", - "Emergency password was set to an empty string, this is not valid. Unset \ - emergency_password to disable it or set it to a real password." - )); - } - - // check if the user specified a registration token as `""` - if config.registration_token == Some(String::new()) { - return Err!(Config( - "registration_token", - "Registration token was specified but is empty (\"\")" - )); - } - - // check if we can read the token file path, and check if the file is empty - if config.registration_token_file.as_ref().is_some_and(|path| { - let Ok(token) = std::fs::read_to_string(path).inspect_err(|e| { - error!("Failed to read the registration token file: {e}"); - }) else { - return true; - }; - - token == String::new() - }) { - return Err!(Config( - "registration_token_file", - "Registration token file was specified but is empty or failed to be read" - )); - } - - if config.max_request_size < 10_000_000 { - return Err!(Config( - "max_request_size", - "Max request size is less than 10MB. Please increase it as this is too low for \ - operable federation." 
- )); - } - - // check if user specified valid IP CIDR ranges on startup - for cidr in &config.ip_range_denylist { - if let Err(e) = ipaddress::IPAddress::parse(cidr) { - return Err!(Config( - "ip_range_denylist", - "Parsing specified IP CIDR range from string failed: {e}." - )); - } - } - - if config.allow_registration - && !config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse - && config.registration_token.is_none() - && config.registration_token_file.is_none() - { - return Err!(Config( - "registration_token", - "!! You have `allow_registration` enabled without a token configured in your config \ - which means you are allowing ANYONE to register on your conduwuit instance without \ - any 2nd-step (e.g. registration token). If this is not the intended behaviour, \ - please set a registration token. For security and safety reasons, conduwuit will \ - shut down. If you are extra sure this is the desired behaviour you want, please \ - set the following config option to true: -`yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse`" - )); - } - - if config.allow_registration - && config.yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse - && config.registration_token.is_none() - && config.registration_token_file.is_none() - { - warn!( - "Open registration is enabled via setting \ - `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` and \ - `allow_registration` to true without a registration token configured. You are \ - expected to be aware of the risks now. If this is not the desired behaviour, \ - please set a registration token." - ); - } - - if config.allow_outgoing_presence && !config.allow_local_presence { - return Err!(Config( - "allow_local_presence", - "Outgoing presence requires allowing local presence. Please enable \ - 'allow_local_presence' or disable outgoing presence." 
- )); - } - - if config - .url_preview_domain_contains_allowlist - .contains(&"*".to_owned()) - { - warn!( - "All URLs are allowed for URL previews via setting \ - \"url_preview_domain_contains_allowlist\" to \"*\". This opens up significant \ - attack surface to your server. You are expected to be aware of the risks by doing \ - this." - ); - } - if config - .url_preview_domain_explicit_allowlist - .contains(&"*".to_owned()) - { - warn!( - "All URLs are allowed for URL previews via setting \ - \"url_preview_domain_explicit_allowlist\" to \"*\". This opens up significant \ - attack surface to your server. You are expected to be aware of the risks by doing \ - this." - ); - } - if config - .url_preview_url_contains_allowlist - .contains(&"*".to_owned()) - { - warn!( - "All URLs are allowed for URL previews via setting \ - \"url_preview_url_contains_allowlist\" to \"*\". This opens up significant attack \ - surface to your server. You are expected to be aware of the risks by doing this." - ); - } - - if let Some(Either::Right(_)) = config.url_preview_bound_interface.as_ref() { - if !matches!(OS, "android" | "fuchsia" | "linux") { - return Err!(Config( - "url_preview_bound_interface", - "Not a valid IP address. Interface names not supported on {OS}." 
- )); - } - } - - if !Server::available_room_versions() - .any(|(version, _)| version == config.default_room_version) - { - return Err!(Config( - "default_room_version", - "Room version {:?} is not available", - config.default_room_version - )); - } - - Ok(()) -} - -/// Iterates over all the keys in the config file and warns if there is a -/// deprecated key specified -fn warn_deprecated(config: &Config) { - debug!("Checking for deprecated config keys"); - let mut was_deprecated = false; - for key in config - .catchall - .keys() - .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) - { - warn!("Config parameter \"{}\" is deprecated, ignoring.", key); - was_deprecated = true; - } - - if was_deprecated { - warn!( - "Read conduwuit config documentation at https://conduwuit.puppyirl.gay/configuration.html and check your \ - configuration if any new configuration parameters should be adjusted" - ); - } -} - -/// iterates over all the catchall keys (unknown config options) and warns -/// if there are any. -fn warn_unknown_key(config: &Config) { - debug!("Checking for unknown config keys"); - for key in config - .catchall - .keys() - .filter(|key| "config".to_owned().ne(key.to_owned()) /* "config" is expected */) - { - warn!("Config parameter \"{}\" is unknown to conduwuit, ignoring.", key); - } -} - -/// Checks the presence of the `address` and `unix_socket_path` keys in the -/// raw_config, exiting the process if both keys were detected. -pub(super) fn is_dual_listening(raw_config: &Figment) -> Result<()> { - let contains_address = raw_config.contains("address"); - let contains_unix_socket = raw_config.contains("unix_socket_path"); - if contains_address && contains_unix_socket { - return Err!( - "TOML keys \"address\" and \"unix_socket_path\" were both defined. Please specify \ - only one option." 
- ); - } - - Ok(()) -} diff --git a/src/core/config/manager.rs b/src/core/config/manager.rs deleted file mode 100644 index e55916ba..00000000 --- a/src/core/config/manager.rs +++ /dev/null @@ -1,128 +0,0 @@ -use std::{ - cell::{Cell, RefCell}, - ops::Deref, - ptr, - ptr::null_mut, - sync::{ - Arc, - atomic::{AtomicPtr, Ordering}, - }, -}; - -use super::Config; -use crate::{Result, implement}; - -/// The configuration manager is an indirection to reload the configuration for -/// the server while it is running. In order to not burden or clutter the many -/// callsites which query for configuration items, this object implements Deref -/// for the actively loaded configuration. -pub struct Manager { - active: AtomicPtr, -} - -thread_local! { - static INDEX: Cell = 0.into(); - static HANDLE: RefCell = const { - RefCell::new([const { None }; HISTORY]) - }; -} - -type Handle = Option>; -type Handles = [Handle; HISTORY]; - -const HISTORY: usize = 8; - -impl Manager { - pub(crate) fn new(config: Config) -> Self { - let config = Arc::new(config); - Self { - active: AtomicPtr::new(Arc::into_raw(config).cast_mut()), - } - } -} - -impl Drop for Manager { - fn drop(&mut self) { - let config = self.active.swap(null_mut(), Ordering::AcqRel); - - // SAFETY: The active pointer was set using an Arc::into_raw(). We're obliged to - // reconstitute that into Arc otherwise it will leak. - unsafe { Arc::from_raw(config) }; - } -} - -impl Deref for Manager { - type Target = Arc; - - fn deref(&self) -> &Self::Target { HANDLE.with_borrow_mut(|handle| self.load(handle)) } -} - -/// Update the active configuration, returning prior configuration. -#[implement(Manager)] -#[tracing::instrument(skip_all)] -pub fn update(&self, config: Config) -> Result> { - let config = Arc::new(config); - let new = Arc::into_raw(config); - let old = self.active.swap(new.cast_mut(), Ordering::AcqRel); - - // SAFETY: The old active pointer was set using an Arc::into_raw(). 
We're - // obliged to reconstitute that into Arc otherwise it will leak. - Ok(unsafe { Arc::from_raw(old) }) -} - -#[implement(Manager)] -fn load(&self, handle: &mut [Option>]) -> &'static Arc { - let config = self.active.load(Ordering::Acquire); - - // Branch taken after config reload or first access by this thread. - if handle[INDEX.get()] - .as_ref() - .is_none_or(|handle| !ptr::eq(config, Arc::as_ptr(handle))) - { - INDEX.set(INDEX.get().wrapping_add(1).wrapping_rem(HISTORY)); - return load_miss(handle, INDEX.get(), config); - } - - let config: &Arc = handle[INDEX.get()] - .as_ref() - .expect("handle was already cached for this thread"); - - // SAFETY: The caller should not hold multiple references at a time directly - // into Config, as a subsequent reference might invalidate the thread's cache - // causing another reference to dangle. - // - // This is a highly unusual pattern as most config values are copied by value or - // used immediately without running overlap with another value. Even if it does - // actually occur somewhere, the window of danger is limited to the config being - // reloaded while the reference is held and another access is made by the same - // thread into a different config value. This is mitigated by creating a buffer - // of old configs rather than discarding at the earliest opportunity; the odds - // of this scenario are thus astronomical. - unsafe { std::mem::transmute(config) } -} - -#[tracing::instrument( - name = "miss", - level = "trace", - skip_all, - fields(%index, ?config) -)] -#[allow(clippy::transmute_ptr_to_ptr)] -fn load_miss( - handle: &mut [Option>], - index: usize, - config: *const Config, -) -> &'static Arc { - // SAFETY: The active pointer was set prior and always remains valid. We're - // reconstituting the Arc here but as a new reference, so the count is - // incremented. This instance will be cached in the thread-local. 
- let config = unsafe { - Arc::increment_strong_count(config); - Arc::from_raw(config) - }; - - // SAFETY: See the note on the transmute above. The caller should not hold more - // than one reference at a time directly into Config, as the second access - // might invalidate the thread's cache, dangling the reference to the first. - unsafe { std::mem::transmute(handle[index].insert(config)) } -} diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs deleted file mode 100644 index 5374c2c2..00000000 --- a/src/core/config/mod.rs +++ /dev/null @@ -1,2312 +0,0 @@ -pub mod check; -pub mod manager; -pub mod proxy; - -use std::{ - collections::{BTreeMap, BTreeSet}, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, - path::{Path, PathBuf}, -}; - -use conduwuit_macros::config_example_generator; -use either::{ - Either, - Either::{Left, Right}, -}; -use figment::providers::{Env, Format, Toml}; -pub use figment::{Figment, value::Value as FigmentValue}; -use regex::RegexSet; -use ruma::{ - OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, RoomVersionId, - api::client::discovery::discover_support::ContactRole, -}; -use serde::{Deserialize, de::IgnoredAny}; -use url::Url; - -use self::proxy::ProxyConfig; -pub use self::{check::check, manager::Manager}; -use crate::{Result, err, error::Error, utils::sys}; - -/// All the config options for conduwuit. -#[allow(clippy::struct_excessive_bools)] -#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] -#[derive(Clone, Debug, Deserialize)] -#[config_example_generator( - filename = "conduwuit-example.toml", - section = "global", - undocumented = "# This item is undocumented. Please contribute documentation for it.", - header = r#"### conduwuit Configuration -### -### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE -### OVERWRITTEN! -### -### You should rename this file before configuring your server. Changes to -### documentation and defaults can be contributed in source code at -### src/core/config/mod.rs. 
This file is generated when building. -### -### Any values pre-populated are the default values for said config option. -### -### At the minimum, you MUST edit all the config options to your environment -### that say "YOU NEED TO EDIT THIS". -### -### For more information, see: -### https://conduwuit.puppyirl.gay/configuration.html -"#, - ignore = "catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure" -)] -pub struct Config { - /// The server_name is the pretty name of this server. It is used as a - /// suffix for user and room IDs/aliases. - /// - /// See the docs for reverse proxying and delegation: - /// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy - /// - /// Also see the `[global.well_known]` config section at the very bottom. - /// - /// Examples of delegation: - /// - https://puppygock.gay/.well-known/matrix/server - /// - https://puppygock.gay/.well-known/matrix/client - /// - /// YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE - /// WIPE. - /// - /// example: "conduwuit.woof" - pub server_name: OwnedServerName, - - /// The default address (IPv4 or IPv6) conduwuit will listen on. - /// - /// If you are using Docker or a container NAT networking setup, this must - /// be "0.0.0.0". - /// - /// To listen on multiple addresses, specify a vector e.g. ["127.0.0.1", - /// "::1"] - /// - /// default: ["127.0.0.1", "::1"] - #[serde(default = "default_address")] - address: ListeningAddr, - - /// The port(s) conduwuit will listen on. - /// - /// For reverse proxying, see: - /// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy - /// - /// If you are using Docker, don't change this, you'll need to map an - /// external port to this. - /// - /// To listen on multiple ports, specify a vector e.g. 
[8080, 8448] - /// - /// default: 8008 - #[serde(default = "default_port")] - port: ListeningPort, - - // external structure; separate section - #[serde(default)] - pub tls: TlsConfig, - - /// The UNIX socket conduwuit will listen on. - /// - /// conduwuit cannot listen on both an IP address and a UNIX socket. If - /// listening on a UNIX socket, you MUST remove/comment the `address` key. - /// - /// Remember to make sure that your reverse proxy has access to this socket - /// file, either by adding your reverse proxy to the 'conduwuit' group or - /// granting world R/W permissions with `unix_socket_perms` (666 minimum). - /// - /// example: "/run/conduwuit/conduwuit.sock" - pub unix_socket_path: Option, - - /// The default permissions (in octal) to create the UNIX socket with. - /// - /// default: 660 - #[serde(default = "default_unix_socket_perms")] - pub unix_socket_perms: u32, - - /// This is the only directory where conduwuit will save its data, including - /// media. Note: this was previously "/var/lib/matrix-conduit". - /// - /// YOU NEED TO EDIT THIS. - /// - /// example: "/var/lib/conduwuit" - pub database_path: PathBuf, - - /// conduwuit supports online database backups using RocksDB's Backup engine - /// API. To use this, set a database backup path that conduwuit can write - /// to. - /// - /// For more information, see: - /// https://conduwuit.puppyirl.gay/maintenance.html#backups - /// - /// example: "/opt/conduwuit-db-backups" - pub database_backup_path: Option, - - /// The amount of online RocksDB database backups to keep/retain, if using - /// "database_backup_path", before deleting the oldest one. - /// - /// default: 1 - #[serde(default = "default_database_backups_to_keep")] - pub database_backups_to_keep: i16, - - /// Text which will be added to the end of the user's displayname upon - /// registration with a space before the text. In Conduit, this was the - /// lightning bolt emoji. - /// - /// To disable, set this to "" (an empty string). 
- /// - /// The default is the trans pride flag. - /// - /// example: "🏳️‍⚧️" - /// - /// default: "🏳️‍⚧️" - #[serde(default = "default_new_user_displayname_suffix")] - pub new_user_displayname_suffix: String, - - /// If enabled, conduwuit will send a simple GET request periodically to - /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new - /// announcements or major updates. This is not an update check endpoint. - /// - /// default: true - #[serde(alias = "allow_check_for_updates", default = "true_fn")] - pub allow_announcements_check: bool, - - /// Set this to any float value to multiply conduwuit's in-memory LRU caches - /// with such as "auth_chain_cache_capacity". - /// - /// May be useful if you have significant memory to spare to increase - /// performance. - /// - /// If you have low memory, reducing this may be viable. - /// - /// By default, the individual caches such as "auth_chain_cache_capacity" - /// are scaled by your CPU core count. - /// - /// default: 1.0 - #[serde( - default = "default_cache_capacity_modifier", - alias = "conduit_cache_capacity_modifier" - )] - pub cache_capacity_modifier: f64, - - /// Set this to any float value in megabytes for conduwuit to tell the - /// database engine that this much memory is available for database read - /// caches. - /// - /// May be useful if you have significant memory to spare to increase - /// performance. - /// - /// Similar to the individual LRU caches, this is scaled up with your CPU - /// core count. - /// - /// This defaults to 128.0 + (64.0 * CPU core count). - /// - /// default: varies by system - #[serde(default = "default_db_cache_capacity_mb")] - pub db_cache_capacity_mb: f64, - - /// Set this to any float value in megabytes for conduwuit to tell the - /// database engine that this much memory is available for database write - /// caches. - /// - /// May be useful if you have significant memory to spare to increase - /// performance. 
- /// - /// Similar to the individual LRU caches, this is scaled up with your CPU - /// core count. - /// - /// This defaults to 48.0 + (4.0 * CPU core count). - /// - /// default: varies by system - #[serde(default = "default_db_write_buffer_capacity_mb")] - pub db_write_buffer_capacity_mb: f64, - - /// default: varies by system - #[serde(default = "default_pdu_cache_capacity")] - pub pdu_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_auth_chain_cache_capacity")] - pub auth_chain_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_shorteventid_cache_capacity")] - pub shorteventid_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_eventidshort_cache_capacity")] - pub eventidshort_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_eventid_pdu_cache_capacity")] - pub eventid_pdu_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_shortstatekey_cache_capacity")] - pub shortstatekey_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_statekeyshort_cache_capacity")] - pub statekeyshort_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_servernameevent_data_cache_capacity")] - pub servernameevent_data_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_stateinfo_cache_capacity")] - pub stateinfo_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_roomid_spacehierarchy_cache_capacity")] - pub roomid_spacehierarchy_cache_capacity: u32, - - /// Maximum entries stored in DNS memory-cache. The size of an entry may - /// vary so please take care if raising this value excessively. Only - /// decrease this when using an external DNS cache. Please note that - /// systemd-resolved does *not* count as an external cache, even when - /// configured to do so. 
- /// - /// default: 32768 - #[serde(default = "default_dns_cache_entries")] - pub dns_cache_entries: u32, - - /// Minimum time-to-live in seconds for entries in the DNS cache. The - /// default may appear high to most administrators; this is by design as the - /// majority of NXDOMAINs are correct for a long time (e.g. the server is no - /// longer running Matrix). Only decrease this if you are using an external - /// DNS cache. - /// - /// default: 10800 - #[serde(default = "default_dns_min_ttl")] - pub dns_min_ttl: u64, - - /// Minimum time-to-live in seconds for NXDOMAIN entries in the DNS cache. - /// This value is critical for the server to federate efficiently. - /// NXDOMAIN's are assumed to not be returning to the federation and - /// aggressively cached rather than constantly rechecked. - /// - /// Defaults to 3 days as these are *very rarely* false negatives. - /// - /// default: 259200 - #[serde(default = "default_dns_min_ttl_nxdomain")] - pub dns_min_ttl_nxdomain: u64, - - /// Number of DNS nameserver retries after a timeout or error. - /// - /// default: 10 - #[serde(default = "default_dns_attempts")] - pub dns_attempts: u16, - - /// The number of seconds to wait for a reply to a DNS query. Please note - /// that recursive queries can take up to several seconds for some domains, - /// so this value should not be too low, especially on slower hardware or - /// resolvers. - /// - /// default: 10 - #[serde(default = "default_dns_timeout")] - pub dns_timeout: u64, - - /// Fallback to TCP on DNS errors. Set this to false if unsupported by - /// nameserver. - #[serde(default = "true_fn")] - pub dns_tcp_fallback: bool, - - /// Enable to query all nameservers until the domain is found. Referred to - /// as "trust_negative_responses" in hickory_resolver. This can avoid - /// useless DNS queries if the first nameserver responds with NXDOMAIN or - /// an empty NOERROR response. 
- #[serde(default = "true_fn")] - pub query_all_nameservers: bool, - - /// Enable using *only* TCP for querying your specified nameservers instead - /// of UDP. - /// - /// If you are running conduwuit in a container environment, this config - /// option may need to be enabled. For more details, see: - /// https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker - #[serde(default)] - pub query_over_tcp_only: bool, - - /// DNS A/AAAA record lookup strategy - /// - /// Takes a number of one of the following options: - /// 1 - Ipv4Only (Only query for A records, no AAAA/IPv6) - /// - /// 2 - Ipv6Only (Only query for AAAA records, no A/IPv4) - /// - /// 3 - Ipv4AndIpv6 (Query for A and AAAA records in parallel, uses whatever - /// returns a successful response first) - /// - /// 4 - Ipv6thenIpv4 (Query for AAAA record, if that fails then query the A - /// record) - /// - /// 5 - Ipv4thenIpv6 (Query for A record, if that fails then query the AAAA - /// record) - /// - /// If you don't have IPv6 networking, then for better DNS performance it - /// may be suitable to set this to Ipv4Only (1) as you will never ever use - /// the AAAA record contents even if the AAAA record is successful instead - /// of the A record. - /// - /// default: 5 - #[serde(default = "default_ip_lookup_strategy")] - pub ip_lookup_strategy: u8, - - /// Max request size for file uploads in bytes. Defaults to 20MB. - /// - /// default: 20971520 - #[serde(default = "default_max_request_size")] - pub max_request_size: usize, - - /// default: 192 - #[serde(default = "default_max_fetch_prev_events")] - pub max_fetch_prev_events: u16, - - /// Default/base connection timeout (seconds). This is used only by URL - /// previews and update/news endpoint checks. - /// - /// default: 10 - #[serde(default = "default_request_conn_timeout")] - pub request_conn_timeout: u64, - - /// Default/base request timeout (seconds). 
The time waiting to receive more - /// data from another server. This is used only by URL previews, - /// update/news, and misc endpoint checks. - /// - /// default: 35 - #[serde(default = "default_request_timeout")] - pub request_timeout: u64, - - /// Default/base request total timeout (seconds). The time limit for a whole - /// request. This is set very high to not cancel healthy requests while - /// serving as a backstop. This is used only by URL previews and update/news - /// endpoint checks. - /// - /// default: 320 - #[serde(default = "default_request_total_timeout")] - pub request_total_timeout: u64, - - /// Default/base idle connection pool timeout (seconds). This is used only - /// by URL previews and update/news endpoint checks. - /// - /// default: 5 - #[serde(default = "default_request_idle_timeout")] - pub request_idle_timeout: u64, - - /// Default/base max idle connections per host. This is used only by URL - /// previews and update/news endpoint checks. Defaults to 1 as generally the - /// same open connection can be re-used. - /// - /// default: 1 - #[serde(default = "default_request_idle_per_host")] - pub request_idle_per_host: u16, - - /// Federation well-known resolution connection timeout (seconds). - /// - /// default: 6 - #[serde(default = "default_well_known_conn_timeout")] - pub well_known_conn_timeout: u64, - - /// Federation HTTP well-known resolution request timeout (seconds). - /// - /// default: 10 - #[serde(default = "default_well_known_timeout")] - pub well_known_timeout: u64, - - /// Federation client request timeout (seconds). You most definitely want - /// this to be high to account for extremely large room joins, slow - /// homeservers, your own resources etc. - /// - /// default: 300 - #[serde(default = "default_federation_timeout")] - pub federation_timeout: u64, - - /// Federation client idle connection pool timeout (seconds). 
- /// - /// default: 25 - #[serde(default = "default_federation_idle_timeout")] - pub federation_idle_timeout: u64, - - /// Federation client max idle connections per host. Defaults to 1 as - /// generally the same open connection can be re-used. - /// - /// default: 1 - #[serde(default = "default_federation_idle_per_host")] - pub federation_idle_per_host: u16, - - /// Federation sender request timeout (seconds). The time it takes for the - /// remote server to process sent transactions can take a while. - /// - /// default: 180 - #[serde(default = "default_sender_timeout")] - pub sender_timeout: u64, - - /// Federation sender idle connection pool timeout (seconds). - /// - /// default: 180 - #[serde(default = "default_sender_idle_timeout")] - pub sender_idle_timeout: u64, - - /// Federation sender transaction retry backoff limit (seconds). - /// - /// default: 86400 - #[serde(default = "default_sender_retry_backoff_limit")] - pub sender_retry_backoff_limit: u64, - - /// Appservice URL request connection timeout. Defaults to 35 seconds as - /// generally appservices are hosted within the same network. - /// - /// default: 35 - #[serde(default = "default_appservice_timeout")] - pub appservice_timeout: u64, - - /// Appservice URL idle connection pool timeout (seconds). - /// - /// default: 300 - #[serde(default = "default_appservice_idle_timeout")] - pub appservice_idle_timeout: u64, - - /// Notification gateway pusher idle connection pool timeout. - /// - /// default: 15 - #[serde(default = "default_pusher_idle_timeout")] - pub pusher_idle_timeout: u64, - - /// Maximum time to receive a request from a client (seconds). - /// - /// default: 75 - #[serde(default = "default_client_receive_timeout")] - pub client_receive_timeout: u64, - - /// Maximum time to process a request received from a client (seconds). 
- /// - /// default: 180 - #[serde(default = "default_client_request_timeout")] - pub client_request_timeout: u64, - - /// Maximum time to transmit a response to a client (seconds) - /// - /// default: 120 - #[serde(default = "default_client_response_timeout")] - pub client_response_timeout: u64, - - /// Grace period for clean shutdown of client requests (seconds). - /// - /// default: 10 - #[serde(default = "default_client_shutdown_timeout")] - pub client_shutdown_timeout: u64, - - /// Grace period for clean shutdown of federation requests (seconds). - /// - /// default: 5 - #[serde(default = "default_sender_shutdown_timeout")] - pub sender_shutdown_timeout: u64, - - /// Enables registration. If set to false, no users can register on this - /// server. - /// - /// If set to true without a token configured, users can register with no - /// form of 2nd-step only if you set the following option to true: - /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` - /// - /// If you would like registration only via token reg, please configure - /// `registration_token` or `registration_token_file`. - #[serde(default)] - pub allow_registration: bool, - - /// Enabling this setting opens registration to anyone without restrictions. - /// This makes your server vulnerable to abuse - #[serde(default)] - pub yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse: bool, - - /// A static registration token that new users will have to provide when - /// creating an account. If unset and `allow_registration` is true, - /// you must set - /// `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` - /// to true to allow open registration without any conditions. - /// - /// YOU NEED TO EDIT THIS OR USE registration_token_file. 
- /// - /// example: "o&^uCtes4HPf0Vu@F20jQeeWE7" - /// - /// display: sensitive - pub registration_token: Option, - - /// Path to a file on the system that gets read for additional registration - /// tokens. Multiple tokens can be added if you separate them with - /// whitespace - /// - /// conduwuit must be able to access the file, and it must not be empty - /// - /// example: "/etc/conduwuit/.reg_token" - pub registration_token_file: Option, - - /// Controls whether encrypted rooms and events are allowed. - #[serde(default = "true_fn")] - pub allow_encryption: bool, - - /// Controls whether federation is allowed or not. It is not recommended to - /// disable this after the fact due to potential federation breakage. - #[serde(default = "true_fn")] - pub allow_federation: bool, - - /// Allows federation requests to be made to itself - /// - /// This isn't intended and is very likely a bug if federation requests are - /// being sent to yourself. This currently mainly exists for development - /// purposes. - #[serde(default)] - pub federation_loopback: bool, - - /// Always calls /forget on behalf of the user if leaving a room. This is a - /// part of MSC4267 "Automatically forgetting rooms on leave" - #[serde(default)] - pub forget_forced_upon_leave: bool, - - /// Set this to true to require authentication on the normally - /// unauthenticated profile retrieval endpoints (GET) - /// "/_matrix/client/v3/profile/{userId}". - /// - /// This can prevent profile scraping. - #[serde(default)] - pub require_auth_for_profile_requests: bool, - - /// Set this to true to allow your server's public room directory to be - /// federated. Set this to false to protect against /publicRooms spiders, - /// but will forbid external users from viewing your server's public room - /// directory. If federation is disabled entirely (`allow_federation`), this - /// is inherently false. 
- #[serde(default)] - pub allow_public_room_directory_over_federation: bool, - - /// Set this to true to allow your server's public room directory to be - /// queried without client authentication (access token) through the Client - /// APIs. Set this to false to protect against /publicRooms spiders. - #[serde(default)] - pub allow_public_room_directory_without_auth: bool, - - /// Allow guests/unauthenticated users to access TURN credentials. - /// - /// This is the equivalent of Synapse's `turn_allow_guests` config option. - /// This allows any unauthenticated user to call the endpoint - /// `/_matrix/client/v3/voip/turnServer`. - /// - /// It is unlikely you need to enable this as all major clients support - /// authentication for this endpoint and prevents misuse of your TURN server - /// from potential bots. - #[serde(default)] - pub turn_allow_guests: bool, - - /// Set this to true to lock down your server's public room directory and - /// only allow admins to publish rooms to the room directory. Unpublishing - /// is still allowed by all users with this enabled. - #[serde(default)] - pub lockdown_public_room_directory: bool, - - /// Set this to true to allow federating device display names / allow - /// external users to see your device display name. If federation is - /// disabled entirely (`allow_federation`), this is inherently false. For - /// privacy reasons, this is best left disabled. - #[serde(default)] - pub allow_device_name_federation: bool, - - /// Config option to allow or disallow incoming federation requests that - /// obtain the profiles of our local users from - /// `/_matrix/federation/v1/query/profile` - /// - /// Increases privacy of your local user's such as display names, but some - /// remote users may get a false "this user does not exist" error when they - /// try to invite you to a DM or room. Also can protect against profile - /// spiders. 
- /// - /// This is inherently false if `allow_federation` is disabled - #[serde(default = "true_fn", alias = "allow_profile_lookup_federation_requests")] - pub allow_inbound_profile_lookup_federation_requests: bool, - - /// Allow standard users to create rooms. Appservices and admins are always - /// allowed to create rooms - #[serde(default = "true_fn")] - pub allow_room_creation: bool, - - /// Set to false to disable users from joining or creating room versions - /// that aren't officially supported by conduwuit. - /// - /// conduwuit officially supports room versions 6 - 11. - /// - /// conduwuit has slightly experimental (though works fine in practice) - /// support for versions 3 - 5. - #[serde(default = "true_fn")] - pub allow_unstable_room_versions: bool, - - /// Default room version conduwuit will create rooms with. - /// - /// Per spec, room version 11 is the default. - /// - /// default: 11 - #[serde(default = "default_default_room_version")] - pub default_room_version: RoomVersionId, - - // external structure; separate section - #[serde(default)] - pub well_known: WellKnownConfig, - - #[serde(default)] - pub allow_jaeger: bool, - - /// default: "info" - #[serde(default = "default_jaeger_filter")] - pub jaeger_filter: String, - - /// If the 'perf_measurements' compile-time feature is enabled, enables - /// collecting folded stack trace profile of tracing spans using - /// tracing_flame. The resulting profile can be visualized with inferno[1], - /// speedscope[2], or a number of other tools. 
- /// - /// [1]: https://github.com/jonhoo/inferno - /// [2]: www.speedscope.app - #[serde(default)] - pub tracing_flame: bool, - - /// default: "info" - #[serde(default = "default_tracing_flame_filter")] - pub tracing_flame_filter: String, - - /// default: "./tracing.folded" - #[serde(default = "default_tracing_flame_output_path")] - pub tracing_flame_output_path: String, - - #[cfg(not(doctest))] - /// Examples: - /// - /// - No proxy (default): - /// - /// proxy = "none" - /// - /// - For global proxy, create the section at the bottom of this file: - /// - /// [global.proxy] - /// global = { url = "socks5h://localhost:9050" } - /// - /// - To proxy some domains: - /// - /// [global.proxy] - /// [[global.proxy.by_domain]] - /// url = "socks5h://localhost:9050" - /// include = ["*.onion", "matrix.myspecial.onion"] - /// exclude = ["*.myspecial.onion"] - /// - /// Include vs. Exclude: - /// - /// - If include is an empty list, it is assumed to be `["*"]`. - /// - /// - If a domain matches both the exclude and include list, the proxy will - /// only be used if it was included because of a more specific rule than - /// it was excluded. In the above example, the proxy would be used for - /// `ordinary.onion`, `matrix.myspecial.onion`, but not - /// `hello.myspecial.onion`. - /// - /// default: "none" - #[serde(default)] - pub proxy: ProxyConfig, - - /// Servers listed here will be used to gather public keys of other servers - /// (notary trusted key servers). - /// - /// Currently, conduwuit doesn't support inbound batched key requests, so - /// this list should only contain other Synapse servers. - /// - /// example: ["matrix.org", "tchncs.de"] - /// - /// default: ["matrix.org"] - #[serde(default = "default_trusted_servers")] - pub trusted_servers: Vec, - - /// Whether to query the servers listed in trusted_servers first or query - /// the origin server first. 
For best security, querying the origin server - /// first is advised to minimize the exposure to a compromised trusted - /// server. For maximum federation/join performance this can be set to true, - /// however other options exist to query trusted servers first under - /// specific high-load circumstances and should be evaluated before setting - /// this to true. - #[serde(default)] - pub query_trusted_key_servers_first: bool, - - /// Whether to query the servers listed in trusted_servers first - /// specifically on room joins. This option limits the exposure to a - /// compromised trusted server to room joins only. The join operation - /// requires gathering keys from many origin servers which can cause - /// significant delays. Therefor this defaults to true to mitigate - /// unexpected delays out-of-the-box. The security-paranoid or those willing - /// to tolerate delays are advised to set this to false. Note that setting - /// query_trusted_key_servers_first to true causes this option to be - /// ignored. - #[serde(default = "true_fn")] - pub query_trusted_key_servers_first_on_join: bool, - - /// Only query trusted servers for keys and never the origin server. This is - /// intended for clusters or custom deployments using their trusted_servers - /// as forwarding-agents to cache and deduplicate requests. Notary servers - /// do not act as forwarding-agents by default, therefor do not enable this - /// unless you know exactly what you are doing. - #[serde(default)] - pub only_query_trusted_key_servers: bool, - - /// Maximum number of keys to request in each trusted server batch query. - /// - /// default: 1024 - #[serde(default = "default_trusted_server_batch_size")] - pub trusted_server_batch_size: usize, - - /// Max log level for conduwuit. Allows debug, info, warn, or error. 
- /// - /// See also: - /// https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives - /// - /// **Caveat**: - /// For release builds, the tracing crate is configured to only implement - /// levels higher than error to avoid unnecessary overhead in the compiled - /// binary from trace macros. For debug builds, this restriction is not - /// applied. - /// - /// default: "info" - #[serde(default = "default_log")] - pub log: String, - - /// Output logs with ANSI colours. - #[serde(default = "true_fn", alias = "log_colours")] - pub log_colors: bool, - - /// Configures the span events which will be outputted with the log. - /// - /// default: "none" - #[serde(default = "default_log_span_events")] - pub log_span_events: String, - - /// Configures whether CONDUWUIT_LOG EnvFilter matches values using regular - /// expressions. See the tracing_subscriber documentation on Directives. - /// - /// default: true - #[serde(default = "true_fn")] - pub log_filter_regex: bool, - - /// Toggles the display of ThreadId in tracing log output. - /// - /// default: false - #[serde(default)] - pub log_thread_ids: bool, - - /// OpenID token expiration/TTL in seconds. - /// - /// These are the OpenID tokens that are primarily used for Matrix account - /// integrations (e.g. Vector Integrations in Element), *not* OIDC/OpenID - /// Connect/etc. - /// - /// default: 3600 - #[serde(default = "default_openid_token_ttl")] - pub openid_token_ttl: u64, - - /// Allow an existing session to mint a login token for another client. - /// This requires interactive authentication, but has security ramifications - /// as a malicious client could use the mechanism to spawn more than one - /// session. - /// Enabled by default. - #[serde(default = "true_fn")] - pub login_via_existing_session: bool, - - /// Login token expiration/TTL in milliseconds. - /// - /// These are short-lived tokens for the m.login.token endpoint. 
- /// This is used to allow existing sessions to create new sessions. - /// see login_via_existing_session. - /// - /// default: 120000 - #[serde(default = "default_login_token_ttl")] - pub login_token_ttl: u64, - - /// Static TURN username to provide the client if not using a shared secret - /// ("turn_secret"), It is recommended to use a shared secret over static - /// credentials. - #[serde(default)] - pub turn_username: String, - - /// Static TURN password to provide the client if not using a shared secret - /// ("turn_secret"). It is recommended to use a shared secret over static - /// credentials. - /// - /// display: sensitive - #[serde(default)] - pub turn_password: String, - - /// Vector list of TURN URIs/servers to use. - /// - /// Replace "example.turn.uri" with your TURN domain, such as the coturn - /// "realm" config option. If using TURN over TLS, replace the URI prefix - /// "turn:" with "turns:". - /// - /// example: ["turn:example.turn.uri?transport=udp", - /// "turn:example.turn.uri?transport=tcp"] - /// - /// default: [] - #[serde(default)] - pub turn_uris: Vec, - - /// TURN secret to use for generating the HMAC-SHA1 hash apart of username - /// and password generation. - /// - /// This is more secure, but if needed you can use traditional static - /// username/password credentials. - /// - /// display: sensitive - #[serde(default)] - pub turn_secret: String, - - /// TURN secret to use that's read from the file path specified. - /// - /// This takes priority over "turn_secret" first, and falls back to - /// "turn_secret" if invalid or failed to open. - /// - /// example: "/etc/conduwuit/.turn_secret" - pub turn_secret_file: Option, - - /// TURN TTL, in seconds. - /// - /// default: 86400 - #[serde(default = "default_turn_ttl")] - pub turn_ttl: u64, - - /// List/vector of room IDs or room aliases that conduwuit will make newly - /// registered users join. 
The rooms specified must be rooms that you have - /// joined at least once on the server, and must be public. - /// - /// example: ["#conduwuit:puppygock.gay", - /// "!eoIzvAvVwY23LPDay8:puppygock.gay"] - /// - /// default: [] - #[serde(default = "Vec::new")] - pub auto_join_rooms: Vec, - - /// Config option to automatically deactivate the account of any user who - /// attempts to join a: - /// - banned room - /// - forbidden room alias - /// - room alias or ID with a forbidden server name - /// - /// This may be useful if all your banned lists consist of toxic rooms or - /// servers that no good faith user would ever attempt to join, and - /// to automatically remediate the problem without any admin user - /// intervention. - /// - /// This will also make the user leave all rooms. Federation (e.g. remote - /// room invites) are ignored here. - /// - /// Defaults to false as rooms can be banned for non-moderation-related - /// reasons and this performs a full user deactivation. - #[serde(default)] - pub auto_deactivate_banned_room_attempts: bool, - - /// RocksDB log level. This is not the same as conduwuit's log level. This - /// is the log level for the RocksDB engine/library which show up in your - /// database folder/path as `LOG` files. conduwuit will log RocksDB errors - /// as normal through tracing or panics if severe for safety. - /// - /// default: "error" - #[serde(default = "default_rocksdb_log_level")] - pub rocksdb_log_level: String, - - #[serde(default)] - pub rocksdb_log_stderr: bool, - - /// Max RocksDB `LOG` file size before rotating in bytes. Defaults to 4MB in - /// bytes. - /// - /// default: 4194304 - #[serde(default = "default_rocksdb_max_log_file_size")] - pub rocksdb_max_log_file_size: usize, - - /// Time in seconds before RocksDB will forcibly rotate logs. 
- /// - /// default: 0 - #[serde(default = "default_rocksdb_log_time_to_roll")] - pub rocksdb_log_time_to_roll: usize, - - /// Set this to true to use RocksDB config options that are tailored to HDDs - /// (slower device storage). - /// - /// It is worth noting that by default, conduwuit will use RocksDB with - /// Direct IO enabled. *Generally* speaking this improves performance as it - /// bypasses buffered I/O (system page cache). However there is a potential - /// chance that Direct IO may cause issues with database operations if your - /// setup is uncommon. This has been observed with FUSE filesystems, and - /// possibly ZFS filesystem. RocksDB generally deals/corrects these issues - /// but it cannot account for all setups. If you experience any weird - /// RocksDB issues, try enabling this option as it turns off Direct IO and - /// feel free to report in the conduwuit Matrix room if this option fixes - /// your DB issues. - /// - /// For more information, see: - /// https://github.com/facebook/rocksdb/wiki/Direct-IO - #[serde(default)] - pub rocksdb_optimize_for_spinning_disks: bool, - - /// Enables direct-io to increase database performance via unbuffered I/O. - /// - /// For more details about direct I/O and RockDB, see: - /// https://github.com/facebook/rocksdb/wiki/Direct-IO - /// - /// Set this option to false if the database resides on a filesystem which - /// does not support direct-io like FUSE, or any form of complex filesystem - /// setup such as possibly ZFS. - #[serde(default = "true_fn")] - pub rocksdb_direct_io: bool, - - /// Amount of threads that RocksDB will use for parallelism on database - /// operations such as cleanup, sync, flush, compaction, etc. Set to 0 to - /// use all your logical threads. Defaults to your CPU logical thread count. - /// - /// default: varies by system - #[serde(default = "default_rocksdb_parallelism_threads")] - pub rocksdb_parallelism_threads: usize, - - /// Maximum number of LOG files RocksDB will keep. 
This must *not* be set to - /// 0. It must be at least 1. Defaults to 3 as these are not very useful - /// unless troubleshooting/debugging a RocksDB bug. - /// - /// default: 3 - #[serde(default = "default_rocksdb_max_log_files")] - pub rocksdb_max_log_files: usize, - - /// Type of RocksDB database compression to use. - /// - /// Available options are "zstd", "bz2", "lz4", or "none". - /// - /// It is best to use ZSTD as an overall good balance between - /// speed/performance, storage, IO amplification, and CPU usage. For more - /// performance but less compression (more storage used) and less CPU usage, - /// use LZ4. - /// - /// For more details, see: - /// https://github.com/facebook/rocksdb/wiki/Compression - /// - /// "none" will disable compression. - /// - /// default: "zstd" - #[serde(default = "default_rocksdb_compression_algo")] - pub rocksdb_compression_algo: String, - - /// Level of compression the specified compression algorithm for RocksDB to - /// use. - /// - /// Default is 32767, which is internally read by RocksDB as the default - /// magic number and translated to the library's default compression level - /// as they all differ. See their `kDefaultCompressionLevel`. - /// - /// Note when using the default value we may override it with a setting - /// tailored specifically conduwuit. - /// - /// default: 32767 - #[serde(default = "default_rocksdb_compression_level")] - pub rocksdb_compression_level: i32, - - /// Level of compression the specified compression algorithm for the - /// bottommost level/data for RocksDB to use. Default is 32767, which is - /// internally read by RocksDB as the default magic number and translated to - /// the library's default compression level as they all differ. See their - /// `kDefaultCompressionLevel`. - /// - /// Since this is the bottommost level (generally old and least used data), - /// it may be desirable to have a very high compression level here as it's - /// less likely for this data to be used. 
Research your chosen compression - /// algorithm. - /// - /// Note when using the default value we may override it with a setting - /// tailored specifically conduwuit. - /// - /// default: 32767 - #[serde(default = "default_rocksdb_bottommost_compression_level")] - pub rocksdb_bottommost_compression_level: i32, - - /// Whether to enable RocksDB's "bottommost_compression". - /// - /// At the expense of more CPU usage, this will further compress the - /// database to reduce more storage. It is recommended to use ZSTD - /// compression with this for best compression results. This may be useful - /// if you're trying to reduce storage usage from the database. - /// - /// See https://github.com/facebook/rocksdb/wiki/Compression for more details. - #[serde(default = "true_fn")] - pub rocksdb_bottommost_compression: bool, - - /// Database recovery mode (for RocksDB WAL corruption). - /// - /// Use this option when the server reports corruption and refuses to start. - /// Set mode 2 (PointInTime) to cleanly recover from this corruption. The - /// server will continue from the last good state, several seconds or - /// minutes prior to the crash. Clients may have to run "clear-cache & - /// reload" to account for the rollback. Upon success, you may reset the - /// mode back to default and restart again. Please note in some cases the - /// corruption error may not be cleared for at least 30 minutes of operation - /// in PointInTime mode. - /// - /// As a very last ditch effort, if PointInTime does not fix or resolve - /// anything, you can try mode 3 (SkipAnyCorruptedRecord) but this will - /// leave the server in a potentially inconsistent state. - /// - /// The default mode 1 (TolerateCorruptedTailRecords) will automatically - /// drop the last entry in the database if corrupted during shutdown, but - /// nothing more. It is extraordinarily unlikely this will desynchronize - /// clients. To disable any form of silent rollback set mode 0 - /// (AbsoluteConsistency). 
- /// - /// The options are: - /// 0 = AbsoluteConsistency - /// 1 = TolerateCorruptedTailRecords (default) - /// 2 = PointInTime (use me if trying to recover) - /// 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) - /// - /// For more information on these modes, see: - /// https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes - /// - /// For more details on recovering a corrupt database, see: - /// https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption - /// - /// default: 1 - #[serde(default = "default_rocksdb_recovery_mode")] - pub rocksdb_recovery_mode: u8, - - /// Enables or disables paranoid SST file checks. This can improve RocksDB - /// database consistency at a potential performance impact due to further - /// safety checks ran. - /// - /// For more information, see: - /// https://github.com/facebook/rocksdb/wiki/Online-Verification#columnfamilyoptionsparanoid_file_checks - #[serde(default)] - pub rocksdb_paranoid_file_checks: bool, - - /// Enables or disables checksum verification in rocksdb at runtime. - /// Checksums are usually hardware accelerated with low overhead; they are - /// enabled in rocksdb by default. Older or slower platforms may see gains - /// from disabling. - /// - /// default: true - #[serde(default = "true_fn")] - pub rocksdb_checksums: bool, - - /// Enables the "atomic flush" mode in rocksdb. This option is not intended - /// for users. It may be removed or ignored in future versions. Atomic flush - /// may be enabled by the paranoid to possibly improve database integrity at - /// the cost of performance. - #[serde(default)] - pub rocksdb_atomic_flush: bool, - - /// Database repair mode (for RocksDB SST corruption). - /// - /// Use this option when the server reports corruption while running or - /// panics. If the server refuses to start use the recovery mode options - /// first. Corruption errors containing the acronym 'SST' which occur after - /// startup will likely require this option. 
- /// - /// - Backing up your database directory is recommended prior to running the - /// repair. - /// - /// - Disabling repair mode and restarting the server is recommended after - /// running the repair. - /// - /// See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. - #[serde(default)] - pub rocksdb_repair: bool, - - #[serde(default)] - pub rocksdb_read_only: bool, - - #[serde(default)] - pub rocksdb_secondary: bool, - - /// Enables idle CPU priority for compaction thread. This is not enabled by - /// default to prevent compaction from falling too far behind on busy - /// systems. - #[serde(default)] - pub rocksdb_compaction_prio_idle: bool, - - /// Enables idle IO priority for compaction thread. This prevents any - /// unexpected lag in the server's operation and is usually a good idea. - /// Enabled by default. - #[serde(default = "true_fn")] - pub rocksdb_compaction_ioprio_idle: bool, - - /// Enables RocksDB compaction. You should never ever have to set this - /// option to false. If you for some reason find yourself needing to use - /// this option as part of troubleshooting or a bug, please reach out to us - /// in the conduwuit Matrix room with information and details. - /// - /// Disabling compaction will lead to a significantly bloated and - /// explosively large database, gradually poor performance, unnecessarily - /// excessive disk read/writes, and slower shutdowns and startups. - #[serde(default = "true_fn")] - pub rocksdb_compaction: bool, - - /// Level of statistics collection. Some admin commands to display database - /// statistics may require this option to be set. Database performance may - /// be impacted by higher settings. - /// - /// Option is a number ranging from 0 to 6: - /// 0 = No statistics. - /// 1 = No statistics in release mode (default). - /// 2 to 3 = Statistics with no performance impact. - /// 3 to 5 = Statistics with possible performance impact. 
- /// 6 = All statistics. - /// - /// default: 1 - #[serde(default = "default_rocksdb_stats_level")] - pub rocksdb_stats_level: u8, - - /// This is a password that can be configured that will let you login to the - /// server bot account (currently `@conduit`) for emergency troubleshooting - /// purposes such as recovering/recreating your admin room, or inviting - /// yourself back. - /// - /// See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. - /// - /// Once this password is unset, all sessions will be logged out for - /// security purposes. - /// - /// example: "F670$2CP@Hw8mG7RY1$%!#Ic7YA" - /// - /// display: sensitive - pub emergency_password: Option, - - /// default: "/_matrix/push/v1/notify" - #[serde(default = "default_notification_push_path")] - pub notification_push_path: String, - - /// Allow local (your server only) presence updates/requests. - /// - /// Note that presence on conduwuit is very fast unlike Synapse's. If using - /// outgoing presence, this MUST be enabled. - #[serde(default = "true_fn")] - pub allow_local_presence: bool, - - /// Allow incoming federated presence updates/requests. - /// - /// This option receives presence updates from other servers, but does not - /// send any unless `allow_outgoing_presence` is true. Note that presence on - /// conduwuit is very fast unlike Synapse's. - #[serde(default = "true_fn")] - pub allow_incoming_presence: bool, - - /// Allow outgoing presence updates/requests. - /// - /// This option sends presence updates to other servers, but does not - /// receive any unless `allow_incoming_presence` is true. Note that presence - /// on conduwuit is very fast unlike Synapse's. If using outgoing presence, - /// you MUST enable `allow_local_presence` as well. - #[serde(default = "true_fn")] - pub allow_outgoing_presence: bool, - - /// How many seconds without presence updates before you become idle. - /// Defaults to 5 minutes. 
- /// - /// default: 300 - #[serde(default = "default_presence_idle_timeout_s")] - pub presence_idle_timeout_s: u64, - - /// How many seconds without presence updates before you become offline. - /// Defaults to 30 minutes. - /// - /// default: 1800 - #[serde(default = "default_presence_offline_timeout_s")] - pub presence_offline_timeout_s: u64, - - /// Enable the presence idle timer for remote users. - /// - /// Disabling is offered as an optimization for servers participating in - /// many large rooms or when resources are limited. Disabling it may cause - /// incorrect presence states (i.e. stuck online) to be seen for some remote - /// users. - #[serde(default = "true_fn")] - pub presence_timeout_remote_users: bool, - - /// Allow receiving incoming read receipts from remote servers. - #[serde(default = "true_fn")] - pub allow_incoming_read_receipts: bool, - - /// Allow sending read receipts to remote servers. - #[serde(default = "true_fn")] - pub allow_outgoing_read_receipts: bool, - - /// Allow outgoing typing updates to federation. - #[serde(default = "true_fn")] - pub allow_outgoing_typing: bool, - - /// Allow incoming typing updates from federation. - #[serde(default = "true_fn")] - pub allow_incoming_typing: bool, - - /// Maximum time federation user can indicate typing. - /// - /// default: 30 - #[serde(default = "default_typing_federation_timeout_s")] - pub typing_federation_timeout_s: u64, - - /// Minimum time local client can indicate typing. This does not override a - /// client's request to stop typing. It only enforces a minimum value in - /// case of no stop request. - /// - /// default: 15 - #[serde(default = "default_typing_client_timeout_min_s")] - pub typing_client_timeout_min_s: u64, - - /// Maximum time local client can indicate typing. 
- /// - /// default: 45 - #[serde(default = "default_typing_client_timeout_max_s")] - pub typing_client_timeout_max_s: u64, - - /// Set this to true for conduwuit to compress HTTP response bodies using - /// zstd. This option does nothing if conduwuit was not built with - /// `zstd_compression` feature. Please be aware that enabling HTTP - /// compression may weaken TLS. Most users should not need to enable this. - /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH - /// before deciding to enable this. - #[serde(default)] - pub zstd_compression: bool, - - /// Set this to true for conduwuit to compress HTTP response bodies using - /// gzip. This option does nothing if conduwuit was not built with - /// `gzip_compression` feature. Please be aware that enabling HTTP - /// compression may weaken TLS. Most users should not need to enable this. - /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before - /// deciding to enable this. - /// - /// If you are in a large amount of rooms, you may find that enabling this - /// is necessary to reduce the significantly large response bodies. - #[serde(default)] - pub gzip_compression: bool, - - /// Set this to true for conduwuit to compress HTTP response bodies using - /// brotli. This option does nothing if conduwuit was not built with - /// `brotli_compression` feature. Please be aware that enabling HTTP - /// compression may weaken TLS. Most users should not need to enable this. - /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH - /// before deciding to enable this. - #[serde(default)] - pub brotli_compression: bool, - - /// Set to true to allow user type "guest" registrations. Some clients like - /// Element attempt to register guest users automatically. - #[serde(default)] - pub allow_guest_registration: bool, - - /// Set to true to log guest registrations in the admin room. Note that - /// these may be noisy or unnecessary if you're a public homeserver. 
- #[serde(default)] - pub log_guest_registrations: bool, - - /// Set to true to allow guest registrations/users to auto join any rooms - /// specified in `auto_join_rooms`. - #[serde(default)] - pub allow_guests_auto_join_rooms: bool, - - /// Enable the legacy unauthenticated Matrix media repository endpoints. - /// These endpoints consist of: - /// - /_matrix/media/*/config - /// - /_matrix/media/*/upload - /// - /_matrix/media/*/preview_url - /// - /_matrix/media/*/download/* - /// - /_matrix/media/*/thumbnail/* - /// - /// The authenticated equivalent endpoints are always enabled. - /// - /// Defaults to true for now, but this is highly subject to change, likely - /// in the next release. - #[serde(default = "true_fn")] - pub allow_legacy_media: bool, - - #[serde(default = "true_fn")] - pub freeze_legacy_media: bool, - - /// Check consistency of the media directory at startup: - /// 1. When `media_compat_file_link` is enabled, this check will upgrade - /// media when switching back and forth between Conduit and conduwuit. - /// Both options must be enabled to handle this. - /// 2. When media is deleted from the directory, this check will also delete - /// its database entry. - /// - /// If none of these checks apply to your use cases, and your media - /// directory is significantly large setting this to false may reduce - /// startup time. - #[serde(default = "true_fn")] - pub media_startup_check: bool, - - /// Enable backward-compatibility with Conduit's media directory by creating - /// symlinks of media. - /// - /// This option is only necessary if you plan on using Conduit again. - /// Otherwise setting this to false reduces filesystem clutter and overhead - /// for managing these symlinks in the directory. This is now disabled by - /// default. You may still return to upstream Conduit but you have to run - /// conduwuit at least once with this set to true and allow the - /// media_startup_check to take place before shutting down to return to - /// Conduit. 
- #[serde(default)] - pub media_compat_file_link: bool, - - /// Prune missing media from the database as part of the media startup - /// checks. - /// - /// This means if you delete files from the media directory the - /// corresponding entries will be removed from the database. This is - /// disabled by default because if the media directory is accidentally moved - /// or inaccessible, the metadata entries in the database will be lost with - /// sadness. - #[serde(default)] - pub prune_missing_media: bool, - - /// List of forbidden server names via regex patterns that we will block - /// incoming AND outgoing federation with, and block client room joins / - /// remote user invites. - /// - /// Note that your messages can still make it to forbidden servers through - /// backfilling. Events we receive from forbidden servers via backfill - /// from servers we *do* federate with will be stored in the database. - /// - /// This check is applied on the room ID, room alias, sender server name, - /// sender user's server name, inbound federation X-Matrix origin, and - /// outbound federation handler. - /// - /// You can set this to ["*"] to block all servers by default, and then - /// use `allowed_remote_server_names` to allow only specific servers. - /// - /// example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub forbidden_remote_server_names: RegexSet, - - /// List of allowed server names via regex patterns that we will allow, - /// regardless of if they match `forbidden_remote_server_names`. - /// - /// This option has no effect if `forbidden_remote_server_names` is empty. - /// - /// example: ["goodserver\\.tld$", "goodphrase"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub allowed_remote_server_names: RegexSet, - - /// Vector list of regex patterns of server names that conduwuit will refuse - /// to download remote media from. 
- /// - /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub prevent_media_downloads_from: RegexSet, - - /// List of forbidden server names via regex patterns that we will block all - /// outgoing federated room directory requests for. Useful for preventing - /// our users from wandering into bad servers or spaces. - /// - /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub forbidden_remote_room_directory_server_names: RegexSet, - - /// Vector list of regex patterns of server names that conduwuit will not - /// send messages to the client from. - /// - /// Note that there is no way for clients to receive messages once a server - /// has become unignored without doing a full sync. This is a protocol - /// limitation with the current sync protocols. This means this is somewhat - /// of a nuclear option. - /// - /// example: ["reallybadserver\.tld$", "reallybadphrase", - /// "69dollarfortnitecards"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub ignore_messages_from_server_names: RegexSet, - - /// Send messages from users that the user has ignored to the client. - /// - /// There is no way for clients to receive messages sent while a user was - /// ignored without doing a full sync. This is a protocol limitation with - /// the current sync protocols. Disabling this option will move - /// responsibility of ignoring messages to the client, which can avoid this - /// limitation. - #[serde(default)] - pub send_messages_from_ignored_users_to_client: bool, - - /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you - /// do not want conduwuit to send outbound requests to. Defaults to - /// RFC1918, unroutable, loopback, multicast, and testnet addresses for - /// security. - /// - /// Please be aware that this is *not* a guarantee. 
You should be using a - /// firewall with zones as doing this on the application layer may have - /// bypasses. - /// - /// Currently this does not account for proxies in use like Synapse does. - /// - /// To disable, set this to be an empty vector (`[]`). - /// - /// Defaults to: - /// ["127.0.0.0/8", "10.0.0.0/8", "172.16.0.0/12", - /// "192.168.0.0/16", "100.64.0.0/10", "192.0.0.0/24", "169.254.0.0/16", - /// "192.88.99.0/24", "198.18.0.0/15", "192.0.2.0/24", "198.51.100.0/24", - /// "203.0.113.0/24", "224.0.0.0/4", "::1/128", "fe80::/10", "fc00::/7", - /// "2001:db8::/32", "ff00::/8", "fec0::/10"] - #[serde(default = "default_ip_range_denylist")] - pub ip_range_denylist: Vec, - - /// Optional IP address or network interface-name to bind as the source of - /// URL preview requests. If not set, it will not bind to a specific - /// address or interface. - /// - /// Interface names only supported on Linux, Android, and Fuchsia platforms; - /// all other platforms can specify the IP address. To list the interfaces - /// on your system, use the command `ip link show`. - /// - /// example: `"eth0"` or `"1.2.3.4"` - /// - /// default: - #[serde(default, with = "either::serde_untagged_optional")] - pub url_preview_bound_interface: Option>, - - /// Vector list of domains allowed to send requests to for URL previews. - /// - /// This is a *contains* match, not an explicit match. Putting "google.com" - /// will match "https://google.com" and - /// "http://mymaliciousdomainexamplegoogle.com" Setting this to "*" will - /// allow all URL previews. Please note that this opens up significant - /// attack surface to your server, you are expected to be aware of the risks - /// by doing so. - /// - /// default: [] - #[serde(default)] - pub url_preview_domain_contains_allowlist: Vec, - - /// Vector list of explicit domains allowed to send requests to for URL - /// previews. - /// - /// This is an *explicit* match, not a contains match. 
Putting "google.com" - /// will match "https://google.com", "http://google.com", but not - /// "https://mymaliciousdomainexamplegoogle.com". Setting this to "*" will - /// allow all URL previews. Please note that this opens up significant - /// attack surface to your server, you are expected to be aware of the risks - /// by doing so. - /// - /// default: [] - #[serde(default)] - pub url_preview_domain_explicit_allowlist: Vec, - - /// Vector list of explicit domains not allowed to send requests to for URL - /// previews. - /// - /// This is an *explicit* match, not a contains match. Putting "google.com" - /// will match "https://google.com", "http://google.com", but not - /// "https://mymaliciousdomainexamplegoogle.com". The denylist is checked - /// first before allowlist. Setting this to "*" will not do anything. - /// - /// default: [] - #[serde(default)] - pub url_preview_domain_explicit_denylist: Vec, - - /// Vector list of URLs allowed to send requests to for URL previews. - /// - /// Note that this is a *contains* match, not an explicit match. Putting - /// "google.com" will match "https://google.com/", - /// "https://google.com/url?q=https://mymaliciousdomainexample.com", and - /// "https://mymaliciousdomainexample.com/hi/google.com" Setting this to "*" - /// will allow all URL previews. Please note that this opens up significant - /// attack surface to your server, you are expected to be aware of the risks - /// by doing so. - /// - /// default: [] - #[serde(default)] - pub url_preview_url_contains_allowlist: Vec, - - /// Maximum amount of bytes allowed in a URL preview body size when - /// spidering. Defaults to 256KB in bytes. - /// - /// default: 256000 - #[serde(default = "default_url_preview_max_spider_size")] - pub url_preview_max_spider_size: usize, - - /// Option to decide whether you would like to run the domain allowlist - /// checks (contains and explicit) on the root domain or not. Does not apply - /// to URL contains allowlist. 
Defaults to false. - /// - /// Example usecase: If this is enabled and you have "wikipedia.org" allowed - /// in the explicit and/or contains domain allowlist, it will allow all - /// subdomains under "wikipedia.org" such as "en.m.wikipedia.org" as the - /// root domain is checked and matched. Useful if the domain contains - /// allowlist is still too broad for you but you still want to allow all the - /// subdomains under a root domain. - #[serde(default)] - pub url_preview_check_root_domain: bool, - - /// List of forbidden room aliases and room IDs as strings of regex - /// patterns. - /// - /// Regex can be used or explicit contains matches can be done by just - /// specifying the words (see example). - /// - /// This is checked upon room alias creation, custom room ID creation if - /// used, and startup as warnings if any room aliases in your database have - /// a forbidden room alias/ID. - /// - /// example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub forbidden_alias_names: RegexSet, - - /// List of forbidden username patterns/strings. - /// - /// Regex can be used or explicit contains matches can be done by just - /// specifying the words (see example). - /// - /// This is checked upon username availability check, registration, and - /// startup as warnings if any local users in your database have a forbidden - /// username. - /// - /// example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub forbidden_usernames: RegexSet, - - /// Retry failed and incomplete messages to remote servers immediately upon - /// startup. This is called bursting. If this is disabled, said messages may - /// not be delivered until more messages are queued for that server. Do not - /// change this option unless server resources are extremely limited or the - /// scale of the server's deployment is huge. 
Do not disable this unless you - /// know what you are doing. - #[serde(default = "true_fn")] - pub startup_netburst: bool, - - /// Messages are dropped and not reattempted. The `startup_netburst` option - /// must be enabled for this value to have any effect. Do not change this - /// value unless you know what you are doing. Set this value to -1 to - /// reattempt every message without trimming the queues; this may consume - /// significant disk. Set this value to 0 to drop all messages without any - /// attempt at redelivery. - /// - /// default: 50 - #[serde(default = "default_startup_netburst_keep")] - pub startup_netburst_keep: i64, - - /// Block non-admin local users from sending room invites (local and - /// remote), and block non-admin users from receiving remote room invites. - /// - /// Admins are always allowed to send and receive all room invites. - #[serde(default)] - pub block_non_admin_invites: bool, - - /// Allow admins to enter commands in rooms other than "#admins" (admin - /// room) by prefixing your message with "\!admin" or "\\!admin" followed up - /// a normal conduwuit admin command. The reply will be publicly visible to - /// the room, originating from the sender. - /// - /// example: \\!admin debug ping puppygock.gay - #[serde(default = "true_fn")] - pub admin_escape_commands: bool, - - /// Automatically activate the conduwuit admin room console / CLI on - /// startup. This option can also be enabled with `--console` conduwuit - /// argument. - #[serde(default)] - pub admin_console_automatic: bool, - - /// List of admin commands to execute on startup. 
- /// - /// This option can also be configured with the `--execute` conduwuit - /// argument and can take standard shell commands and environment variables - /// - /// For example: `./conduwuit --execute "server admin-notice conduwuit has - /// started up at $(date)"` - /// - /// example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` - /// - /// default: [] - #[serde(default)] - pub admin_execute: Vec, - - /// Ignore errors in startup commands. - /// - /// If false, conduwuit will error and fail to start if an admin execute - /// command (`--execute` / `admin_execute`) fails. - #[serde(default)] - pub admin_execute_errors_ignore: bool, - - /// List of admin commands to execute on SIGUSR2. - /// - /// Similar to admin_execute, but these commands are executed when the - /// server receives SIGUSR2 on supporting platforms. - /// - /// default: [] - #[serde(default)] - pub admin_signal_execute: Vec, - - /// Controls the max log level for admin command log captures (logs - /// generated from running admin commands). Defaults to "info" on release - /// builds, else "debug" on debug builds. - /// - /// default: "info" - #[serde(default = "default_admin_log_capture")] - pub admin_log_capture: String, - - /// The default room tag to apply on the admin room. - /// - /// On some clients like Element, the room tag "m.server_notice" is a - /// special pinned room at the very bottom of your room list. The conduwuit - /// admin room can be pinned here so you always have an easy-to-access - /// shortcut dedicated to your admin room. - /// - /// default: "m.server_notice" - #[serde(default = "default_admin_room_tag")] - pub admin_room_tag: String, - - /// Sentry.io crash/panic reporting, performance monitoring/metrics, etc. - /// This is NOT enabled by default. conduwuit's default Sentry reporting - /// endpoint domain is `o4506996327251968.ingest.us.sentry.io`. - #[serde(default)] - pub sentry: bool, - - /// Sentry reporting URL, if a custom one is desired. 
- /// - /// display: sensitive - /// default: "" - #[serde(default = "default_sentry_endpoint")] - pub sentry_endpoint: Option, - - /// Report your conduwuit server_name in Sentry.io crash reports and - /// metrics. - #[serde(default)] - pub sentry_send_server_name: bool, - - /// Performance monitoring/tracing sample rate for Sentry.io. - /// - /// Note that too high values may impact performance, and can be disabled by - /// setting it to 0.0 (0%) This value is read as a percentage to Sentry, - /// represented as a decimal. Defaults to 15% of traces (0.15) - /// - /// default: 0.15 - #[serde(default = "default_sentry_traces_sample_rate")] - pub sentry_traces_sample_rate: f32, - - /// Whether to attach a stacktrace to Sentry reports. - #[serde(default)] - pub sentry_attach_stacktrace: bool, - - /// Send panics to Sentry. This is true by default, but Sentry has to be - /// enabled. The global `sentry` config option must be enabled to send any - /// data. - #[serde(default = "true_fn")] - pub sentry_send_panic: bool, - - /// Send errors to sentry. This is true by default, but sentry has to be - /// enabled. This option is only effective in release-mode; forced to false - /// in debug-mode. - #[serde(default = "true_fn")] - pub sentry_send_error: bool, - - /// Controls the tracing log level for Sentry to send things like - /// breadcrumbs and transactions - /// - /// default: "info" - #[serde(default = "default_sentry_filter")] - pub sentry_filter: String, - - /// Enable the tokio-console. This option is only relevant to developers. - /// - /// For more information, see: - /// https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console - #[serde(default)] - pub tokio_console: bool, - - #[serde(default)] - pub test: BTreeSet, - - /// Controls whether admin room notices like account registrations, password - /// changes, account deactivations, room directory publications, etc will be - /// sent to the admin room. 
Update notices and normal admin command - /// responses will still be sent. - #[serde(default = "true_fn")] - pub admin_room_notices: bool, - - /// Enable database pool affinity support. On supporting systems, block - /// device queue topologies are detected and the request pool is optimized - /// for the hardware; db_pool_workers is determined automatically. - /// - /// default: true - #[serde(default = "true_fn")] - pub db_pool_affinity: bool, - - /// Sets the number of worker threads in the frontend-pool of the database. - /// This number should reflect the I/O capabilities of the system, - /// such as the queue-depth or the number of simultaneous requests in - /// flight. Defaults to 32 or four times the number of CPU cores, whichever - /// is greater. - /// - /// Note: This value is only used if db_pool_affinity is disabled or not - /// detected on the system, otherwise it is determined automatically. - /// - /// default: 32 - #[serde(default = "default_db_pool_workers")] - pub db_pool_workers: usize, - - /// When db_pool_affinity is enabled and detected, the size of any worker - /// group will not exceed the determined value. This is necessary when - /// thread-pooling approach does not scale to the full capabilities of - /// high-end hardware; using detected values without limitation could - /// degrade performance. - /// - /// The value is multiplied by the number of cores which share a device - /// queue, since group workers can be scheduled on any of those cores. - /// - /// default: 64 - #[serde(default = "default_db_pool_workers_limit")] - pub db_pool_workers_limit: usize, - - /// Determines the size of the queues feeding the database's frontend-pool. - /// The size of the queue is determined by multiplying this value with the - /// number of pool workers. 
When this queue is full, tokio tasks conducting - /// requests will yield until space is available; this is good for - /// flow-control by avoiding buffer-bloat, but can inhibit throughput if - /// too low. - /// - /// default: 4 - #[serde(default = "default_db_pool_queue_mult")] - pub db_pool_queue_mult: usize, - - /// Sets the initial value for the concurrency of streams. This value simply - /// allows overriding the default in the code. The default is 32, which is - /// the same as the default in the code. Note this value is itself - /// overridden by the computed stream_width_scale, unless that is disabled; - /// this value can serve as a fixed-width instead. - /// - /// default: 32 - #[serde(default = "default_stream_width_default")] - pub stream_width_default: usize, - - /// Scales the stream width starting from a base value detected for the - /// specific system. The base value is the database pool worker count - /// determined from the hardware queue size (e.g. 32 for SSD or 64 or 128+ - /// for NVMe). This float allows scaling the width up or down by multiplying - /// it (e.g. 1.5, 2.0, etc). The maximum result can be the size of the pool - /// queue (see: db_pool_queue_mult) as any larger value will stall the tokio - /// task. The value can also be scaled down (e.g. 0.5) to improve - /// responsiveness for many users at the cost of throughput for each. - /// - /// Setting this value to 0.0 causes the stream width to be fixed at the - /// value of stream_width_default. The default scale is 1.0 to match the - /// capabilities detected for the system. - /// - /// default: 1.0 - #[serde(default = "default_stream_width_scale")] - pub stream_width_scale: f32, - - /// Sets the initial amplification factor. This controls batch sizes of - /// requests made by each pool worker, multiplying the throughput of each - /// stream. 
This value is somewhat abstract from specific hardware - /// characteristics and can be significantly larger than any thread count or - /// queue size. This is because each database query may require several - /// index lookups, thus many database queries in a batch may make progress - /// independently while also sharing index and data blocks which may or may - /// not be cached. It is worthwhile to submit huge batches to reduce - /// complexity. The maximum value is 32768, though sufficient hardware is - /// still advised for that. - /// - /// default: 1024 - #[serde(default = "default_stream_amplification")] - pub stream_amplification: usize, - - /// Number of sender task workers; determines sender parallelism. Default is - /// '0' which means the value is determined internally, likely matching the - /// number of tokio worker-threads or number of cores, etc. Override by - /// setting a non-zero value. - /// - /// default: 0 - #[serde(default)] - pub sender_workers: usize, - - /// Enables listener sockets; can be set to false to disable listening. This - /// option is intended for developer/diagnostic purposes only. - #[serde(default = "true_fn")] - pub listening: bool, - - /// Enables configuration reload when the server receives SIGUSR1 on - /// supporting platforms. - /// - /// default: true - #[serde(default = "true_fn")] - pub config_reload_signal: bool, - - /// Toggles ignore checking/validating TLS certificates - /// - /// This applies to everything, including URL previews, federation requests, - /// etc. This is a hidden argument that should NOT be used in production as - /// it is highly insecure and I will personally yell at you if I catch you - /// using this. 
- #[serde(default)] - pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure: - bool, - - // external structure; separate section - #[serde(default)] - pub blurhashing: BlurhashConfig, - #[serde(flatten)] - #[allow(clippy::zero_sized_map_values)] - // this is a catchall, the map shouldn't be zero at runtime - catchall: BTreeMap, -} - -#[derive(Clone, Debug, Deserialize, Default)] -#[config_example_generator(filename = "conduwuit-example.toml", section = "global.tls")] -pub struct TlsConfig { - /// Path to a valid TLS certificate file. - /// - /// example: "/path/to/my/certificate.crt" - pub certs: Option, - - /// Path to a valid TLS certificate private key. - /// - /// example: "/path/to/my/certificate.key" - pub key: Option, - - /// Whether to listen and allow for HTTP and HTTPS connections (insecure!) - #[serde(default)] - pub dual_protocol: bool, -} - -#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] -#[derive(Clone, Debug, Deserialize, Default)] -#[config_example_generator(filename = "conduwuit-example.toml", section = "global.well_known")] -pub struct WellKnownConfig { - /// The server URL that the client well-known file will serve. This should - /// not contain a port, and should just be a valid HTTPS URL. - /// - /// example: "https://matrix.example.com" - pub client: Option, - - /// The server base domain of the URL with a specific port that the server - /// well-known file will serve. This should contain a port at the end, and - /// should not be a URL. 
- /// - /// example: "matrix.example.com:443" - pub server: Option, - - pub support_page: Option, - - pub support_role: Option, - - pub support_email: Option, - - pub support_mxid: Option, -} - -#[derive(Clone, Copy, Debug, Deserialize, Default)] -#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] -#[config_example_generator(filename = "conduwuit-example.toml", section = "global.blurhashing")] -pub struct BlurhashConfig { - /// blurhashing x component, 4 is recommended by https://blurha.sh/ - /// - /// default: 4 - #[serde(default = "default_blurhash_x_component")] - pub components_x: u32, - /// blurhashing y component, 3 is recommended by https://blurha.sh/ - /// - /// default: 3 - #[serde(default = "default_blurhash_y_component")] - pub components_y: u32, - /// Max raw size that the server will blurhash, this is the size of the - /// image after converting it to raw data, it should be higher than the - /// upload limit but not too high. The higher it is the higher the - /// potential load will be for clients requesting blurhashes. The default - /// is 33.55MB. Setting it to 0 disables blurhashing. 
- /// - /// default: 33554432 - #[serde(default = "default_blurhash_max_raw_size")] - pub blurhash_max_raw_size: u64, -} - -#[derive(Deserialize, Clone, Debug)] -#[serde(transparent)] -struct ListeningPort { - #[serde(with = "either::serde_untagged")] - ports: Either>, -} - -#[derive(Deserialize, Clone, Debug)] -#[serde(transparent)] -struct ListeningAddr { - #[serde(with = "either::serde_untagged")] - addrs: Either>, -} - -const DEPRECATED_KEYS: &[&str; 9] = &[ - "cache_capacity", - "conduit_cache_capacity_modifier", - "max_concurrent_requests", - "well_known_client", - "well_known_server", - "well_known_support_page", - "well_known_support_role", - "well_known_support_email", - "well_known_support_mxid", -]; - -impl Config { - /// Pre-initialize config - pub fn load<'a, I>(paths: I) -> Result - where - I: Iterator, - { - let envs = [Env::var("CONDUIT_CONFIG"), Env::var("CONDUWUIT_CONFIG")]; - - let config = envs - .into_iter() - .flatten() - .map(Toml::file) - .chain(paths.map(Toml::file)) - .fold(Figment::new(), |config, file| config.merge(file.nested())) - .merge(Env::prefixed("CONDUIT_").global().split("__")) - .merge(Env::prefixed("CONDUWUIT_").global().split("__")); - - Ok(config) - } - - /// Finalize config - pub fn new(raw_config: &Figment) -> Result { - let config = raw_config - .extract::() - .map_err(|e| err!("There was a problem with your configuration file: {e}"))?; - - // don't start if we're listening on both UNIX sockets and TCP at same time - check::is_dual_listening(raw_config)?; - - Ok(config) - } - - #[must_use] - pub fn get_bind_addrs(&self) -> Vec { - let mut addrs = Vec::with_capacity( - self.get_bind_hosts() - .len() - .saturating_mul(self.get_bind_ports().len()), - ); - for host in &self.get_bind_hosts() { - for port in &self.get_bind_ports() { - addrs.push(SocketAddr::new(*host, *port)); - } - } - - addrs - } - - fn get_bind_hosts(&self) -> Vec { - match &self.address.addrs { - | Left(addr) => vec![*addr], - | Right(addrs) => 
addrs.clone(), - } - } - - fn get_bind_ports(&self) -> Vec { - match &self.port.ports { - | Left(port) => vec![*port], - | Right(ports) => ports.clone(), - } - } - - pub fn check(&self) -> Result<(), Error> { check(self) } -} - -fn true_fn() -> bool { true } - -fn default_address() -> ListeningAddr { - ListeningAddr { - addrs: Right(vec![Ipv4Addr::LOCALHOST.into(), Ipv6Addr::LOCALHOST.into()]), - } -} - -fn default_port() -> ListeningPort { ListeningPort { ports: Left(8008) } } - -fn default_unix_socket_perms() -> u32 { 660 } - -fn default_database_backups_to_keep() -> i16 { 1 } - -fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) } - -fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) } - -fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } - -fn default_cache_capacity_modifier() -> f64 { 1.0 } - -fn default_auth_chain_cache_capacity() -> u32 { - parallelism_scaled_u32(10_000).saturating_add(100_000) -} - -fn default_shorteventid_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(100_000) -} - -fn default_eventidshort_cache_capacity() -> u32 { - parallelism_scaled_u32(25_000).saturating_add(100_000) -} - -fn default_eventid_pdu_cache_capacity() -> u32 { - parallelism_scaled_u32(25_000).saturating_add(100_000) -} - -fn default_shortstatekey_cache_capacity() -> u32 { - parallelism_scaled_u32(10_000).saturating_add(100_000) -} - -fn default_statekeyshort_cache_capacity() -> u32 { - parallelism_scaled_u32(10_000).saturating_add(100_000) -} - -fn default_servernameevent_data_cache_capacity() -> u32 { - parallelism_scaled_u32(100_000).saturating_add(500_000) -} - -fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) } - -fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } - -fn default_dns_cache_entries() -> u32 { 32768 } - -fn default_dns_min_ttl() -> u64 { 60 * 180 } - -fn 
default_dns_min_ttl_nxdomain() -> u64 { 60 * 60 * 24 * 3 } - -fn default_dns_attempts() -> u16 { 10 } - -fn default_dns_timeout() -> u64 { 10 } - -fn default_ip_lookup_strategy() -> u8 { 5 } - -fn default_max_request_size() -> usize { - 20 * 1024 * 1024 // Default to 20 MB -} - -fn default_request_conn_timeout() -> u64 { 10 } - -fn default_request_timeout() -> u64 { 35 } - -fn default_request_total_timeout() -> u64 { 320 } - -fn default_request_idle_timeout() -> u64 { 5 } - -fn default_request_idle_per_host() -> u16 { 1 } - -fn default_well_known_conn_timeout() -> u64 { 6 } - -fn default_well_known_timeout() -> u64 { 10 } - -fn default_federation_timeout() -> u64 { 25 } - -fn default_federation_idle_timeout() -> u64 { 25 } - -fn default_federation_idle_per_host() -> u16 { 1 } - -fn default_sender_timeout() -> u64 { 180 } - -fn default_sender_idle_timeout() -> u64 { 180 } - -fn default_sender_retry_backoff_limit() -> u64 { 86400 } - -fn default_appservice_timeout() -> u64 { 35 } - -fn default_appservice_idle_timeout() -> u64 { 300 } - -fn default_pusher_idle_timeout() -> u64 { 15 } - -fn default_max_fetch_prev_events() -> u16 { 192_u16 } - -fn default_tracing_flame_filter() -> String { - cfg!(debug_assertions) - .then_some("trace,h2=off") - .unwrap_or("info") - .to_owned() -} - -fn default_jaeger_filter() -> String { - cfg!(debug_assertions) - .then_some("trace,h2=off") - .unwrap_or("info") - .to_owned() -} - -fn default_tracing_flame_output_path() -> String { "./tracing.folded".to_owned() } - -fn default_trusted_servers() -> Vec { - vec![OwnedServerName::try_from("matrix.org").unwrap()] -} - -/// do debug logging by default for debug builds -#[must_use] -pub fn default_log() -> String { - cfg!(debug_assertions) - .then_some("debug") - .unwrap_or("info") - .to_owned() -} - -#[must_use] -pub fn default_log_span_events() -> String { "none".into() } - -fn default_notification_push_path() -> String { "/_matrix/push/v1/notify".to_owned() } - -fn 
default_openid_token_ttl() -> u64 { 60 * 60 } - -fn default_login_token_ttl() -> u64 { 2 * 60 * 1000 } - -fn default_turn_ttl() -> u64 { 60 * 60 * 24 } - -fn default_presence_idle_timeout_s() -> u64 { 5 * 60 } - -fn default_presence_offline_timeout_s() -> u64 { 30 * 60 } - -fn default_typing_federation_timeout_s() -> u64 { 30 } - -fn default_typing_client_timeout_min_s() -> u64 { 15 } - -fn default_typing_client_timeout_max_s() -> u64 { 45 } - -fn default_rocksdb_recovery_mode() -> u8 { 1 } - -fn default_rocksdb_log_level() -> String { "error".to_owned() } - -fn default_rocksdb_log_time_to_roll() -> usize { 0 } - -fn default_rocksdb_max_log_files() -> usize { 3 } - -fn default_rocksdb_max_log_file_size() -> usize { - // 4 megabytes - 4 * 1024 * 1024 -} - -fn default_rocksdb_parallelism_threads() -> usize { 0 } - -fn default_rocksdb_compression_algo() -> String { - cfg!(feature = "zstd_compression") - .then_some("zstd") - .unwrap_or("none") - .to_owned() -} - -/// Default RocksDB compression level is 32767, which is internally read by -/// RocksDB as the default magic number and translated to the library's default -/// compression level as they all differ. See their `kDefaultCompressionLevel`. -#[allow(clippy::doc_markdown)] -fn default_rocksdb_compression_level() -> i32 { 32767 } - -/// Default RocksDB compression level is 32767, which is internally read by -/// RocksDB as the default magic number and translated to the library's default -/// compression level as they all differ. See their `kDefaultCompressionLevel`. 
-#[allow(clippy::doc_markdown)] -fn default_rocksdb_bottommost_compression_level() -> i32 { 32767 } - -fn default_rocksdb_stats_level() -> u8 { 1 } - -// I know, it's a great name -#[must_use] -#[inline] -pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V11 } - -fn default_ip_range_denylist() -> Vec { - vec![ - "127.0.0.0/8".to_owned(), - "10.0.0.0/8".to_owned(), - "172.16.0.0/12".to_owned(), - "192.168.0.0/16".to_owned(), - "100.64.0.0/10".to_owned(), - "192.0.0.0/24".to_owned(), - "169.254.0.0/16".to_owned(), - "192.88.99.0/24".to_owned(), - "198.18.0.0/15".to_owned(), - "192.0.2.0/24".to_owned(), - "198.51.100.0/24".to_owned(), - "203.0.113.0/24".to_owned(), - "224.0.0.0/4".to_owned(), - "::1/128".to_owned(), - "fe80::/10".to_owned(), - "fc00::/7".to_owned(), - "2001:db8::/32".to_owned(), - "ff00::/8".to_owned(), - "fec0::/10".to_owned(), - ] -} - -fn default_url_preview_max_spider_size() -> usize { - 256_000 // 256KB -} - -fn default_new_user_displayname_suffix() -> String { "🏳️‍⚧️".to_owned() } - -fn default_sentry_endpoint() -> Option { None } - -fn default_sentry_traces_sample_rate() -> f32 { 0.15 } - -fn default_sentry_filter() -> String { "info".to_owned() } - -fn default_startup_netburst_keep() -> i64 { 50 } - -fn default_admin_log_capture() -> String { - cfg!(debug_assertions) - .then_some("debug") - .unwrap_or("info") - .to_owned() -} - -fn default_admin_room_tag() -> String { "m.server_notice".to_owned() } - -#[allow(clippy::as_conversions, clippy::cast_precision_loss)] -fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) } - -fn parallelism_scaled_u32(val: u32) -> u32 { - let val = val.try_into().expect("failed to cast u32 to usize"); - parallelism_scaled(val).try_into().unwrap_or(u32::MAX) -} - -fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) } - -fn default_trusted_server_batch_size() -> usize { 256 } - -fn default_db_pool_workers() -> usize { - 
sys::available_parallelism() - .saturating_mul(4) - .clamp(32, 1024) -} - -fn default_db_pool_workers_limit() -> usize { 64 } - -fn default_db_pool_queue_mult() -> usize { 4 } - -fn default_stream_width_default() -> usize { 32 } - -fn default_stream_width_scale() -> f32 { 1.0 } - -fn default_stream_amplification() -> usize { 1024 } - -fn default_client_receive_timeout() -> u64 { 75 } - -fn default_client_request_timeout() -> u64 { 180 } - -fn default_client_response_timeout() -> u64 { 120 } - -fn default_client_shutdown_timeout() -> u64 { 15 } - -fn default_sender_shutdown_timeout() -> u64 { 5 } - -// blurhashing defaults recommended by https://blurha.sh/ -// 2^25 -pub(super) fn default_blurhash_max_raw_size() -> u64 { 33_554_432 } - -pub(super) fn default_blurhash_x_component() -> u32 { 4 } - -pub(super) fn default_blurhash_y_component() -> u32 { 3 } - -// end recommended & blurhashing defaults diff --git a/src/core/debug.rs b/src/core/debug.rs deleted file mode 100644 index 21a5ada4..00000000 --- a/src/core/debug.rs +++ /dev/null @@ -1,116 +0,0 @@ -#![allow(clippy::disallowed_macros)] - -use std::{any::Any, env, panic, sync::LazyLock}; - -// Export debug proc_macros -pub use conduwuit_macros::recursion_depth; -use tracing::Level; - -// Export all of the ancillary tools from here as well. -pub use crate::{result::DebugInspect, utils::debug::*}; - -/// Log event at given level in debug-mode (when debug-assertions are enabled). -/// In release-mode it becomes DEBUG level, and possibly subject to elision. -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! debug_event { - ( $level:expr_2021, $($x:tt)+ ) => { - if $crate::debug::logging() { - ::tracing::event!( $level, _debug = true, $($x)+ ) - } else { - ::tracing::debug!( $($x)+ ) - } - } -} - -/// Log message at the ERROR level in debug-mode (when debug-assertions are -/// enabled). In release-mode it becomes DEBUG level, and possibly subject to -/// elision. -#[macro_export] -macro_rules! 
debug_error { - ( $($x:tt)+ ) => { - $crate::debug_event!(::tracing::Level::ERROR, $($x)+ ) - } -} - -/// Log message at the WARN level in debug-mode (when debug-assertions are -/// enabled). In release-mode it becomes DEBUG level, and possibly subject to -/// elision. -#[macro_export] -macro_rules! debug_warn { - ( $($x:tt)+ ) => { - $crate::debug_event!(::tracing::Level::WARN, $($x)+ ) - } -} - -/// Log message at the INFO level in debug-mode (when debug-assertions are -/// enabled). In release-mode it becomes DEBUG level, and possibly subject to -/// elision. -#[macro_export] -macro_rules! debug_info { - ( $($x:tt)+ ) => { - $crate::debug_event!(::tracing::Level::INFO, $($x)+ ) - } -} - -pub const INFO_SPAN_LEVEL: Level = if cfg!(debug_assertions) { - Level::INFO -} else { - Level::DEBUG -}; - -pub static DEBUGGER: LazyLock = - LazyLock::new(|| env::var("_").unwrap_or_default().ends_with("gdb")); - -#[cfg_attr(debug_assertions, crate::ctor)] -#[cfg_attr(not(debug_assertions), allow(dead_code))] -fn set_panic_trap() { - if !*DEBUGGER { - return; - } - - let next = panic::take_hook(); - panic::set_hook(Box::new(move |info| { - panic_handler(info, &next); - })); -} - -#[cold] -#[inline(never)] -#[allow(deprecated_in_future)] -pub fn panic_handler(info: &panic::PanicHookInfo<'_>, next: &dyn Fn(&panic::PanicHookInfo<'_>)) { - trap(); - next(info); -} - -#[inline(always)] -pub fn trap() { - #[cfg(core_intrinsics)] - //SAFETY: embeds llvm intrinsic for hardware breakpoint - unsafe { - std::intrinsics::breakpoint(); - } - - #[cfg(all(not(core_intrinsics), target_arch = "x86_64"))] - //SAFETY: embeds instruction for hardware breakpoint - unsafe { - std::arch::asm!("int3"); - } -} - -#[must_use] -pub fn panic_str(p: &Box) -> &'static str { - p.downcast_ref::<&str>().copied().unwrap_or_default() -} - -#[inline(always)] -#[must_use] -pub fn rttype_name(_: &T) -> &'static str { type_name::() } - -#[inline(always)] -#[must_use] -pub fn type_name() -> &'static str { 
std::any::type_name::() } - -#[must_use] -#[inline] -pub const fn logging() -> bool { cfg!(debug_assertions) } diff --git a/src/core/error/err.rs b/src/core/error/err.rs deleted file mode 100644 index 2eb6823a..00000000 --- a/src/core/error/err.rs +++ /dev/null @@ -1,209 +0,0 @@ -//! Error construction macros -//! -//! These are specialized macros specific to this project's patterns for -//! throwing Errors; they make Error construction succinct and reduce clutter. -//! They are developed from folding existing patterns into the macro while -//! fixing several anti-patterns in the codebase. -//! -//! - The primary macros `Err!` and `err!` are provided. `Err!` simply wraps -//! `err!` in the Result variant to reduce `Err(err!(...))` boilerplate, thus -//! `err!` can be used in any case. -//! -//! 1. The macro makes the general Error construction easy: `return -//! Err!("something went wrong")` replaces the prior `return -//! Err(Error::Err("something went wrong".to_owned()))`. -//! -//! 2. The macro integrates format strings automatically: `return -//! Err!("something bad: {msg}")` replaces the prior `return -//! Err(Error::Err(format!("something bad: {msg}")))`. -//! -//! 3. The macro scopes variants of Error: `return Err!(Database("problem with -//! bad database."))` replaces the prior `return Err(Error::Database("problem -//! with bad database."))`. -//! -//! 4. The macro matches and scopes some special-case sub-variants, for example -//! with ruma ErrorKind: `return Err!(Request(MissingToken("you must provide -//! an access token")))`. -//! -//! 5. The macro fixes the anti-pattern of repeating messages in an error! log -//! and then again in an Error construction, often slightly different due to -//! the Error variant not supporting a format string. Instead `return -//! Err(Database(error!("problem with db: {msg}")))` logs the error at the -//! callsite and then returns the error with the same string. Caller has the -//! 
option of replacing `error!` with `debug_error!`. - -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! Err { - ($($args:tt)*) => { - Err($crate::err!($($args)*)) - }; -} - -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! err { - (Request(Forbidden($level:ident!($($args:tt)+)))) => {{ - let mut buf = String::new(); - $crate::error::Error::Request( - $crate::ruma::api::client::error::ErrorKind::forbidden(), - $crate::err_log!(buf, $level, $($args)+), - $crate::http::StatusCode::BAD_REQUEST - ) - }}; - - (Request(Forbidden($($args:tt)+))) => { - $crate::error::Error::Request( - $crate::ruma::api::client::error::ErrorKind::forbidden(), - $crate::format_maybe!($($args)+), - $crate::http::StatusCode::BAD_REQUEST - ) - }; - - (Request($variant:ident($level:ident!($($args:tt)+)))) => {{ - let mut buf = String::new(); - $crate::error::Error::Request( - $crate::ruma::api::client::error::ErrorKind::$variant, - $crate::err_log!(buf, $level, $($args)+), - $crate::http::StatusCode::BAD_REQUEST - ) - }}; - - (Request($variant:ident($($args:tt)+))) => { - $crate::error::Error::Request( - $crate::ruma::api::client::error::ErrorKind::$variant, - $crate::format_maybe!($($args)+), - $crate::http::StatusCode::BAD_REQUEST - ) - }; - - (Config($item:literal, $($args:tt)+)) => {{ - let mut buf = String::new(); - $crate::error::Error::Config($item, $crate::err_log!(buf, error, config = %$item, $($args)+)) - }}; - - ($variant:ident($level:ident!($($args:tt)+))) => {{ - let mut buf = String::new(); - $crate::error::Error::$variant($crate::err_log!(buf, $level, $($args)+)) - }}; - - ($variant:ident($($args:ident),+)) => { - $crate::error::Error::$variant($($args),+) - }; - - ($variant:ident($($args:tt)+)) => { - $crate::error::Error::$variant($crate::format_maybe!($($args)+)) - }; - - ($level:ident!($($args:tt)+)) => {{ - let mut buf = String::new(); - $crate::error::Error::Err($crate::err_log!(buf, $level, $($args)+)) - }}; - - ($($args:tt)+) => { - 
$crate::error::Error::Err($crate::format_maybe!($($args)+)) - }; -} - -/// A trinity of integration between tracing, logging, and Error. This is a -/// customization of tracing::event! with the primary purpose of sharing the -/// error string, fieldset parsing and formatting. An added benefit is that we -/// can share the same callsite metadata for the source of our Error and the -/// associated logging and tracing event dispatches. -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! err_log { - ($out:ident, $level:ident, $($fields:tt)+) => {{ - use $crate::tracing::{ - callsite, callsite2, metadata, valueset, Callsite, - Level, - }; - - const LEVEL: Level = $crate::err_lev!($level); - static __CALLSITE: callsite::DefaultCallsite = callsite2! { - name: std::concat! { - "event ", - std::file!(), - ":", - std::line!(), - }, - kind: metadata::Kind::EVENT, - target: std::module_path!(), - level: LEVEL, - fields: $($fields)+, - }; - - ($crate::error::visit)(&mut $out, LEVEL, &__CALLSITE, &mut valueset!(__CALLSITE.metadata().fields(), $($fields)+)); - ($out).into() - }} -} - -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! 
err_lev { - (debug_warn) => { - if $crate::debug::logging() { - $crate::tracing::Level::WARN - } else { - $crate::tracing::Level::DEBUG - } - }; - - (debug_error) => { - if $crate::debug::logging() { - $crate::tracing::Level::ERROR - } else { - $crate::tracing::Level::DEBUG - } - }; - - (warn) => { - $crate::tracing::Level::WARN - }; - - (error) => { - $crate::tracing::Level::ERROR - }; -} - -use std::{fmt, fmt::Write}; - -use tracing::{ - __macro_support, __tracing_log, Callsite, Event, Level, - callsite::DefaultCallsite, - field::{Field, ValueSet, Visit}, - level_enabled, -}; - -struct Visitor<'a>(&'a mut String); - -impl Visit for Visitor<'_> { - #[inline] - fn record_debug(&mut self, field: &Field, val: &dyn fmt::Debug) { - if field.name() == "message" { - write!(self.0, "{val:?}").expect("stream error"); - } else { - write!(self.0, " {}={val:?}", field.name()).expect("stream error"); - } - } -} - -pub fn visit( - out: &mut String, - level: Level, - __callsite: &'static DefaultCallsite, - vs: &mut ValueSet<'_>, -) { - let meta = __callsite.metadata(); - let enabled = level_enabled!(level) && { - let interest = __callsite.interest(); - !interest.is_never() && __macro_support::__is_enabled(meta, interest) - }; - - if enabled { - Event::dispatch(meta, vs); - } - - __tracing_log!(level, __callsite, vs); - vs.record(&mut Visitor(out)); -} diff --git a/src/core/error/log.rs b/src/core/error/log.rs deleted file mode 100644 index 18c3a680..00000000 --- a/src/core/error/log.rs +++ /dev/null @@ -1,99 +0,0 @@ -use std::{convert::Infallible, fmt}; - -use tracing::Level; - -use super::Error; - -#[inline] -pub fn else_log(error: E) -> Result -where - T: Default, - Error: From, -{ - Ok(default_log(error)) -} - -#[inline] -pub fn else_debug_log(error: E) -> Result -where - T: Default, - Error: From, -{ - Ok(default_debug_log(error)) -} - -#[inline] -pub fn default_log(error: E) -> T -where - T: Default, - Error: From, -{ - let error = Error::from(error); - inspect_log(&error); 
- T::default() -} - -#[inline] -pub fn default_debug_log(error: E) -> T -where - T: Default, - Error: From, -{ - let error = Error::from(error); - inspect_debug_log(&error); - T::default() -} - -#[inline] -pub fn map_log(error: E) -> Error -where - Error: From, -{ - let error = Error::from(error); - inspect_log(&error); - error -} - -#[inline] -pub fn map_debug_log(error: E) -> Error -where - Error: From, -{ - let error = Error::from(error); - inspect_debug_log(&error); - error -} - -#[inline] -pub fn inspect_log(error: &E) { inspect_log_level(error, Level::ERROR); } - -#[inline] -pub fn inspect_debug_log(error: &E) { - inspect_debug_log_level(error, Level::ERROR); -} - -#[inline] -pub fn inspect_log_level(error: &E, level: Level) { - use crate::{debug, error, info, trace, warn}; - - match level { - | Level::ERROR => error!("{error}"), - | Level::WARN => warn!("{error}"), - | Level::INFO => info!("{error}"), - | Level::DEBUG => debug!("{error}"), - | Level::TRACE => trace!("{error}"), - } -} - -#[inline] -pub fn inspect_debug_log_level(error: &E, level: Level) { - use crate::{debug, debug_error, debug_info, debug_warn, trace}; - - match level { - | Level::ERROR => debug_error!("{error:?}"), - | Level::WARN => debug_warn!("{error:?}"), - | Level::INFO => debug_info!("{error:?}"), - | Level::DEBUG => debug!("{error:?}"), - | Level::TRACE => trace!("{error:?}"), - } -} diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs deleted file mode 100644 index e46edf09..00000000 --- a/src/core/error/mod.rs +++ /dev/null @@ -1,233 +0,0 @@ -mod err; -mod log; -mod panic; -mod response; -mod serde; - -use std::{any::Any, borrow::Cow, convert::Infallible, sync::PoisonError}; - -pub use self::{err::visit, log::*}; - -#[derive(thiserror::Error)] -pub enum Error { - #[error("PANIC!")] - PanicAny(Box), - #[error("PANIC! 
{0}")] - Panic(&'static str, Box), - - // std - #[error(transparent)] - Fmt(#[from] std::fmt::Error), - #[error(transparent)] - FromUtf8(#[from] std::string::FromUtf8Error), - #[error("I/O error: {0}")] - Io(#[from] std::io::Error), - #[error(transparent)] - ParseFloat(#[from] std::num::ParseFloatError), - #[error(transparent)] - ParseInt(#[from] std::num::ParseIntError), - #[error(transparent)] - Std(#[from] Box), - #[error(transparent)] - ThreadAccessError(#[from] std::thread::AccessError), - #[error(transparent)] - TryFromInt(#[from] std::num::TryFromIntError), - #[error(transparent)] - TryFromSlice(#[from] std::array::TryFromSliceError), - #[error(transparent)] - Utf8(#[from] std::str::Utf8Error), - - // third-party - #[error(transparent)] - CapacityError(#[from] arrayvec::CapacityError), - #[error(transparent)] - CargoToml(#[from] cargo_toml::Error), - #[error(transparent)] - Clap(#[from] clap::error::Error), - #[error(transparent)] - Extension(#[from] axum::extract::rejection::ExtensionRejection), - #[error(transparent)] - Figment(#[from] figment::error::Error), - #[error(transparent)] - Http(#[from] http::Error), - #[error(transparent)] - HttpHeader(#[from] http::header::InvalidHeaderValue), - #[error("Join error: {0}")] - JoinError(#[from] tokio::task::JoinError), - #[error(transparent)] - Json(#[from] serde_json::Error), - #[error(transparent)] - JsParseInt(#[from] ruma::JsParseIntError), // js_int re-export - #[error(transparent)] - JsTryFromInt(#[from] ruma::JsTryFromIntError), // js_int re-export - #[error(transparent)] - Path(#[from] axum::extract::rejection::PathRejection), - #[error("Mutex poisoned: {0}")] - Poison(Cow<'static, str>), - #[error("Regex error: {0}")] - Regex(#[from] regex::Error), - #[error("Request error: {0}")] - Reqwest(#[from] reqwest::Error), - #[error("{0}")] - SerdeDe(Cow<'static, str>), - #[error("{0}")] - SerdeSer(Cow<'static, str>), - #[error(transparent)] - TomlDe(#[from] toml::de::Error), - #[error(transparent)] - 
TomlSer(#[from] toml::ser::Error), - #[error("Tracing filter error: {0}")] - TracingFilter(#[from] tracing_subscriber::filter::ParseError), - #[error("Tracing reload error: {0}")] - TracingReload(#[from] tracing_subscriber::reload::Error), - #[error(transparent)] - TypedHeader(#[from] axum_extra::typed_header::TypedHeaderRejection), - #[error(transparent)] - Yaml(#[from] serde_yaml::Error), - - // ruma/conduwuit - #[error("Arithmetic operation failed: {0}")] - Arithmetic(Cow<'static, str>), - #[error("{0}: {1}")] - BadRequest(ruma::api::client::error::ErrorKind, &'static str), //TODO: remove - #[error("{0}")] - BadServerResponse(Cow<'static, str>), - #[error(transparent)] - CanonicalJson(#[from] ruma::CanonicalJsonError), - #[error("There was a problem with the '{0}' directive in your configuration: {1}")] - Config(&'static str, Cow<'static, str>), - #[error("{0}")] - Conflict(Cow<'static, str>), // This is only needed for when a room alias already exists - #[error(transparent)] - ContentDisposition(#[from] ruma::http_headers::ContentDispositionParseError), - #[error("{0}")] - Database(Cow<'static, str>), - #[error("Feature '{0}' is not available on this server.")] - FeatureDisabled(Cow<'static, str>), - #[error("Remote server {0} responded with: {1}")] - Federation(ruma::OwnedServerName, ruma::api::client::error::Error), - #[error("{0} in {1}")] - InconsistentRoomState(&'static str, ruma::OwnedRoomId), - #[error(transparent)] - IntoHttp(#[from] ruma::api::error::IntoHttpError), - #[error(transparent)] - Mxc(#[from] ruma::MxcUriError), - #[error(transparent)] - Mxid(#[from] ruma::IdParseError), - #[error("from {0}: {1}")] - Redaction(ruma::OwnedServerName, ruma::canonical_json::RedactionError), - #[error("{0}: {1}")] - Request(ruma::api::client::error::ErrorKind, Cow<'static, str>, http::StatusCode), - #[error(transparent)] - Ruma(#[from] ruma::api::client::error::Error), - #[error(transparent)] - Signatures(#[from] ruma::signatures::Error), - #[error(transparent)] 
- StateRes(#[from] crate::state_res::Error), - #[error("uiaa")] - Uiaa(ruma::api::client::uiaa::UiaaInfo), - - // unique / untyped - #[error("{0}")] - Err(Cow<'static, str>), -} - -impl Error { - #[inline] - #[must_use] - pub fn from_errno() -> Self { Self::Io(std::io::Error::last_os_error()) } - - //#[deprecated] - pub fn bad_database(message: &'static str) -> Self { - crate::err!(Database(error!("{message}"))) - } - - /// Sanitizes public-facing errors that can leak sensitive information. - pub fn sanitized_message(&self) -> String { - match self { - | Self::Database(..) => String::from("Database error occurred."), - | Self::Io(..) => String::from("I/O error occurred."), - | _ => self.message(), - } - } - - /// Generate the error message string. - pub fn message(&self) -> String { - match self { - | Self::Federation(origin, error) => format!("Answer from {origin}: {error}"), - | Self::Ruma(error) => response::ruma_error_message(error), - | _ => format!("{self}"), - } - } - - /// Returns the Matrix error code / error kind - #[inline] - pub fn kind(&self) -> ruma::api::client::error::ErrorKind { - use ruma::api::client::error::ErrorKind::{FeatureDisabled, Unknown}; - - match self { - | Self::Federation(_, error) | Self::Ruma(error) => - response::ruma_error_kind(error).clone(), - | Self::BadRequest(kind, ..) | Self::Request(kind, ..) => kind.clone(), - | Self::FeatureDisabled(..) => FeatureDisabled, - | _ => Unknown, - } - } - - /// Returns the HTTP error code or closest approximation based on error - /// variant. - pub fn status_code(&self) -> http::StatusCode { - use http::StatusCode; - - match self { - | Self::Federation(_, error) | Self::Ruma(error) => error.status_code, - | Self::Request(kind, _, code) => response::status_code(kind, *code), - | Self::BadRequest(kind, ..) => response::bad_request_code(kind), - | Self::FeatureDisabled(..) 
=> response::bad_request_code(&self.kind()), - | Self::Reqwest(error) => error.status().unwrap_or(StatusCode::INTERNAL_SERVER_ERROR), - | Self::Conflict(_) => StatusCode::CONFLICT, - | Self::Io(error) => response::io_error_code(error.kind()), - | _ => StatusCode::INTERNAL_SERVER_ERROR, - } - } - - /// Returns true for "not found" errors. This means anything that qualifies - /// as a "not found" from any variant's contained error type. This call is - /// often used as a special case to eliminate a contained Option with a - /// Result where Ok(None) is instead Err(e) if e.is_not_found(). - #[inline] - pub fn is_not_found(&self) -> bool { self.status_code() == http::StatusCode::NOT_FOUND } -} - -impl std::fmt::Debug for Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.message()) - } -} - -impl From> for Error { - #[cold] - #[inline(never)] - fn from(e: PoisonError) -> Self { Self::Poison(e.to_string().into()) } -} - -#[allow(clippy::fallible_impl_from)] -impl From for Error { - #[cold] - #[inline(never)] - fn from(_e: Infallible) -> Self { - panic!("infallible error should never exist"); - } -} - -#[cold] -#[inline(never)] -pub fn infallible(_e: &Infallible) { - panic!("infallible error should never exist"); -} - -/// Convenience functor for fundamental Error::sanitized_message(); see member. -#[inline] -#[must_use] -#[allow(clippy::needless_pass_by_value)] -pub fn sanitized_message(e: Error) -> String { e.sanitized_message() } diff --git a/src/core/error/panic.rs b/src/core/error/panic.rs deleted file mode 100644 index 2e63105b..00000000 --- a/src/core/error/panic.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::{ - any::Any, - panic::{RefUnwindSafe, UnwindSafe, panic_any}, -}; - -use super::Error; -use crate::debug; - -impl UnwindSafe for Error {} -impl RefUnwindSafe for Error {} - -impl Error { - #[inline] - pub fn panic(self) -> ! 
{ panic_any(self.into_panic()) } - - #[must_use] - #[inline] - pub fn from_panic(e: Box) -> Self { Self::Panic(debug::panic_str(&e), e) } - - #[inline] - pub fn into_panic(self) -> Box { - match self { - | Self::Panic(_, e) | Self::PanicAny(e) => e, - | Self::JoinError(e) => e.into_panic(), - | _ => Box::new(self), - } - } - - /// Get the panic message string. - #[inline] - pub fn panic_str(self) -> Option<&'static str> { - self.is_panic() - .then_some(debug::panic_str(&self.into_panic())) - } - - /// Check if the Error is trafficking a panic object. - #[inline] - pub fn is_panic(&self) -> bool { - match &self { - | Self::Panic(..) | Self::PanicAny(..) => true, - | Self::JoinError(e) => e.is_panic(), - | _ => false, - } - } -} diff --git a/src/core/error/response.rs b/src/core/error/response.rs deleted file mode 100644 index ae6fce62..00000000 --- a/src/core/error/response.rs +++ /dev/null @@ -1,112 +0,0 @@ -use bytes::BytesMut; -use http::StatusCode; -use http_body_util::Full; -use ruma::api::{ - OutgoingResponse, - client::{ - error::{ErrorBody, ErrorKind}, - uiaa::UiaaResponse, - }, -}; - -use super::Error; -use crate::error; - -impl axum::response::IntoResponse for Error { - fn into_response(self) -> axum::response::Response { - let response: UiaaResponse = self.into(); - response - .try_into_http_response::() - .inspect_err(|e| error!("error response error: {e}")) - .map_or_else( - |_| StatusCode::INTERNAL_SERVER_ERROR.into_response(), - |r| r.map(BytesMut::freeze).map(Full::new).into_response(), - ) - } -} - -impl From for UiaaResponse { - #[inline] - fn from(error: Error) -> Self { - if let Error::Uiaa(uiaainfo) = error { - return Self::AuthResponse(uiaainfo); - } - - let body = ErrorBody::Standard { - kind: error.kind(), - message: error.message(), - }; - - Self::MatrixError(ruma::api::client::error::Error { - status_code: error.status_code(), - body, - }) - } -} - -pub(super) fn status_code(kind: &ErrorKind, hint: StatusCode) -> StatusCode { - if hint == 
StatusCode::BAD_REQUEST { - bad_request_code(kind) - } else { - hint - } -} - -pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode { - use ErrorKind::*; - - match kind { - // 429 - | LimitExceeded { .. } => StatusCode::TOO_MANY_REQUESTS, - - // 413 - | TooLarge => StatusCode::PAYLOAD_TOO_LARGE, - - // 405 - | Unrecognized => StatusCode::METHOD_NOT_ALLOWED, - - // 404 - | NotFound | NotImplemented | FeatureDisabled => StatusCode::NOT_FOUND, - - // 403 - | GuestAccessForbidden - | ThreepidAuthFailed - | UserDeactivated - | ThreepidDenied - | WrongRoomKeysVersion { .. } - | Forbidden { .. } => StatusCode::FORBIDDEN, - - // 401 - | UnknownToken { .. } | MissingToken | Unauthorized => StatusCode::UNAUTHORIZED, - - // 400 - | _ => StatusCode::BAD_REQUEST, - } -} - -pub(super) fn ruma_error_message(error: &ruma::api::client::error::Error) -> String { - if let ErrorBody::Standard { message, .. } = &error.body { - return message.clone(); - } - - format!("{error}") -} - -pub(super) fn ruma_error_kind(e: &ruma::api::client::error::Error) -> &ErrorKind { - e.error_kind().unwrap_or(&ErrorKind::Unknown) -} - -pub(super) fn io_error_code(kind: std::io::ErrorKind) -> StatusCode { - use std::io::ErrorKind; - - match kind { - | ErrorKind::InvalidInput => StatusCode::BAD_REQUEST, - | ErrorKind::PermissionDenied => StatusCode::FORBIDDEN, - | ErrorKind::NotFound => StatusCode::NOT_FOUND, - | ErrorKind::TimedOut => StatusCode::GATEWAY_TIMEOUT, - | ErrorKind::FileTooLarge => StatusCode::PAYLOAD_TOO_LARGE, - | ErrorKind::StorageFull => StatusCode::INSUFFICIENT_STORAGE, - | ErrorKind::Interrupted => StatusCode::SERVICE_UNAVAILABLE, - | _ => StatusCode::INTERNAL_SERVER_ERROR, - } -} diff --git a/src/core/error/serde.rs b/src/core/error/serde.rs deleted file mode 100644 index 0c5a153b..00000000 --- a/src/core/error/serde.rs +++ /dev/null @@ -1,13 +0,0 @@ -use std::fmt::Display; - -use serde::{de, ser}; - -use crate::Error; - -impl de::Error for Error { - fn custom(msg: T) -> Self 
{ Self::SerdeDe(msg.to_string().into()) } -} - -impl ser::Error for Error { - fn custom(msg: T) -> Self { Self::SerdeSer(msg.to_string().into()) } -} diff --git a/src/core/info/cargo.rs b/src/core/info/cargo.rs deleted file mode 100644 index 28c6590e..00000000 --- a/src/core/info/cargo.rs +++ /dev/null @@ -1,93 +0,0 @@ -//! Information about the build related to Cargo. This is a frontend interface -//! informed by proc-macros that capture raw information at build time which is -//! further processed at runtime either during static initialization or as -//! necessary. - -use std::sync::OnceLock; - -use cargo_toml::{DepsSet, Manifest}; -use conduwuit_macros::cargo_manifest; - -use crate::Result; - -// Raw captures of the cargo manifest for each crate. This is provided by a -// proc-macro at build time since the source directory and the cargo toml's may -// not be present during execution. - -#[cargo_manifest] -const WORKSPACE_MANIFEST: &'static str = (); -#[cargo_manifest(crate = "macros")] -const MACROS_MANIFEST: &'static str = (); -#[cargo_manifest(crate = "core")] -const CORE_MANIFEST: &'static str = (); -#[cargo_manifest(crate = "database")] -const DATABASE_MANIFEST: &'static str = (); -#[cargo_manifest(crate = "service")] -const SERVICE_MANIFEST: &'static str = (); -#[cargo_manifest(crate = "admin")] -const ADMIN_MANIFEST: &'static str = (); -#[cargo_manifest(crate = "router")] -const ROUTER_MANIFEST: &'static str = (); -#[cargo_manifest(crate = "main")] -const MAIN_MANIFEST: &'static str = (); - -/// Processed list of features across all project crates. This is generated from -/// the data in the MANIFEST strings and contains all possible project features. -/// For *enabled* features see the info::rustc module instead. -static FEATURES: OnceLock> = OnceLock::new(); - -/// Processed list of dependencies. This is generated from the datas captured in -/// the MANIFEST. 
-static DEPENDENCIES: OnceLock = OnceLock::new(); - -#[must_use] -pub fn dependencies_names() -> Vec<&'static str> { - dependencies().keys().map(String::as_str).collect() -} - -pub fn dependencies() -> &'static DepsSet { - DEPENDENCIES.get_or_init(|| { - init_dependencies().unwrap_or_else(|e| panic!("Failed to initialize dependencies: {e}")) - }) -} - -/// List of all possible features for the project. For *enabled* features in -/// this build see the companion function in info::rustc. -pub fn features() -> &'static Vec { - FEATURES.get_or_init(|| { - init_features().unwrap_or_else(|e| panic!("Failed initialize features: {e}")) - }) -} - -fn init_features() -> Result> { - let mut features = Vec::new(); - append_features(&mut features, WORKSPACE_MANIFEST)?; - append_features(&mut features, MACROS_MANIFEST)?; - append_features(&mut features, CORE_MANIFEST)?; - append_features(&mut features, DATABASE_MANIFEST)?; - append_features(&mut features, SERVICE_MANIFEST)?; - append_features(&mut features, ADMIN_MANIFEST)?; - append_features(&mut features, ROUTER_MANIFEST)?; - append_features(&mut features, MAIN_MANIFEST)?; - features.sort(); - features.dedup(); - - Ok(features) -} - -fn append_features(features: &mut Vec, manifest: &str) -> Result<()> { - let manifest = Manifest::from_str(manifest)?; - features.extend(manifest.features.keys().cloned()); - - Ok(()) -} - -fn init_dependencies() -> Result { - let manifest = Manifest::from_str(WORKSPACE_MANIFEST)?; - Ok(manifest - .workspace - .as_ref() - .expect("manifest has workspace section") - .dependencies - .clone()) -} diff --git a/src/core/info/mod.rs b/src/core/info/mod.rs deleted file mode 100644 index ca39b348..00000000 --- a/src/core/info/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! Information about the project. This module contains version, build, system, -//! etc information which can be queried by admins or used by developers. 
- -pub mod cargo; -pub mod room_version; -pub mod rustc; -pub mod version; - -pub use conduwuit_macros::rustc_flags_capture; - -pub const MODULE_ROOT: &str = const_str::split!(std::module_path!(), "::")[0]; -pub const CRATE_PREFIX: &str = const_str::split!(MODULE_ROOT, '_')[0]; diff --git a/src/core/info/room_version.rs b/src/core/info/room_version.rs deleted file mode 100644 index 51d5d3c6..00000000 --- a/src/core/info/room_version.rs +++ /dev/null @@ -1,60 +0,0 @@ -//! Room version support - -use std::iter::once; - -use ruma::{RoomVersionId, api::client::discovery::get_capabilities::RoomVersionStability}; - -use crate::{at, is_equal_to}; - -/// Supported and stable room versions -pub const STABLE_ROOM_VERSIONS: &[RoomVersionId] = &[ - RoomVersionId::V6, - RoomVersionId::V7, - RoomVersionId::V8, - RoomVersionId::V9, - RoomVersionId::V10, - RoomVersionId::V11, -]; - -/// Experimental, partially supported room versions -pub const UNSTABLE_ROOM_VERSIONS: &[RoomVersionId] = - &[RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; - -type RoomVersion = (RoomVersionId, RoomVersionStability); - -impl crate::Server { - #[inline] - pub fn supported_room_version(&self, version: &RoomVersionId) -> bool { - self.supported_room_versions().any(is_equal_to!(*version)) - } - - #[inline] - pub fn supported_room_versions(&self) -> impl Iterator + '_ { - Self::available_room_versions() - .filter(|(_, stability)| self.supported_stability(stability)) - .map(at!(0)) - } - - #[inline] - pub fn available_room_versions() -> impl Iterator { - available_room_versions() - } - - #[inline] - fn supported_stability(&self, stability: &RoomVersionStability) -> bool { - self.config.allow_unstable_room_versions || *stability == RoomVersionStability::Stable - } -} - -pub fn available_room_versions() -> impl Iterator { - let unstable_room_versions = UNSTABLE_ROOM_VERSIONS - .iter() - .cloned() - .zip(once(RoomVersionStability::Unstable).cycle()); - - STABLE_ROOM_VERSIONS - 
.iter() - .cloned() - .zip(once(RoomVersionStability::Stable).cycle()) - .chain(unstable_room_versions) -} diff --git a/src/core/info/rustc.rs b/src/core/info/rustc.rs deleted file mode 100644 index 048c0cd5..00000000 --- a/src/core/info/rustc.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Information about the build related to rustc. This is a frontend interface -//! informed by proc-macros at build time. Since the project is split into -//! several crates, lower-level information is supplied from each crate during -//! static initialization. - -use std::{ - collections::BTreeMap, - sync::{Mutex, OnceLock}, -}; - -use crate::utils::exchange; - -/// Raw capture of rustc flags used to build each crate in the project. Informed -/// by rustc_flags_capture macro (one in each crate's mod.rs). This is -/// done during static initialization which is why it's mutex-protected and pub. -/// Should not be written to by anything other than our macro. -pub static FLAGS: Mutex> = Mutex::new(BTreeMap::new()); - -/// Processed list of enabled features across all project crates. This is -/// generated from the data in FLAGS. -static FEATURES: OnceLock> = OnceLock::new(); - -/// List of features enabled for the project. 
-pub fn features() -> &'static Vec<&'static str> { FEATURES.get_or_init(init_features) } - -fn init_features() -> Vec<&'static str> { - let mut features = Vec::new(); - FLAGS - .lock() - .expect("locked") - .iter() - .for_each(|(_, flags)| append_features(&mut features, flags)); - - features.sort_unstable(); - features.dedup(); - features -} - -fn append_features(features: &mut Vec<&'static str>, flags: &[&'static str]) { - let mut next_is_cfg = false; - for flag in flags { - let is_cfg = *flag == "--cfg"; - let is_feature = flag.starts_with("feature="); - if exchange(&mut next_is_cfg, is_cfg) && is_feature { - if let Some(feature) = flag - .split_once('=') - .map(|(_, feature)| feature.trim_matches('"')) - { - features.push(feature); - } - } - } -} diff --git a/src/core/info/version.rs b/src/core/info/version.rs deleted file mode 100644 index 6abb6e13..00000000 --- a/src/core/info/version.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! one true function for returning the conduwuit version with the necessary -//! CONDUWUIT_VERSION_EXTRA env variables used if specified -//! -//! Set the environment variable `CONDUWUIT_VERSION_EXTRA` to any UTF-8 string -//! to include it in parenthesis after the SemVer version. A common value are -//! git commit hashes. 
- -use std::sync::OnceLock; - -static BRANDING: &str = "continuwuity"; -static SEMANTIC: &str = env!("CARGO_PKG_VERSION"); - -static VERSION: OnceLock = OnceLock::new(); -static USER_AGENT: OnceLock = OnceLock::new(); - -#[inline] -#[must_use] -pub fn name() -> &'static str { BRANDING } - -#[inline] -pub fn version() -> &'static str { VERSION.get_or_init(init_version) } - -#[inline] -pub fn user_agent() -> &'static str { USER_AGENT.get_or_init(init_user_agent) } - -fn init_user_agent() -> String { format!("{}/{}", name(), version()) } - -fn init_version() -> String { - option_env!("CONDUWUIT_VERSION_EXTRA") - .or(option_env!("CONDUIT_VERSION_EXTRA")) - .map_or(SEMANTIC.to_owned(), |extra| { - if extra.is_empty() { - SEMANTIC.to_owned() - } else { - format!("{SEMANTIC} ({extra})") - } - }) -} diff --git a/src/core/log/capture/data.rs b/src/core/log/capture/data.rs deleted file mode 100644 index a4a1225b..00000000 --- a/src/core/log/capture/data.rs +++ /dev/null @@ -1,35 +0,0 @@ -use tracing::Level; -use tracing_core::{Event, span::Current}; - -use super::{Layer, layer::Value}; -use crate::{info, utils::string::EMPTY}; - -pub struct Data<'a> { - pub layer: &'a Layer, - pub event: &'a Event<'a>, - pub current: &'a Current, - pub values: &'a [Value], - pub scope: &'a [&'static str], -} - -impl Data<'_> { - #[must_use] - pub fn our_modules(&self) -> bool { self.mod_name().starts_with(info::CRATE_PREFIX) } - - #[must_use] - pub fn level(&self) -> Level { *self.event.metadata().level() } - - #[must_use] - pub fn mod_name(&self) -> &str { self.event.metadata().module_path().unwrap_or_default() } - - #[must_use] - pub fn span_name(&self) -> &str { self.current.metadata().map_or(EMPTY, |s| s.name()) } - - #[must_use] - pub fn message(&self) -> &str { - self.values - .iter() - .find(|(k, _)| *k == "message") - .map_or(EMPTY, |(_, v)| v.as_str()) - } -} diff --git a/src/core/log/capture/guard.rs b/src/core/log/capture/guard.rs deleted file mode 100644 index b5a6d8b3..00000000 
--- a/src/core/log/capture/guard.rs +++ /dev/null @@ -1,13 +0,0 @@ -use std::sync::Arc; - -use super::Capture; - -/// Capture instance scope guard. -pub struct Guard { - pub(super) capture: Arc, -} - -impl Drop for Guard { - #[inline] - fn drop(&mut self) { self.capture.stop(); } -} diff --git a/src/core/log/capture/layer.rs b/src/core/log/capture/layer.rs deleted file mode 100644 index 381a652f..00000000 --- a/src/core/log/capture/layer.rs +++ /dev/null @@ -1,97 +0,0 @@ -use std::{fmt, sync::Arc}; - -use arrayvec::ArrayVec; -use tracing::field::{Field, Visit}; -use tracing_core::{Event, Subscriber}; -use tracing_subscriber::{layer::Context, registry::LookupSpan}; - -use super::{Capture, Data, State}; - -pub struct Layer { - state: Arc, -} - -struct Visitor { - values: Values, -} - -type Values = ArrayVec; -pub type Value = (&'static str, String); - -type ScopeNames = ArrayVec<&'static str, 32>; - -impl Layer { - #[inline] - pub fn new(state: &Arc) -> Self { Self { state: state.clone() } } -} - -impl fmt::Debug for Layer { - #[inline] - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.debug_struct("capture::Layer").finish() - } -} - -impl tracing_subscriber::Layer for Layer -where - S: Subscriber + for<'a> LookupSpan<'a>, -{ - fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) { - self.state - .active - .read() - .expect("shared lock") - .iter() - .filter(|capture| filter(self, capture, event, &ctx)) - .for_each(|capture| handle(self, capture, event, &ctx)); - } -} - -fn handle(layer: &Layer, capture: &Capture, event: &Event<'_>, ctx: &Context<'_, S>) -where - S: Subscriber + for<'a> LookupSpan<'a>, -{ - let names = ScopeNames::new(); - let mut visitor = Visitor { values: Values::new() }; - event.record(&mut visitor); - - let mut closure = capture.closure.lock().expect("exclusive lock"); - closure(Data { - layer, - event, - current: &ctx.current_span(), - values: &visitor.values, - scope: &names, - }); -} - -fn filter(layer: 
&Layer, capture: &Capture, event: &Event<'_>, ctx: &Context<'_, S>) -> bool -where - S: Subscriber + for<'a> LookupSpan<'a>, -{ - let values = Values::new(); - let mut names = ScopeNames::new(); - if let Some(scope) = ctx.event_scope(event) { - for span in scope { - names.push(span.name()); - } - } - - capture.filter.as_ref().is_none_or(|filter| { - filter(Data { - layer, - event, - current: &ctx.current_span(), - values: &values, - scope: &names, - }) - }) -} - -impl Visit for Visitor { - fn record_debug(&mut self, f: &Field, v: &dyn fmt::Debug) { - self.values.push((f.name(), format!("{v:?}"))); - } - - fn record_str(&mut self, f: &Field, v: &str) { self.values.push((f.name(), v.to_owned())); } -} diff --git a/src/core/log/capture/mod.rs b/src/core/log/capture/mod.rs deleted file mode 100644 index 20f70091..00000000 --- a/src/core/log/capture/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -pub mod data; -mod guard; -pub mod layer; -pub mod state; -pub mod util; - -use std::sync::{Arc, Mutex}; - -pub use data::Data; -use guard::Guard; -pub use layer::{Layer, Value}; -pub use state::State; -pub use util::*; - -pub type Filter = dyn Fn(Data<'_>) -> bool + Send + Sync + 'static; -pub type Closure = dyn FnMut(Data<'_>) + Send + Sync + 'static; - -/// Capture instance state. -pub struct Capture { - state: Arc, - filter: Option>, - closure: Mutex>, -} - -impl Capture { - /// Construct a new capture instance. Capture does not start until the Guard - /// is in scope. 
- #[must_use] - pub fn new(state: &Arc, filter: Option, closure: C) -> Arc - where - F: Fn(Data<'_>) -> bool + Send + Sync + 'static, - C: FnMut(Data<'_>) + Send + Sync + 'static, - { - Arc::new(Self { - state: state.clone(), - filter: filter.map(|p| -> Box { Box::new(p) }), - closure: Mutex::new(Box::new(closure)), - }) - } - - #[must_use] - pub fn start(self: &Arc) -> Guard { - self.state.add(self); - Guard { capture: self.clone() } - } - - pub fn stop(self: &Arc) { self.state.del(self); } -} diff --git a/src/core/log/capture/state.rs b/src/core/log/capture/state.rs deleted file mode 100644 index dad6c8d8..00000000 --- a/src/core/log/capture/state.rs +++ /dev/null @@ -1,31 +0,0 @@ -use std::sync::{Arc, RwLock}; - -use super::Capture; - -/// Capture layer state. -pub struct State { - pub(super) active: RwLock>>, -} - -impl Default for State { - fn default() -> Self { Self::new() } -} - -impl State { - #[must_use] - pub fn new() -> Self { Self { active: RwLock::new(Vec::new()) } } - - pub(super) fn add(&self, capture: &Arc) { - self.active - .write() - .expect("locked for writing") - .push(capture.clone()); - } - - pub(super) fn del(&self, capture: &Arc) { - let mut vec = self.active.write().expect("locked for writing"); - if let Some(pos) = vec.iter().position(|v| Arc::ptr_eq(v, capture)) { - vec.swap_remove(pos); - } - } -} diff --git a/src/core/log/capture/util.rs b/src/core/log/capture/util.rs deleted file mode 100644 index 65524be5..00000000 --- a/src/core/log/capture/util.rs +++ /dev/null @@ -1,37 +0,0 @@ -use std::sync::{Arc, Mutex}; - -use super::{ - super::{Level, fmt}, - Closure, Data, -}; -use crate::Result; - -pub fn fmt_html(out: Arc>) -> Box -where - S: std::fmt::Write + Send + 'static, -{ - fmt(fmt::html, out) -} - -pub fn fmt_markdown(out: Arc>) -> Box -where - S: std::fmt::Write + Send + 'static, -{ - fmt(fmt::markdown, out) -} - -pub fn fmt(fun: F, out: Arc>) -> Box -where - F: Fn(&mut S, &Level, &str, &str) -> Result<()> + Send + Sync + Copy + 
'static, - S: std::fmt::Write + Send + 'static, -{ - Box::new(move |data| call(fun, &mut *out.lock().expect("locked"), &data)) -} - -fn call(fun: F, out: &mut S, data: &Data<'_>) -where - F: Fn(&mut S, &Level, &str, &str) -> Result<()>, - S: std::fmt::Write, -{ - fun(out, &data.level(), data.span_name(), data.message()).expect("log line appended"); -} diff --git a/src/core/log/color.rs b/src/core/log/color.rs deleted file mode 100644 index 23905d14..00000000 --- a/src/core/log/color.rs +++ /dev/null @@ -1,27 +0,0 @@ -use super::Level; - -/// @returns (Foreground, Background) -#[inline] -#[must_use] -pub fn html(level: &Level) -> (&'static str, &'static str) { - match *level { - | Level::TRACE => ("#000000", "#A0A0A0"), - | Level::DEBUG => ("#000000", "#FFFFFF"), - | Level::ERROR => ("#000000", "#FF0000"), - | Level::WARN => ("#000000", "#FFFF00"), - | Level::INFO => ("#FFFFFF", "#008E00"), - } -} - -/// @returns (Foreground) -#[inline] -#[must_use] -pub fn code_tag(level: &Level) -> &'static str { - match *level { - | Level::TRACE => "#888888", - | Level::DEBUG => "#C8C8C8", - | Level::ERROR => "#FF0000", - | Level::WARN => "#FFFF00", - | Level::INFO => "#00FF00", - } -} diff --git a/src/core/log/console.rs b/src/core/log/console.rs deleted file mode 100644 index d91239ac..00000000 --- a/src/core/log/console.rs +++ /dev/null @@ -1,159 +0,0 @@ -use std::{env, io, sync::LazyLock}; - -use tracing::{ - Event, Level, Subscriber, - field::{Field, Visit}, -}; -use tracing_subscriber::{ - field::RecordFields, - fmt, - fmt::{ - FmtContext, FormatEvent, FormatFields, MakeWriter, - format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, - }, - registry::LookupSpan, -}; - -use crate::{Config, Result, apply}; - -static SYSTEMD_MODE: LazyLock = - LazyLock::new(|| env::var("SYSTEMD_EXEC_PID").is_ok() && env::var("JOURNAL_STREAM").is_ok()); - -pub struct ConsoleWriter { - stdout: io::Stdout, - stderr: io::Stderr, - _journal_stream: [u64; 2], - use_stderr: bool, -} - -impl 
ConsoleWriter { - #[must_use] - pub fn new(_config: &Config) -> Self { - let journal_stream = get_journal_stream(); - Self { - stdout: io::stdout(), - stderr: io::stderr(), - _journal_stream: journal_stream.into(), - use_stderr: journal_stream.0 != 0, - } - } -} - -impl<'a> MakeWriter<'a> for ConsoleWriter { - type Writer = &'a Self; - - fn make_writer(&'a self) -> Self::Writer { self } -} - -impl io::Write for &'_ ConsoleWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - if self.use_stderr { - self.stderr.lock().write(buf) - } else { - self.stdout.lock().write(buf) - } - } - - fn flush(&mut self) -> io::Result<()> { - if self.use_stderr { - self.stderr.lock().flush() - } else { - self.stdout.lock().flush() - } - } -} - -pub struct ConsoleFormat { - _compact: Format, - full: Format, - pretty: Format, -} - -impl ConsoleFormat { - #[must_use] - pub fn new(config: &Config) -> Self { - Self { - _compact: fmt::format().compact(), - - full: Format::::default() - .with_thread_ids(config.log_thread_ids) - .with_ansi(config.log_colors), - - pretty: fmt::format() - .pretty() - .with_ansi(config.log_colors) - .with_thread_names(true) - .with_thread_ids(true) - .with_target(true) - .with_file(true) - .with_line_number(true) - .with_source_location(true), - } - } -} - -impl FormatEvent for ConsoleFormat -where - S: Subscriber + for<'a> LookupSpan<'a>, - N: for<'a> FormatFields<'a> + 'static, -{ - fn format_event( - &self, - ctx: &FmtContext<'_, S, N>, - writer: Writer<'_>, - event: &Event<'_>, - ) -> Result<(), std::fmt::Error> { - let is_debug = - cfg!(debug_assertions) && event.fields().any(|field| field.name() == "_debug"); - - match *event.metadata().level() { - | Level::ERROR if !is_debug => self.pretty.format_event(ctx, writer, event), - | _ => self.full.format_event(ctx, writer, event), - } - } -} - -struct ConsoleVisitor<'a> { - visitor: DefaultVisitor<'a>, -} - -impl<'writer> FormatFields<'writer> for ConsoleFormat { - fn format_fields(&self, writer: 
Writer<'writer>, fields: R) -> Result<(), std::fmt::Error> - where - R: RecordFields, - { - let mut visitor = ConsoleVisitor { - visitor: DefaultVisitor::<'_>::new(writer, true), - }; - - fields.record(&mut visitor); - - Ok(()) - } -} - -impl Visit for ConsoleVisitor<'_> { - fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { - if field.name().starts_with('_') { - return; - } - - self.visitor.record_debug(field, value); - } -} - -#[must_use] -fn get_journal_stream() -> (u64, u64) { - is_systemd_mode() - .then(|| env::var("JOURNAL_STREAM").ok()) - .flatten() - .as_deref() - .and_then(|s| s.split_once(':')) - .map(apply!(2, str::parse)) - .map(apply!(2, Result::unwrap_or_default)) - .unwrap_or((0, 0)) -} - -#[inline] -#[must_use] -pub fn is_systemd_mode() -> bool { *SYSTEMD_MODE } diff --git a/src/core/log/fmt.rs b/src/core/log/fmt.rs deleted file mode 100644 index b73d0c9b..00000000 --- a/src/core/log/fmt.rs +++ /dev/null @@ -1,48 +0,0 @@ -use std::fmt::Write; - -use super::{Level, color}; -use crate::Result; - -pub fn html(out: &mut S, level: &Level, span: &str, msg: &str) -> Result<()> -where - S: Write + ?Sized, -{ - let color = color::code_tag(level); - let level = level.as_str().to_uppercase(); - write!( - out, - "{level:>5} {span:^12} \ - {msg}
" - )?; - - Ok(()) -} - -pub fn markdown(out: &mut S, level: &Level, span: &str, msg: &str) -> Result<()> -where - S: Write + ?Sized, -{ - let level = level.as_str().to_uppercase(); - writeln!(out, "`{level:>5}` `{span:^12}` `{msg}`")?; - - Ok(()) -} - -pub fn markdown_table(out: &mut S, level: &Level, span: &str, msg: &str) -> Result<()> -where - S: Write + ?Sized, -{ - let level = level.as_str().to_uppercase(); - writeln!(out, "| {level:>5} | {span:^12} | {msg} |")?; - - Ok(()) -} - -pub fn markdown_table_head(out: &mut S) -> Result<()> -where - S: Write + ?Sized, -{ - write!(out, "| level | span | message |\n| ------: | :-----: | :------- |\n")?; - - Ok(()) -} diff --git a/src/core/log/fmt_span.rs b/src/core/log/fmt_span.rs deleted file mode 100644 index 7ba86c4c..00000000 --- a/src/core/log/fmt_span.rs +++ /dev/null @@ -1,17 +0,0 @@ -use tracing_subscriber::fmt::format::FmtSpan; - -use crate::Result; - -#[inline] -pub fn from_str(str: &str) -> Result { - match str.to_uppercase().as_str() { - | "ENTER" => Ok(FmtSpan::ENTER), - | "EXIT" => Ok(FmtSpan::EXIT), - | "NEW" => Ok(FmtSpan::NEW), - | "CLOSE" => Ok(FmtSpan::CLOSE), - | "ACTIVE" => Ok(FmtSpan::ACTIVE), - | "FULL" => Ok(FmtSpan::FULL), - | "NONE" => Ok(FmtSpan::NONE), - | _ => Err(FmtSpan::NONE), - } -} diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs deleted file mode 100644 index f7b2521a..00000000 --- a/src/core/log/mod.rs +++ /dev/null @@ -1,64 +0,0 @@ -#![allow(clippy::disallowed_macros)] - -pub mod capture; -pub mod color; -pub mod console; -pub mod fmt; -pub mod fmt_span; -mod reload; -mod suppress; - -pub use capture::Capture; -pub use console::{ConsoleFormat, ConsoleWriter, is_systemd_mode}; -pub use reload::{LogLevelReloadHandles, ReloadHandle}; -pub use suppress::Suppress; -pub use tracing::Level; -pub use tracing_core::{Event, Metadata}; -pub use tracing_subscriber::EnvFilter; - -/// Logging subsystem. 
This is a singleton member of super::Server which holds -/// all logging and tracing related state rather than shoving it all in -/// super::Server directly. -pub struct Log { - /// General log level reload handles. - pub reload: LogLevelReloadHandles, - - /// Tracing capture state for ephemeral/oneshot uses. - pub capture: std::sync::Arc, -} - -// Wraps for logging macros. Use these macros rather than extern tracing:: or -// log:: crates in project code. ::log and ::tracing can still be used if -// necessary but discouraged. Remember debug_ log macros are also exported to -// the crate namespace like these. - -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! event { - ( $level:expr_2021, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } -} - -#[macro_export] -macro_rules! error { - ( $($x:tt)+ ) => { ::tracing::error!( $($x)+ ) } -} - -#[macro_export] -macro_rules! warn { - ( $($x:tt)+ ) => { ::tracing::warn!( $($x)+ ) } -} - -#[macro_export] -macro_rules! info { - ( $($x:tt)+ ) => { ::tracing::info!( $($x)+ ) } -} - -#[macro_export] -macro_rules! debug { - ( $($x:tt)+ ) => { ::tracing::debug!( $($x)+ ) } -} - -#[macro_export] -macro_rules! trace { - ( $($x:tt)+ ) => { ::tracing::trace!( $($x)+ ) } -} diff --git a/src/core/log/reload.rs b/src/core/log/reload.rs deleted file mode 100644 index e6a16c9f..00000000 --- a/src/core/log/reload.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; - -use tracing_subscriber::{EnvFilter, reload}; - -use crate::{Result, error}; - -/// We need to store a reload::Handle value, but can't name it's type explicitly -/// because the S type parameter depends on the subscriber's previous layers. In -/// our case, this includes unnameable 'impl Trait' types. -/// -/// This is fixed[1] in the unreleased tracing-subscriber from the master -/// branch, which removes the S parameter. 
Unfortunately can't use it without -/// pulling in a version of tracing that's incompatible with the rest of our -/// deps. -/// -/// To work around this, we define an trait without the S paramter that forwards -/// to the reload::Handle::reload method, and then store the handle as a trait -/// object. -/// -/// [1]: -pub trait ReloadHandle { - fn current(&self) -> Option; - - fn reload(&self, new_value: L) -> Result<(), reload::Error>; -} - -impl ReloadHandle for reload::Handle { - fn current(&self) -> Option { Self::clone_current(self) } - - fn reload(&self, new_value: L) -> Result<(), reload::Error> { Self::reload(self, new_value) } -} - -#[derive(Clone)] -pub struct LogLevelReloadHandles { - handles: Arc>, -} - -type HandleMap = HashMap; -type Handle = Box + Send + Sync>; - -impl LogLevelReloadHandles { - pub fn add(&self, name: &str, handle: Handle) { - self.handles - .lock() - .expect("locked") - .insert(name.into(), handle); - } - - pub fn reload(&self, new_value: &EnvFilter, names: Option<&[&str]>) -> Result<()> { - self.handles - .lock() - .expect("locked") - .iter() - .filter(|(name, _)| names.is_some_and(|names| names.contains(&name.as_str()))) - .for_each(|(_, handle)| { - _ = handle.reload(new_value.clone()).or_else(error::else_log); - }); - - Ok(()) - } - - #[must_use] - pub fn current(&self, name: &str) -> Option { - self.handles - .lock() - .expect("locked") - .get(name) - .map(|handle| handle.current())? 
- } -} - -impl Default for LogLevelReloadHandles { - fn default() -> Self { - Self { - handles: Arc::new(HandleMap::new().into()), - } - } -} diff --git a/src/core/log/suppress.rs b/src/core/log/suppress.rs deleted file mode 100644 index 55428d15..00000000 --- a/src/core/log/suppress.rs +++ /dev/null @@ -1,40 +0,0 @@ -use std::sync::Arc; - -use super::EnvFilter; -use crate::Server; - -pub struct Suppress { - server: Arc, - restore: EnvFilter, -} - -impl Suppress { - pub fn new(server: &Arc) -> Self { - let handle = "console"; - let config = &server.config.log; - let suppress = EnvFilter::default(); - let restore = server - .log - .reload - .current(handle) - .unwrap_or_else(|| EnvFilter::try_new(config).unwrap_or_default()); - - server - .log - .reload - .reload(&suppress, Some(&[handle])) - .expect("log filter reloaded"); - - Self { server: server.clone(), restore } - } -} - -impl Drop for Suppress { - fn drop(&mut self) { - self.server - .log - .reload - .reload(&self.restore, Some(&["console"])) - .expect("log filter reloaded"); - } -} diff --git a/src/core/matrix/event.rs b/src/core/matrix/event.rs deleted file mode 100644 index 29153334..00000000 --- a/src/core/matrix/event.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::{ - borrow::Borrow, - fmt::{Debug, Display}, - hash::Hash, -}; - -use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; -use serde_json::value::RawValue as RawJsonValue; - -/// Abstraction of a PDU so users can have their own PDU types. -pub trait Event { - type Id: Clone + Debug + Display + Eq + Ord + Hash + Send + Borrow; - - /// The `EventId` of this event. - fn event_id(&self) -> &Self::Id; - - /// The `RoomId` of this event. - fn room_id(&self) -> &RoomId; - - /// The `UserId` of this event. - fn sender(&self) -> &UserId; - - /// The time of creation on the originating server. - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch; - - /// The event type. 
- fn event_type(&self) -> &TimelineEventType; - - /// The event's content. - fn content(&self) -> &RawJsonValue; - - /// The state key for this event. - fn state_key(&self) -> Option<&str>; - - /// The events before this event. - // Requires GATs to avoid boxing (and TAIT for making it convenient). - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; - - /// All the authenticating events for this event. - // Requires GATs to avoid boxing (and TAIT for making it convenient). - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; - - /// If this event is a redaction event this is the event it redacts. - fn redacts(&self) -> Option<&Self::Id>; -} - -impl Event for &T { - type Id = T::Id; - - fn event_id(&self) -> &Self::Id { (*self).event_id() } - - fn room_id(&self) -> &RoomId { (*self).room_id() } - - fn sender(&self) -> &UserId { (*self).sender() } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (*self).origin_server_ts() } - - fn event_type(&self) -> &TimelineEventType { (*self).event_type() } - - fn content(&self) -> &RawJsonValue { (*self).content() } - - fn state_key(&self) -> Option<&str> { (*self).state_key() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - (*self).prev_events() - } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - (*self).auth_events() - } - - fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() } -} diff --git a/src/core/matrix/mod.rs b/src/core/matrix/mod.rs deleted file mode 100644 index 8c978173..00000000 --- a/src/core/matrix/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! 
Core Matrix Library - -pub mod event; -pub mod pdu; -pub mod state_res; - -pub use event::Event; -pub use pdu::{PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; -pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; diff --git a/src/core/matrix/pdu.rs b/src/core/matrix/pdu.rs deleted file mode 100644 index 7e1ecfa8..00000000 --- a/src/core/matrix/pdu.rs +++ /dev/null @@ -1,127 +0,0 @@ -mod builder; -mod content; -mod count; -mod event_id; -mod filter; -mod id; -mod raw_id; -mod redact; -mod relation; -mod state_key; -mod strip; -#[cfg(test)] -mod tests; -mod unsigned; - -use std::cmp::Ordering; - -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, - OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, UInt, UserId, events::TimelineEventType, -}; -use serde::{Deserialize, Serialize}; -use serde_json::value::RawValue as RawJsonValue; - -pub use self::{ - Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, - builder::{Builder, Builder as PduBuilder}, - count::Count, - event_id::*, - id::*, - raw_id::*, - state_key::{ShortStateKey, StateKey}, -}; -use super::Event; -use crate::Result; - -/// Persistent Data Unit (Event) -#[derive(Clone, Deserialize, Serialize, Debug)] -pub struct Pdu { - pub event_id: OwnedEventId, - pub room_id: OwnedRoomId, - pub sender: OwnedUserId, - #[serde(skip_serializing_if = "Option::is_none")] - pub origin: Option, - pub origin_server_ts: UInt, - #[serde(rename = "type")] - pub kind: TimelineEventType, - pub content: Box, - #[serde(skip_serializing_if = "Option::is_none")] - pub state_key: Option, - pub prev_events: Vec, - pub depth: UInt, - pub auth_events: Vec, - #[serde(skip_serializing_if = "Option::is_none")] - pub redacts: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub unsigned: Option>, - pub hashes: EventHash, - #[serde(default, skip_serializing_if = "Option::is_none")] - // BTreeMap, BTreeMap> - pub 
signatures: Option>, -} - -/// Content hashes of a PDU. -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct EventHash { - /// The SHA-256 hash. - pub sha256: String, -} - -impl Pdu { - pub fn from_id_val(event_id: &EventId, mut json: CanonicalJsonObject) -> Result { - let event_id = CanonicalJsonValue::String(event_id.into()); - json.insert("event_id".into(), event_id); - serde_json::to_value(json) - .and_then(serde_json::from_value) - .map_err(Into::into) - } -} - -impl Event for Pdu { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } - - fn room_id(&self) -> &RoomId { &self.room_id } - - fn sender(&self) -> &UserId { &self.sender } - - fn event_type(&self) -> &TimelineEventType { &self.kind } - - fn content(&self) -> &RawJsonValue { &self.content } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - MilliSecondsSinceUnixEpoch(self.origin_server_ts) - } - - fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.prev_events.iter() - } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.auth_events.iter() - } - - fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } -} - -/// Prevent derived equality which wouldn't limit itself to event_id -impl Eq for Pdu {} - -/// Equality determined by the Pdu's ID, not the memory representations. -impl PartialEq for Pdu { - fn eq(&self, other: &Self) -> bool { self.event_id == other.event_id } -} - -/// Ordering determined by the Pdu's ID, not the memory representations. -impl Ord for Pdu { - fn cmp(&self, other: &Self) -> Ordering { self.event_id.cmp(&other.event_id) } -} - -/// Ordering determined by the Pdu's ID, not the memory representations. 
-impl PartialOrd for Pdu { - fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } -} diff --git a/src/core/matrix/pdu/builder.rs b/src/core/matrix/pdu/builder.rs deleted file mode 100644 index 5aa0c9ca..00000000 --- a/src/core/matrix/pdu/builder.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::collections::BTreeMap; - -use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedEventId, - events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, -}; -use serde::Deserialize; -use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; - -use super::StateKey; - -/// Build the start of a PDU in order to add it to the Database. -#[derive(Debug, Deserialize)] -pub struct Builder { - #[serde(rename = "type")] - pub event_type: TimelineEventType, - - pub content: Box, - - pub unsigned: Option, - - pub state_key: Option, - - pub redacts: Option, - - /// For timestamped messaging, should only be used for appservices. - /// Will be set to current time if None - pub timestamp: Option, -} - -type Unsigned = BTreeMap; - -impl Builder { - pub fn state(state_key: S, content: &T) -> Self - where - T: EventContent, - S: Into, - { - Self { - event_type: content.event_type().into(), - content: to_raw_value(content) - .expect("Builder failed to serialize state event content to RawValue"), - state_key: Some(state_key.into()), - ..Self::default() - } - } - - pub fn timeline(content: &T) -> Self - where - T: EventContent, - { - Self { - event_type: content.event_type().into(), - content: to_raw_value(content) - .expect("Builder failed to serialize timeline event content to RawValue"), - ..Self::default() - } - } -} - -impl Default for Builder { - fn default() -> Self { - Self { - event_type: "m.room.message".into(), - content: Box::::default(), - unsigned: None, - state_key: None, - redacts: None, - timestamp: None, - } - } -} diff --git a/src/core/matrix/pdu/content.rs b/src/core/matrix/pdu/content.rs deleted file mode 100644 index 4e60ce6e..00000000 --- 
a/src/core/matrix/pdu/content.rs +++ /dev/null @@ -1,20 +0,0 @@ -use serde::Deserialize; -use serde_json::value::Value as JsonValue; - -use crate::{Result, err, implement}; - -#[must_use] -#[implement(super::Pdu)] -pub fn get_content_as_value(&self) -> JsonValue { - self.get_content() - .expect("pdu content must be a valid JSON value") -} - -#[implement(super::Pdu)] -pub fn get_content(&self) -> Result -where - T: for<'de> Deserialize<'de>, -{ - serde_json::from_str(self.content.get()) - .map_err(|e| err!(Database("Failed to deserialize pdu content into type: {e}"))) -} diff --git a/src/core/matrix/pdu/count.rs b/src/core/matrix/pdu/count.rs deleted file mode 100644 index b880278f..00000000 --- a/src/core/matrix/pdu/count.rs +++ /dev/null @@ -1,174 +0,0 @@ -#![allow(clippy::cast_possible_wrap, clippy::cast_sign_loss, clippy::as_conversions)] - -use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr}; - -use ruma::api::Direction; - -use crate::{Error, Result, err}; - -#[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] -pub enum Count { - Normal(u64), - Backfilled(i64), -} - -impl Count { - #[inline] - #[must_use] - pub fn from_unsigned(unsigned: u64) -> Self { Self::from_signed(unsigned as i64) } - - #[inline] - #[must_use] - pub fn from_signed(signed: i64) -> Self { - match signed { - | i64::MIN..=0 => Self::Backfilled(signed), - | _ => Self::Normal(signed as u64), - } - } - - #[inline] - #[must_use] - pub fn into_unsigned(self) -> u64 { - self.debug_assert_valid(); - match self { - | Self::Normal(i) => i, - | Self::Backfilled(i) => i as u64, - } - } - - #[inline] - #[must_use] - pub fn into_signed(self) -> i64 { - self.debug_assert_valid(); - match self { - | Self::Normal(i) => i as i64, - | Self::Backfilled(i) => i, - } - } - - #[inline] - #[must_use] - pub fn into_normal(self) -> Self { - self.debug_assert_valid(); - match self { - | Self::Normal(i) => Self::Normal(i), - | Self::Backfilled(_) => Self::Normal(0), - } - } - - #[inline] - pub fn checked_inc(self, 
dir: Direction) -> Result { - match dir { - | Direction::Forward => self.checked_add(1), - | Direction::Backward => self.checked_sub(1), - } - } - - #[inline] - pub fn checked_add(self, add: u64) -> Result { - Ok(match self { - | Self::Normal(i) => Self::Normal( - i.checked_add(add) - .ok_or_else(|| err!(Arithmetic("Count::Normal overflow")))?, - ), - | Self::Backfilled(i) => Self::Backfilled( - i.checked_add(add as i64) - .ok_or_else(|| err!(Arithmetic("Count::Backfilled overflow")))?, - ), - }) - } - - #[inline] - pub fn checked_sub(self, sub: u64) -> Result { - Ok(match self { - | Self::Normal(i) => Self::Normal( - i.checked_sub(sub) - .ok_or_else(|| err!(Arithmetic("Count::Normal underflow")))?, - ), - | Self::Backfilled(i) => Self::Backfilled( - i.checked_sub(sub as i64) - .ok_or_else(|| err!(Arithmetic("Count::Backfilled underflow")))?, - ), - }) - } - - #[inline] - #[must_use] - pub fn saturating_inc(self, dir: Direction) -> Self { - match dir { - | Direction::Forward => self.saturating_add(1), - | Direction::Backward => self.saturating_sub(1), - } - } - - #[inline] - #[must_use] - pub fn saturating_add(self, add: u64) -> Self { - match self { - | Self::Normal(i) => Self::Normal(i.saturating_add(add)), - | Self::Backfilled(i) => Self::Backfilled(i.saturating_add(add as i64)), - } - } - - #[inline] - #[must_use] - pub fn saturating_sub(self, sub: u64) -> Self { - match self { - | Self::Normal(i) => Self::Normal(i.saturating_sub(sub)), - | Self::Backfilled(i) => Self::Backfilled(i.saturating_sub(sub as i64)), - } - } - - #[inline] - #[must_use] - pub const fn min() -> Self { Self::Backfilled(i64::MIN) } - - #[inline] - #[must_use] - pub const fn max() -> Self { Self::Normal(i64::MAX as u64) } - - #[inline] - pub(crate) fn debug_assert_valid(&self) { - if let Self::Backfilled(i) = self { - debug_assert!(*i <= 0, "Backfilled sequence must be negative"); - } - } -} - -impl Display for Count { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 
- self.debug_assert_valid(); - match self { - | Self::Normal(i) => write!(f, "{i}"), - | Self::Backfilled(i) => write!(f, "{i}"), - } - } -} - -impl From for Count { - #[inline] - fn from(signed: i64) -> Self { Self::from_signed(signed) } -} - -impl From for Count { - #[inline] - fn from(unsigned: u64) -> Self { Self::from_unsigned(unsigned) } -} - -impl FromStr for Count { - type Err = Error; - - fn from_str(token: &str) -> Result { Ok(Self::from_signed(token.parse()?)) } -} - -impl PartialOrd for Count { - fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } -} - -impl Ord for Count { - fn cmp(&self, other: &Self) -> Ordering { self.into_signed().cmp(&other.into_signed()) } -} - -impl Default for Count { - fn default() -> Self { Self::Normal(0) } -} diff --git a/src/core/matrix/pdu/event_id.rs b/src/core/matrix/pdu/event_id.rs deleted file mode 100644 index e9d868b1..00000000 --- a/src/core/matrix/pdu/event_id.rs +++ /dev/null @@ -1,31 +0,0 @@ -use ruma::{CanonicalJsonObject, OwnedEventId, RoomVersionId}; -use serde_json::value::RawValue as RawJsonValue; - -use crate::{Result, err}; - -/// Generates a correct eventId for the incoming pdu. -/// -/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. -pub fn gen_event_id_canonical_json( - pdu: &RawJsonValue, - room_version_id: &RoomVersionId, -) -> Result<(OwnedEventId, CanonicalJsonObject)> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()) - .map_err(|e| err!(BadServerResponse(warn!("Error parsing incoming event: {e:?}"))))?; - - let event_id = gen_event_id(&value, room_version_id)?; - - Ok((event_id, value)) -} - -/// Generates a correct eventId for the incoming pdu. 
-pub fn gen_event_id( - value: &CanonicalJsonObject, - room_version_id: &RoomVersionId, -) -> Result { - let reference_hash = ruma::signatures::reference_hash(value, room_version_id)?; - let event_id: OwnedEventId = format!("${reference_hash}").try_into()?; - - Ok(event_id) -} diff --git a/src/core/matrix/pdu/filter.rs b/src/core/matrix/pdu/filter.rs deleted file mode 100644 index aabf13db..00000000 --- a/src/core/matrix/pdu/filter.rs +++ /dev/null @@ -1,90 +0,0 @@ -use ruma::api::client::filter::{RoomEventFilter, UrlFilter}; -use serde_json::Value; - -use crate::{implement, is_equal_to}; - -#[implement(super::Pdu)] -#[must_use] -pub fn matches(&self, filter: &RoomEventFilter) -> bool { - if !self.matches_sender(filter) { - return false; - } - - if !self.matches_room(filter) { - return false; - } - - if !self.matches_type(filter) { - return false; - } - - if !self.matches_url(filter) { - return false; - } - - true -} - -#[implement(super::Pdu)] -fn matches_room(&self, filter: &RoomEventFilter) -> bool { - if filter.not_rooms.contains(&self.room_id) { - return false; - } - - if let Some(rooms) = filter.rooms.as_ref() { - if !rooms.contains(&self.room_id) { - return false; - } - } - - true -} - -#[implement(super::Pdu)] -fn matches_sender(&self, filter: &RoomEventFilter) -> bool { - if filter.not_senders.contains(&self.sender) { - return false; - } - - if let Some(senders) = filter.senders.as_ref() { - if !senders.contains(&self.sender) { - return false; - } - } - - true -} - -#[implement(super::Pdu)] -fn matches_type(&self, filter: &RoomEventFilter) -> bool { - let event_type = &self.kind.to_cow_str(); - if filter.not_types.iter().any(is_equal_to!(event_type)) { - return false; - } - - if let Some(types) = filter.types.as_ref() { - if !types.iter().any(is_equal_to!(event_type)) { - return false; - } - } - - true -} - -#[implement(super::Pdu)] -fn matches_url(&self, filter: &RoomEventFilter) -> bool { - let Some(url_filter) = filter.url_filter.as_ref() else { - 
return true; - }; - - //TODO: might be better to use Ruma's Raw rather than serde here - let url = serde_json::from_str::(self.content.get()) - .expect("parsing content JSON failed") - .get("url") - .is_some_and(Value::is_string); - - match url_filter { - | UrlFilter::EventsWithUrl => url, - | UrlFilter::EventsWithoutUrl => !url, - } -} diff --git a/src/core/matrix/pdu/id.rs b/src/core/matrix/pdu/id.rs deleted file mode 100644 index 0b23a29f..00000000 --- a/src/core/matrix/pdu/id.rs +++ /dev/null @@ -1,22 +0,0 @@ -use super::{Count, RawId}; -use crate::utils::u64_from_u8x8; - -pub type ShortRoomId = ShortId; -pub type ShortEventId = ShortId; -pub type ShortId = u64; - -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct Id { - pub shortroomid: ShortRoomId, - pub shorteventid: Count, -} - -impl From for Id { - #[inline] - fn from(raw: RawId) -> Self { - Self { - shortroomid: u64_from_u8x8(raw.shortroomid()), - shorteventid: Count::from_unsigned(u64_from_u8x8(raw.shorteventid())), - } - } -} diff --git a/src/core/matrix/pdu/raw_id.rs b/src/core/matrix/pdu/raw_id.rs deleted file mode 100644 index 318a0cd7..00000000 --- a/src/core/matrix/pdu/raw_id.rs +++ /dev/null @@ -1,113 +0,0 @@ -use arrayvec::ArrayVec; - -use super::{Count, Id, ShortEventId, ShortId, ShortRoomId}; - -#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] -pub enum RawId { - Normal(RawIdNormal), - Backfilled(RawIdBackfilled), -} - -type RawIdNormal = [u8; RawId::NORMAL_LEN]; -type RawIdBackfilled = [u8; RawId::BACKFILLED_LEN]; - -const INT_LEN: usize = size_of::(); - -impl RawId { - const BACKFILLED_LEN: usize = size_of::() + INT_LEN + size_of::(); - const MAX_LEN: usize = Self::BACKFILLED_LEN; - const NORMAL_LEN: usize = size_of::() + size_of::(); - - #[inline] - #[must_use] - pub fn pdu_count(&self) -> Count { - let id: Id = (*self).into(); - id.shorteventid - } - - #[inline] - #[must_use] - pub fn shortroomid(self) -> [u8; INT_LEN] { - match self { - | Self::Normal(raw) => raw[0..INT_LEN] - 
.try_into() - .expect("normal raw shortroomid array from slice"), - | Self::Backfilled(raw) => raw[0..INT_LEN] - .try_into() - .expect("backfilled raw shortroomid array from slice"), - } - } - - #[inline] - #[must_use] - pub fn shorteventid(self) -> [u8; INT_LEN] { - match self { - | Self::Normal(raw) => raw[INT_LEN..INT_LEN * 2] - .try_into() - .expect("normal raw shorteventid array from slice"), - | Self::Backfilled(raw) => raw[INT_LEN * 2..INT_LEN * 3] - .try_into() - .expect("backfilled raw shorteventid array from slice"), - } - } - - #[inline] - #[must_use] - pub fn as_bytes(&self) -> &[u8] { - match self { - | Self::Normal(raw) => raw, - | Self::Backfilled(raw) => raw, - } - } -} - -impl AsRef<[u8]> for RawId { - #[inline] - fn as_ref(&self) -> &[u8] { self.as_bytes() } -} - -impl From<&[u8]> for RawId { - #[inline] - fn from(id: &[u8]) -> Self { - match id.len() { - | Self::NORMAL_LEN => Self::Normal( - id[0..Self::NORMAL_LEN] - .try_into() - .expect("normal RawId from [u8]"), - ), - | Self::BACKFILLED_LEN => Self::Backfilled( - id[0..Self::BACKFILLED_LEN] - .try_into() - .expect("backfilled RawId from [u8]"), - ), - | _ => unimplemented!("unrecognized RawId length"), - } - } -} - -impl From for RawId { - #[inline] - fn from(id: Id) -> Self { - const MAX_LEN: usize = RawId::MAX_LEN; - type RawVec = ArrayVec; - - let mut vec = RawVec::new(); - vec.extend(id.shortroomid.to_be_bytes()); - id.shorteventid.debug_assert_valid(); - match id.shorteventid { - | Count::Normal(shorteventid) => { - vec.extend(shorteventid.to_be_bytes()); - Self::Normal(vec.as_ref().try_into().expect("RawVec into RawId::Normal")) - }, - | Count::Backfilled(shorteventid) => { - vec.extend(0_u64.to_be_bytes()); - vec.extend(shorteventid.to_be_bytes()); - Self::Backfilled( - vec.as_ref() - .try_into() - .expect("RawVec into RawId::Backfilled"), - ) - }, - } - } -} diff --git a/src/core/matrix/pdu/redact.rs b/src/core/matrix/pdu/redact.rs deleted file mode 100644 index 409debfe..00000000 --- 
a/src/core/matrix/pdu/redact.rs +++ /dev/null @@ -1,117 +0,0 @@ -use ruma::{ - OwnedEventId, RoomVersionId, - canonical_json::redact_content_in_place, - events::{TimelineEventType, room::redaction::RoomRedactionEventContent}, -}; -use serde::Deserialize; -use serde_json::{ - json, - value::{RawValue as RawJsonValue, to_raw_value}, -}; - -use crate::{Error, Result, implement}; - -#[derive(Deserialize)] -struct ExtractRedactedBecause { - redacted_because: Option, -} - -#[implement(super::Pdu)] -pub fn redact(&mut self, room_version_id: &RoomVersionId, reason: &Self) -> Result { - self.unsigned = None; - - let mut content = serde_json::from_str(self.content.get()) - .map_err(|_| Error::bad_database("PDU in db has invalid content."))?; - - redact_content_in_place(&mut content, room_version_id, self.kind.to_string()) - .map_err(|e| Error::Redaction(self.sender.server_name().to_owned(), e))?; - - self.unsigned = Some( - to_raw_value(&json!({ - "redacted_because": serde_json::to_value(reason).expect("to_value(Pdu) always works") - })) - .expect("to string always works"), - ); - - self.content = to_raw_value(&content).expect("to string always works"); - - Ok(()) -} - -#[implement(super::Pdu)] -#[must_use] -pub fn is_redacted(&self) -> bool { - let Some(unsigned) = &self.unsigned else { - return false; - }; - - let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else { - return false; - }; - - unsigned.redacted_because.is_some() -} - -/// Copies the `redacts` property of the event to the `content` dict and -/// vice-versa. -/// -/// This follows the specification's -/// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property): -/// -/// > For backwards-compatibility with older clients, servers should add a -/// > redacts -/// > property to the top level of m.room.redaction events in when serving -/// > such events -/// > over the Client-Server API. 
-/// -/// > For improved compatibility with newer clients, servers should add a -/// > redacts property -/// > to the content of m.room.redaction events in older room versions when -/// > serving -/// > such events over the Client-Server API. -#[implement(super::Pdu)] -#[must_use] -pub fn copy_redacts(&self) -> (Option, Box) { - if self.kind == TimelineEventType::RoomRedaction { - if let Ok(mut content) = - serde_json::from_str::(self.content.get()) - { - match content.redacts { - | Some(redacts) => { - return (Some(redacts), self.content.clone()); - }, - | _ => match self.redacts.clone() { - | Some(redacts) => { - content.redacts = Some(redacts); - return ( - self.redacts.clone(), - to_raw_value(&content) - .expect("Must be valid, we only added redacts field"), - ); - }, - | _ => {}, - }, - } - } - } - - (self.redacts.clone(), self.content.clone()) -} - -#[implement(super::Pdu)] -#[must_use] -pub fn redacts_id(&self, room_version: &RoomVersionId) -> Option { - use RoomVersionId::*; - - if self.kind != TimelineEventType::RoomRedaction { - return None; - } - - match *room_version { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => self.redacts.clone(), - | _ => - self.get_content::() - .ok()? 
- .redacts, - } -} diff --git a/src/core/matrix/pdu/relation.rs b/src/core/matrix/pdu/relation.rs deleted file mode 100644 index 2968171e..00000000 --- a/src/core/matrix/pdu/relation.rs +++ /dev/null @@ -1,22 +0,0 @@ -use ruma::events::relation::RelationType; -use serde::Deserialize; - -use crate::implement; - -#[derive(Clone, Debug, Deserialize)] -struct ExtractRelType { - rel_type: RelationType, -} -#[derive(Clone, Debug, Deserialize)] -struct ExtractRelatesToEventId { - #[serde(rename = "m.relates_to")] - relates_to: ExtractRelType, -} - -#[implement(super::Pdu)] -#[must_use] -pub fn relation_type_equal(&self, rel_type: &RelationType) -> bool { - self.get_content() - .map(|c: ExtractRelatesToEventId| c.relates_to.rel_type) - .is_ok_and(|r| r == *rel_type) -} diff --git a/src/core/matrix/pdu/state_key.rs b/src/core/matrix/pdu/state_key.rs deleted file mode 100644 index 4af4fcf7..00000000 --- a/src/core/matrix/pdu/state_key.rs +++ /dev/null @@ -1,8 +0,0 @@ -use smallstr::SmallString; - -use super::ShortId; - -pub type StateKey = SmallString<[u8; INLINE_SIZE]>; -pub type ShortStateKey = ShortId; - -const INLINE_SIZE: usize = 48; diff --git a/src/core/matrix/pdu/strip.rs b/src/core/matrix/pdu/strip.rs deleted file mode 100644 index 3683caaa..00000000 --- a/src/core/matrix/pdu/strip.rs +++ /dev/null @@ -1,288 +0,0 @@ -use ruma::{ - events::{ - AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, - AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, - }, - serde::Raw, -}; -use serde_json::{json, value::Value as JsonValue}; - -use crate::implement; - -/// This only works for events that are also AnyRoomEvents. -#[must_use] -#[implement(super::Pdu)] -pub fn into_any_event(self) -> Raw { - serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works") -} - -/// This only works for events that are also AnyRoomEvents. 
-#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_any_event_value(self) -> JsonValue { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_room_event(self) -> Raw { self.to_room_event() } - -#[implement(super::Pdu)] -#[must_use] -pub fn to_room_event(&self) -> Raw { - serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn to_room_event_value(&self) -> JsonValue { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_message_like_event(self) -> Raw { self.to_message_like_event() } - -#[implement(super::Pdu)] -#[must_use] -pub fn to_message_like_event(&self) -> Raw { - serde_json::from_value(self.to_message_like_event_value()) - .expect("Raw::from_value always works") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn to_message_like_event_value(&self) -> JsonValue { - let (redacts, content) = self.copy_redacts(); - 
let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_sync_room_event(self) -> Raw { self.to_sync_room_event() } - -#[implement(super::Pdu)] -#[must_use] -pub fn to_sync_room_event(&self) -> Raw { - serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn to_sync_room_event_value(&self) -> JsonValue { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -pub fn into_state_event(self) -> Raw { - serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_state_event_value(self) -> JsonValue { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - "state_key": self.state_key, - }); - - if let Some(unsigned) = self.unsigned { - json["unsigned"] = json!(unsigned); - } - - json -} - 
-#[implement(super::Pdu)] -#[must_use] -pub fn into_sync_state_event(self) -> Raw { - serde_json::from_value(self.into_sync_state_event_value()) - .expect("Raw::from_value always works") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_sync_state_event_value(self) -> JsonValue { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "state_key": self.state_key, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_stripped_state_event(self) -> Raw { - self.to_stripped_state_event() -} - -#[implement(super::Pdu)] -#[must_use] -pub fn to_stripped_state_event(&self) -> Raw { - serde_json::from_value(self.to_stripped_state_event_value()) - .expect("Raw::from_value always works") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn to_stripped_state_event_value(&self) -> JsonValue { - json!({ - "content": self.content, - "type": self.kind, - "sender": self.sender, - "state_key": self.state_key, - }) -} - -#[implement(super::Pdu)] -#[must_use] -pub fn into_stripped_spacechild_state_event(self) -> Raw { - serde_json::from_value(self.into_stripped_spacechild_state_event_value()) - .expect("Raw::from_value always works") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue { - json!({ - "content": self.content, - "type": self.kind, - "sender": self.sender, - "state_key": self.state_key, - "origin_server_ts": self.origin_server_ts, - }) -} - -#[implement(super::Pdu)] -#[must_use] -pub fn into_member_event(self) -> Raw> { - serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_member_event_value(self) -> JsonValue { - let mut json = json!({ 
- "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "redacts": self.redacts, - "room_id": self.room_id, - "state_key": self.state_key, - }); - - if let Some(unsigned) = self.unsigned { - json["unsigned"] = json!(unsigned); - } - - json -} diff --git a/src/core/matrix/pdu/tests.rs b/src/core/matrix/pdu/tests.rs deleted file mode 100644 index ed9b7caa..00000000 --- a/src/core/matrix/pdu/tests.rs +++ /dev/null @@ -1,17 +0,0 @@ -use super::Count; - -#[test] -fn backfilled_parse() { - let count: Count = "-987654".parse().expect("parse() failed"); - let backfilled = matches!(count, Count::Backfilled(_)); - - assert!(backfilled, "not backfilled variant"); -} - -#[test] -fn normal_parse() { - let count: Count = "987654".parse().expect("parse() failed"); - let backfilled = matches!(count, Count::Backfilled(_)); - - assert!(!backfilled, "backfilled variant"); -} diff --git a/src/core/matrix/pdu/unsigned.rs b/src/core/matrix/pdu/unsigned.rs deleted file mode 100644 index 23897519..00000000 --- a/src/core/matrix/pdu/unsigned.rs +++ /dev/null @@ -1,116 +0,0 @@ -use std::collections::BTreeMap; - -use ruma::MilliSecondsSinceUnixEpoch; -use serde::Deserialize; -use serde_json::value::{RawValue as RawJsonValue, Value as JsonValue, to_raw_value}; - -use super::Pdu; -use crate::{Result, err, implement, is_true}; - -#[implement(Pdu)] -pub fn remove_transaction_id(&mut self) -> Result { - use BTreeMap as Map; - - let Some(unsigned) = &self.unsigned else { - return Ok(()); - }; - - let mut unsigned: Map<&str, Box> = serde_json::from_str(unsigned.get()) - .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; - - unsigned.remove("transaction_id"); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); - - Ok(()) -} - -#[implement(Pdu)] -pub fn add_age(&mut self) -> Result { - use BTreeMap as Map; - - let mut unsigned: Map<&str, Box> = self - 
.unsigned - .as_deref() - .map(RawJsonValue::get) - .map_or_else(|| Ok(Map::new()), serde_json::from_str) - .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; - - // deliberately allowing for the possibility of negative age - let now: i128 = MilliSecondsSinceUnixEpoch::now().get().into(); - let then: i128 = self.origin_server_ts.into(); - let this_age = now.saturating_sub(then); - - unsigned.insert("age", to_raw_value(&this_age)?); - self.unsigned = Some(to_raw_value(&unsigned)?); - - Ok(()) -} - -#[implement(Pdu)] -pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { - use serde_json::Map; - - let mut unsigned: Map = self - .unsigned - .as_deref() - .map(RawJsonValue::get) - .map_or_else(|| Ok(Map::new()), serde_json::from_str) - .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; - - let pdu = pdu - .map(serde_json::to_value) - .transpose()? - .unwrap_or_else(|| JsonValue::Object(Map::new())); - - unsigned - .entry("m.relations") - .or_insert(JsonValue::Object(Map::new())) - .as_object_mut() - .map(|object| object.insert(name.to_owned(), pdu)); - - self.unsigned = Some(to_raw_value(&unsigned)?); - - Ok(()) -} - -#[implement(Pdu)] -pub fn contains_unsigned_property(&self, property: &str, is_type: F) -> bool -where - F: FnOnce(&JsonValue) -> bool, -{ - self.get_unsigned_as_value() - .get(property) - .map(is_type) - .is_some_and(is_true!()) -} - -#[implement(Pdu)] -pub fn get_unsigned_property(&self, property: &str) -> Result -where - T: for<'de> Deserialize<'de>, -{ - self.get_unsigned_as_value() - .get_mut(property) - .map(JsonValue::take) - .map(serde_json::from_value) - .ok_or(err!(Request(NotFound("property not found in unsigned object"))))? 
- .map_err(|e| err!(Database("Failed to deserialize unsigned.{property} into type: {e}"))) -} - -#[implement(Pdu)] -#[must_use] -pub fn get_unsigned_as_value(&self) -> JsonValue { - self.get_unsigned::().unwrap_or_default() -} - -#[implement(Pdu)] -pub fn get_unsigned(&self) -> Result { - self.unsigned - .as_ref() - .map(|raw| raw.get()) - .map(serde_json::from_str) - .ok_or(err!(Request(NotFound("\"unsigned\" property not found in pdu"))))? - .map_err(|e| err!(Database("Failed to deserialize \"unsigned\" into value: {e}"))) -} diff --git a/src/core/matrix/state_res/LICENSE b/src/core/matrix/state_res/LICENSE deleted file mode 100644 index c103a044..00000000 --- a/src/core/matrix/state_res/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -//! Permission is hereby granted, free of charge, to any person obtaining a copy -//! of this software and associated documentation files (the "Software"), to -//! deal in the Software without restriction, including without limitation the -//! rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -//! sell copies of the Software, and to permit persons to whom the Software is -//! furnished to do so, subject to the following conditions: - -//! The above copyright notice and this permission notice shall be included in -//! all copies or substantial portions of the Software. - -//! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -//! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -//! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -//! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -//! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -//! FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -//! IN THE SOFTWARE. 
diff --git a/src/core/matrix/state_res/benches.rs b/src/core/matrix/state_res/benches.rs deleted file mode 100644 index 01218b01..00000000 --- a/src/core/matrix/state_res/benches.rs +++ /dev/null @@ -1,669 +0,0 @@ -#[cfg(conduwuit_bench)] -extern crate test; - -use std::{ - borrow::Borrow, - collections::{HashMap, HashSet}, - sync::atomic::{AtomicU64, Ordering::SeqCst}, -}; - -use futures::{future, future::ready}; -use maplit::{btreemap, hashmap, hashset}; -use ruma::{ - EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, Signatures, UserId, - events::{ - StateEventType, TimelineEventType, - pdu::{EventHash, Pdu, RoomV3Pdu}, - room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - }, - }, - int, room_id, uint, user_id, -}; -use serde_json::{ - json, - value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, -}; - -use self::event::PduEvent; -use crate::state_res::{self as state_res, Error, Event, Result, StateMap}; - -static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); - -#[cfg(conduwuit_bench)] -#[cfg_attr(conduwuit_bench, bench)] -fn lexico_topo_sort(c: &mut test::Bencher) { - let graph = hashmap! { - event_id("l") => hashset![event_id("o")], - event_id("m") => hashset![event_id("n"), event_id("o")], - event_id("n") => hashset![event_id("o")], - event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges - event_id("p") => hashset![event_id("o")], - }; - - c.iter(|| { - let _ = state_res::lexicographical_topological_sort(&graph, &|_| { - future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) - }); - }); -} - -#[cfg(conduwuit_bench)] -#[cfg_attr(conduwuit_bench, bench)] -fn resolution_shallow_auth_chain(c: &mut test::Bencher) { - let parallel_fetches = 32; - let mut store = TestStore(hashmap! 
{}); - - // build up the DAG - let (state_at_bob, state_at_charlie, _) = store.set_up(); - - c.iter(|| async { - let ev_map = store.0.clone(); - let state_sets = [&state_at_bob, &state_at_charlie]; - let fetch = |id: OwnedEventId| ready(ev_map.get(&id).clone()); - let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); - let auth_chain_sets: Vec> = state_sets - .iter() - .map(|map| { - store - .auth_event_ids(room_id(), map.values().cloned().collect()) - .unwrap() - }) - .collect(); - - let _ = match state_res::resolve( - &RoomVersionId::V6, - state_sets.into_iter(), - &auth_chain_sets, - &fetch, - &exists, - parallel_fetches, - ) - .await - { - | Ok(state) => state, - | Err(e) => panic!("{e}"), - }; - }); -} - -#[cfg(conduwuit_bench)] -#[cfg_attr(conduwuit_bench, bench)] -fn resolve_deeper_event_set(c: &mut test::Bencher) { - let parallel_fetches = 32; - let mut inner = INITIAL_EVENTS(); - let ban = BAN_STATE_SET(); - - inner.extend(ban); - let store = TestStore(inner.clone()); - - let state_set_a = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("MB")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| { - ( - (ev.event_type().clone().into(), ev.state_key().unwrap().into()), - ev.event_id().to_owned(), - ) - }) - .collect::>(); - - let state_set_b = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("IME")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| { - ( - (ev.event_type().clone().into(), ev.state_key().unwrap().into()), - ev.event_id().to_owned(), - ) - }) - .collect::>(); - - c.iter(|| async { - let state_sets = [&state_set_a, &state_set_b]; - let 
auth_chain_sets: Vec> = state_sets - .iter() - .map(|map| { - store - .auth_event_ids(room_id(), map.values().cloned().collect()) - .unwrap() - }) - .collect(); - - let fetch = |id: OwnedEventId| ready(inner.get(&id).clone()); - let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); - let _ = match state_res::resolve( - &RoomVersionId::V6, - state_sets.into_iter(), - &auth_chain_sets, - &fetch, - &exists, - parallel_fetches, - ) - .await - { - | Ok(state) => state, - | Err(_) => panic!("resolution failed during benchmarking"), - }; - }); -} - -//*///////////////////////////////////////////////////////////////////// -// -// IMPLEMENTATION DETAILS AHEAD -// -/////////////////////////////////////////////////////////////////////*/ -struct TestStore(HashMap); - -#[allow(unused)] -impl TestStore { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result { - self.0 - .get(event_id) - .cloned() - .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) - } - - /// Returns the events that correspond to the `event_ids` sorted in the same - /// order. - fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result> { - let mut events = vec![]; - for id in event_ids { - events.push(self.get_event(room_id, id)?); - } - Ok(events) - } - - /// Returns a Vec of the related auth events to the given `event`. - fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { - let mut result = HashSet::new(); - let mut stack = event_ids; - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains(&ev_id) { - continue; - } - - result.insert(ev_id.clone()); - - let event = self.get_event(room_id, ev_id.borrow())?; - - stack.extend(event.auth_events().map(ToOwned::to_owned)); - } - - Ok(result) - } - - /// Returns a vector representing the difference in auth chains of the given - /// `events`. 
- fn auth_chain_diff( - &self, - room_id: &RoomId, - event_ids: Vec>, - ) -> Result> { - let mut auth_chain_sets = vec![]; - for ids in event_ids { - // TODO state store `auth_event_ids` returns self in the event ids list - // when an event returns `auth_event_ids` self is not contained - let chain = self - .auth_event_ids(room_id, ids)? - .into_iter() - .collect::>(); - auth_chain_sets.push(chain); - } - - if let Some(first) = auth_chain_sets.first().cloned() { - let common = auth_chain_sets - .iter() - .skip(1) - .fold(first, |a, b| a.intersection(b).cloned().collect::>()); - - Ok(auth_chain_sets - .into_iter() - .flatten() - .filter(|id| !common.contains(id.borrow())) - .collect()) - } else { - Ok(vec![]) - } - } -} - -impl TestStore { - #[allow(clippy::type_complexity)] - fn set_up( - &mut self, - ) -> (StateMap, StateMap, StateMap) { - let create_event = to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ); - let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), create_event.clone()); - - let alice_mem = to_pdu_event( - "IMA", - alice(), - TimelineEventType::RoomMember, - Some(alice().to_string().as_str()), - member_content_join(), - &[cre.clone()], - &[cre.clone()], - ); - self.0 - .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); - - let join_rules = to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &[cre.clone(), alice_mem.event_id().to_owned()], - &[alice_mem.event_id().to_owned()], - ); - self.0 - .insert(join_rules.event_id().to_owned(), join_rules.clone()); - - // Bob and Charlie join at the same time, so there is a fork - // this will be represented in the state_sets when we resolve - let bob_mem = to_pdu_event( - "IMB", - bob(), - TimelineEventType::RoomMember, - 
Some(bob().to_string().as_str()), - member_content_join(), - &[cre.clone(), join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0 - .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); - - let charlie_mem = to_pdu_event( - "IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().to_string().as_str()), - member_content_join(), - &[cre, join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0 - .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); - - let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] - .iter() - .map(|ev| { - ( - (ev.event_type().clone().into(), ev.state_key().unwrap().into()), - ev.event_id().to_owned(), - ) - }) - .collect::>(); - - let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] - .iter() - .map(|ev| { - ( - (ev.event_type().clone().into(), ev.state_key().unwrap().into()), - ev.event_id().to_owned(), - ) - }) - .collect::>(); - - let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] - .iter() - .map(|ev| { - ( - (ev.event_type().clone().into(), ev.state_key().unwrap().into()), - ev.event_id().to_owned(), - ) - }) - .collect::>(); - - (state_at_bob, state_at_charlie, expected) - } -} - -fn event_id(id: &str) -> OwnedEventId { - if id.contains('$') { - return id.try_into().unwrap(); - } - format!("${}:foo", id).try_into().unwrap() -} - -fn alice() -> &'static UserId { user_id!("@alice:foo") } - -fn bob() -> &'static UserId { user_id!("@bob:foo") } - -fn charlie() -> &'static UserId { user_id!("@charlie:foo") } - -fn ella() -> &'static UserId { user_id!("@ella:foo") } - -fn room_id() -> &'static RoomId { room_id!("!test:foo") } - -fn member_content_ban() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() -} - -fn member_content_join() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() -} - -fn to_pdu_event( 
- id: &str, - sender: &UserId, - ev_type: TimelineEventType, - state_key: Option<&str>, - content: Box, - auth_events: &[S], - prev_events: &[S], -) -> PduEvent -where - S: AsRef, -{ - // We don't care if the addition happens in order just that it is atomic - // (each event has its own value) - let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); - let id = if id.contains('$') { - id.to_owned() - } else { - format!("${}:foo", id) - }; - let auth_events = auth_events - .iter() - .map(AsRef::as_ref) - .map(event_id) - .collect::>(); - let prev_events = prev_events - .iter() - .map(AsRef::as_ref) - .map(event_id) - .collect::>(); - - let state_key = state_key.map(ToOwned::to_owned); - PduEvent { - event_id: id.try_into().unwrap(), - rest: Pdu::RoomV3Pdu(RoomV3Pdu { - room_id: room_id().to_owned(), - sender: sender.to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), - state_key, - kind: ev_type, - content, - redacts: None, - unsigned: btreemap! {}, - auth_events, - prev_events, - depth: uint!(0), - hashes: EventHash::new(String::new()), - signatures: Signatures::new(), - }), - } -} - -// all graphs start with these input events -#[allow(non_snake_case)] -fn INITIAL_EVENTS() -> HashMap { - vec![ - to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ), - to_pdu_event( - "IMA", - alice(), - TimelineEventType::RoomMember, - Some(alice().as_str()), - member_content_join(), - &["CREATE"], - &["CREATE"], - ), - to_pdu_event( - "IPOWER", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), - &["CREATE", "IMA"], - &["IMA"], - ), - to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["IPOWER"], - ), - to_pdu_event( 
- "IMB", - bob(), - TimelineEventType::RoomMember, - Some(bob().to_string().as_str()), - member_content_join(), - &["CREATE", "IJR", "IPOWER"], - &["IJR"], - ), - to_pdu_event( - "IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().to_string().as_str()), - member_content_join(), - &["CREATE", "IJR", "IPOWER"], - &["IMB"], - ), - to_pdu_event::<&EventId>( - "START", - charlie(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - to_pdu_event::<&EventId>( - "END", - charlie(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - ] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -// all graphs start with these input events -#[allow(non_snake_case)] -fn BAN_STATE_SET() -> HashMap { - vec![ - to_pdu_event( - "PA", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], // auth_events - &["START"], // prev_events - ), - to_pdu_event( - "PB", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["END"], - ), - to_pdu_event( - "MB", - alice(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_ban(), - &["CREATE", "IMA", "PB"], - &["PA"], - ), - to_pdu_event( - "IME", - ella(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_join(), - &["CREATE", "IJR", "PA"], - &["MB"], - ), - ] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -/// Convenience trait for adding event type plus state key to state maps. 
-trait EventTypeExt { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); -} - -impl EventTypeExt for &TimelineEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { - (self.to_string().into(), state_key.into()) - } -} - -mod event { - use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, - events::{TimelineEventType, pdu::Pdu}, - }; - use serde::{Deserialize, Serialize}; - use serde_json::value::RawValue as RawJsonValue; - - use super::Event; - - impl Event for PduEvent { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } - - fn room_id(&self) -> &RoomId { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.room_id, - | Pdu::RoomV3Pdu(ev) => &ev.room_id, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn sender(&self) -> &UserId { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.sender, - | Pdu::RoomV3Pdu(ev) => &ev.sender, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn event_type(&self) -> &TimelineEventType { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.kind, - | Pdu::RoomV3Pdu(ev) => &ev.kind, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn content(&self) -> &RawJsonValue { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.content, - | Pdu::RoomV3Pdu(ev) => &ev.content, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, - | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn state_key(&self) -> Option<&str> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), - | 
Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn prev_events(&self) -> Box + Send + '_> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn auth_events(&self) -> Box + Send + '_> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn redacts(&self) -> Option<&Self::Id> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), - | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - } - - #[derive(Clone, Debug, Deserialize, Serialize)] - pub(crate) struct PduEvent { - pub(crate) event_id: OwnedEventId, - #[serde(flatten)] - pub(crate) rest: Pdu, - } -} diff --git a/src/core/matrix/state_res/error.rs b/src/core/matrix/state_res/error.rs deleted file mode 100644 index 7711d878..00000000 --- a/src/core/matrix/state_res/error.rs +++ /dev/null @@ -1,23 +0,0 @@ -use serde_json::Error as JsonError; -use thiserror::Error; - -/// Represents the various errors that arise when resolving state. -#[derive(Error, Debug)] -#[non_exhaustive] -pub enum Error { - /// A deserialization error. - #[error(transparent)] - SerdeJson(#[from] JsonError), - - /// The given option or version is unsupported. - #[error("Unsupported room version: {0}")] - Unsupported(String), - - /// The given event was not found. - #[error("Not found error: {0}")] - NotFound(String), - - /// Invalid fields in the given PDU. 
- #[error("Invalid PDU: {0}")] - InvalidPdu(String), -} diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs deleted file mode 100644 index c69db50e..00000000 --- a/src/core/matrix/state_res/event_auth.rs +++ /dev/null @@ -1,1453 +0,0 @@ -use std::{borrow::Borrow, collections::BTreeSet}; - -use futures::{ - Future, - future::{OptionFuture, join3}, -}; -use ruma::{ - Int, OwnedUserId, RoomVersionId, UserId, - events::room::{ - create::RoomCreateEventContent, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, ThirdPartyInvite}, - power_levels::RoomPowerLevelsEventContent, - third_party_invite::RoomThirdPartyInviteEventContent, - }, - int, - serde::{Base64, Raw}, -}; -use serde::{ - Deserialize, - de::{Error as _, IgnoredAny}, -}; -use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; - -use super::{ - Error, Event, Result, StateEventType, StateKey, TimelineEventType, - power_levels::{ - deserialize_power_levels, deserialize_power_levels_content_fields, - deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, - }, - room_version::RoomVersion, -}; -use crate::{debug, error, trace, warn}; - -// FIXME: field extracting could be bundled for `content` -#[derive(Deserialize)] -struct GetMembership { - membership: MembershipState, -} - -#[derive(Deserialize, Debug)] -struct RoomMemberContentFields { - membership: Option>, - join_authorised_via_users_server: Option>, -} - -/// For the given event `kind` what are the relevant auth events that are needed -/// to authenticate this `content`. -/// -/// # Errors -/// -/// This function will return an error if the supplied `content` is not a JSON -/// object. 
-pub fn auth_types_for_event( - kind: &TimelineEventType, - sender: &UserId, - state_key: Option<&str>, - content: &RawJsonValue, -) -> serde_json::Result> { - if kind == &TimelineEventType::RoomCreate { - return Ok(vec![]); - } - - let mut auth_types = vec![ - (StateEventType::RoomPowerLevels, StateKey::new()), - (StateEventType::RoomMember, sender.as_str().into()), - (StateEventType::RoomCreate, StateKey::new()), - ]; - - if kind == &TimelineEventType::RoomMember { - #[derive(Deserialize)] - struct RoomMemberContentFields { - membership: Option>, - third_party_invite: Option>, - join_authorised_via_users_server: Option>, - } - - if let Some(state_key) = state_key { - let content: RoomMemberContentFields = from_json_str(content.get())?; - - if let Some(Ok(membership)) = content.membership.map(|m| m.deserialize()) { - if [MembershipState::Join, MembershipState::Invite, MembershipState::Knock] - .contains(&membership) - { - let key = (StateEventType::RoomJoinRules, StateKey::new()); - if !auth_types.contains(&key) { - auth_types.push(key); - } - - if let Some(Ok(u)) = content - .join_authorised_via_users_server - .map(|m| m.deserialize()) - { - let key = (StateEventType::RoomMember, u.as_str().into()); - if !auth_types.contains(&key) { - auth_types.push(key); - } - } - } - - let key = (StateEventType::RoomMember, state_key.into()); - if !auth_types.contains(&key) { - auth_types.push(key); - } - - if membership == MembershipState::Invite { - if let Some(Ok(t_id)) = content.third_party_invite.map(|t| t.deserialize()) { - let key = - (StateEventType::RoomThirdPartyInvite, t_id.signed.token.into()); - if !auth_types.contains(&key) { - auth_types.push(key); - } - } - } - } - } - } - - Ok(auth_types) -} - -/// Authenticate the incoming `event`. 
-/// -/// The steps of authentication are: -/// -/// * check that the event is being authenticated for the correct room -/// * then there are checks for specific event types -/// -/// The `fetch_state` closure should gather state from a state snapshot. We need -/// to know if the event passes auth against some state not a recursive -/// collection of auth_events fields. -#[tracing::instrument( - level = "debug", - skip_all, - fields( - event_id = incoming_event.event_id().borrow().as_str() - ) -)] -pub async fn auth_check( - room_version: &RoomVersion, - incoming_event: &Incoming, - current_third_party_invite: Option<&Incoming>, - fetch_state: F, -) -> Result -where - F: Fn(&StateEventType, &str) -> Fut + Send, - Fut: Future> + Send, - Fetched: Event + Send, - Incoming: Event + Send + Sync, -{ - debug!( - event_id = format!("{}", incoming_event.event_id()), - event_type = format!("{}", incoming_event.event_type()), - "auth_check beginning" - ); - - // [synapse] check that all the events are in the same room as `incoming_event` - - // [synapse] do_sig_check check the event has valid signatures for member events - - // TODO do_size_check is false when called by `iterative_auth_check` - // do_size_check is also mostly accomplished by ruma with the exception of - // checking event_type, state_key, and json are below a certain size (255 and - // 65_536 respectively) - - let sender = incoming_event.sender(); - - // Implementation of https://spec.matrix.org/latest/rooms/v1/#authorization-rules - // - // 1. 
If type is m.room.create: - if *incoming_event.event_type() == TimelineEventType::RoomCreate { - #[derive(Deserialize)] - struct RoomCreateContentFields { - room_version: Option>, - creator: Option>, - } - - debug!("start m.room.create check"); - - // If it has any previous events, reject - if incoming_event.prev_events().next().is_some() { - warn!("the room creation event had previous events"); - return Ok(false); - } - - // If the domain of the room_id does not match the domain of the sender, reject - let Some(room_id_server_name) = incoming_event.room_id().server_name() else { - warn!("room ID has no servername"); - return Ok(false); - }; - - if room_id_server_name != sender.server_name() { - warn!("servername of room ID does not match servername of sender"); - return Ok(false); - } - - // If content.room_version is present and is not a recognized version, reject - let content: RoomCreateContentFields = from_json_str(incoming_event.content().get())?; - if content - .room_version - .is_some_and(|v| v.deserialize().is_err()) - { - warn!("invalid room version found in m.room.create event"); - return Ok(false); - } - - if !room_version.use_room_create_sender { - // If content has no creator field, reject - if content.creator.is_none() { - warn!("no creator field found in m.room.create content"); - return Ok(false); - } - } - - debug!("m.room.create event was allowed"); - return Ok(true); - } - - /* - // TODO: In the past this code caused problems federating with synapse, maybe this has been - // resolved already. Needs testing. - // - // 2. Reject if auth_events - // a. auth_events cannot have duplicate keys since it's a BTree - // b. 
All entries are valid auth events according to spec - let expected_auth = auth_types_for_event( - incoming_event.kind, - sender, - incoming_event.state_key, - incoming_event.content().clone(), - ); - - dbg!(&expected_auth); - - for ev_key in auth_events.keys() { - // (b) - if !expected_auth.contains(ev_key) { - warn!("auth_events contained invalid auth event"); - return Ok(false); - } - } - */ - - let (room_create_event, power_levels_event, sender_member_event) = join3( - fetch_state(&StateEventType::RoomCreate, ""), - fetch_state(&StateEventType::RoomPowerLevels, ""), - fetch_state(&StateEventType::RoomMember, sender.as_str()), - ) - .await; - - let room_create_event = match room_create_event { - | None => { - warn!("no m.room.create event in auth chain"); - return Ok(false); - }, - | Some(e) => e, - }; - - // 3. If event does not have m.room.create in auth_events reject - if !incoming_event - .auth_events() - .any(|id| id.borrow() == room_create_event.event_id().borrow()) - { - warn!("no m.room.create event in auth events"); - return Ok(false); - } - - // If the create event content has the field m.federate set to false and the - // sender domain of the event does not match the sender domain of the create - // event, reject. - #[derive(Deserialize)] - #[allow(clippy::items_after_statements)] - struct RoomCreateContentFederate { - #[serde(rename = "m.federate", default = "ruma::serde::default_true")] - federate: bool, - } - let room_create_content: RoomCreateContentFederate = - from_json_str(room_create_event.content().get())?; - if !room_create_content.federate - && room_create_event.sender().server_name() != incoming_event.sender().server_name() - { - warn!( - "room is not federated and event's sender domain does not match create event's \ - sender domain" - ); - return Ok(false); - } - - // Only in some room versions 6 and below - if room_version.special_case_aliases_auth { - // 4. 
If type is m.room.aliases - if *incoming_event.event_type() == TimelineEventType::RoomAliases { - debug!("starting m.room.aliases check"); - - // If sender's domain doesn't matches state_key, reject - if incoming_event.state_key() != Some(sender.server_name().as_str()) { - warn!("state_key does not match sender"); - return Ok(false); - } - - debug!("m.room.aliases event was allowed"); - return Ok(true); - } - } - - // If type is m.room.member - if *incoming_event.event_type() == TimelineEventType::RoomMember { - debug!("starting m.room.member check"); - let state_key = match incoming_event.state_key() { - | None => { - warn!("no statekey in member event"); - return Ok(false); - }, - | Some(s) => s, - }; - - let content: RoomMemberContentFields = from_json_str(incoming_event.content().get())?; - if content - .membership - .as_ref() - .and_then(|m| m.deserialize().ok()) - .is_none() - { - warn!("no valid membership field found for m.room.member event content"); - return Ok(false); - } - - let target_user = - <&UserId>::try_from(state_key).map_err(|e| Error::InvalidPdu(format!("{e}")))?; - - let user_for_join_auth = content - .join_authorised_via_users_server - .as_ref() - .and_then(|u| u.deserialize().ok()); - - let user_for_join_auth_event: OptionFuture<_> = user_for_join_auth - .as_ref() - .map(|auth_user| fetch_state(&StateEventType::RoomMember, auth_user.as_str())) - .into(); - - let target_user_member_event = - fetch_state(&StateEventType::RoomMember, target_user.as_str()); - - let join_rules_event = fetch_state(&StateEventType::RoomJoinRules, ""); - - let (join_rules_event, target_user_member_event, user_for_join_auth_event) = - join3(join_rules_event, target_user_member_event, user_for_join_auth_event).await; - - let user_for_join_auth_membership = user_for_join_auth_event - .and_then(|mem| from_json_str::(mem?.content().get()).ok()) - .map_or(MembershipState::Leave, |mem| mem.membership); - - if !valid_membership_change( - room_version, - target_user, - 
target_user_member_event.as_ref(), - sender, - sender_member_event.as_ref(), - incoming_event, - current_third_party_invite, - power_levels_event.as_ref(), - join_rules_event.as_ref(), - user_for_join_auth.as_deref(), - &user_for_join_auth_membership, - &room_create_event, - )? { - return Ok(false); - } - - debug!("m.room.member event was allowed"); - return Ok(true); - } - - // If the sender's current membership state is not join, reject - #[allow(clippy::manual_let_else)] - let sender_member_event = match sender_member_event { - | Some(mem) => mem, - | None => { - warn!("sender not found in room"); - return Ok(false); - }, - }; - - let sender_membership_event_content: RoomMemberContentFields = - from_json_str(sender_member_event.content().get())?; - let Some(membership_state) = sender_membership_event_content.membership else { - warn!( - sender_membership_event_content = format!("{sender_membership_event_content:?}"), - event_id = format!("{}", incoming_event.event_id()), - "Sender membership event content missing membership field" - ); - return Err(Error::InvalidPdu("Missing membership field".to_owned())); - }; - let membership_state = membership_state.deserialize()?; - - if !matches!(membership_state, MembershipState::Join) { - warn!("sender's membership is not join"); - return Ok(false); - } - - // If type is m.room.third_party_invite - let sender_power_level = match &power_levels_event { - | Some(pl) => { - let content = - deserialize_power_levels_content_fields(pl.content().get(), room_version)?; - match content.get_user_power(sender) { - | Some(level) => *level, - | _ => content.users_default, - } - }, - | _ => { - // If no power level event found the creator gets 100 everyone else gets 0 - let is_creator = if room_version.use_room_create_sender { - room_create_event.sender() == sender - } else { - #[allow(deprecated)] - from_json_str::(room_create_event.content().get()) - .is_ok_and(|create| create.creator.unwrap() == *sender) - }; - - if is_creator { 
int!(100) } else { int!(0) } - }, - }; - - // Allow if and only if sender's current power level is greater than - // or equal to the invite level - if *incoming_event.event_type() == TimelineEventType::RoomThirdPartyInvite { - let invite_level = match &power_levels_event { - | Some(power_levels) => - deserialize_power_levels_content_invite( - power_levels.content().get(), - room_version, - )? - .invite, - | None => int!(0), - }; - - if sender_power_level < invite_level { - warn!("sender's cannot send invites in this room"); - return Ok(false); - } - - debug!("m.room.third_party_invite event was allowed"); - return Ok(true); - } - - // If the event type's required power level is greater than the sender's power - // level, reject If the event has a state_key that starts with an @ and does - // not match the sender, reject. - if !can_send_event(incoming_event, power_levels_event.as_ref(), sender_power_level) { - warn!("user cannot send event"); - return Ok(false); - } - - // If type is m.room.power_levels - if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels { - debug!("starting m.room.power_levels check"); - - match check_power_levels( - room_version, - incoming_event, - power_levels_event.as_ref(), - sender_power_level, - ) { - | Some(required_pwr_lvl) => - if !required_pwr_lvl { - warn!("m.room.power_levels was not allowed"); - return Ok(false); - }, - | _ => { - warn!("m.room.power_levels was not allowed"); - return Ok(false); - }, - } - debug!("m.room.power_levels event allowed"); - } - - // Room version 3: Redaction events are always accepted (provided the event is - // allowed by `events` and `events_default` in the power levels). However, - // servers should not apply or send redaction's to clients until both the - // redaction event and original event have been seen, and are valid. 
Servers - // should only apply redaction's to events where the sender's domains match, or - // the sender of the redaction has the appropriate permissions per the - // power levels. - - if room_version.extra_redaction_checks - && *incoming_event.event_type() == TimelineEventType::RoomRedaction - { - let redact_level = match power_levels_event { - | Some(pl) => - deserialize_power_levels_content_redact(pl.content().get(), room_version)?.redact, - | None => int!(50), - }; - - if !check_redaction(room_version, incoming_event, sender_power_level, redact_level)? { - return Ok(false); - } - } - - debug!("allowing event passed all checks"); - Ok(true) -} - -// TODO deserializing the member, power, join_rules event contents is done in -// conduit just before this is called. Could they be passed in? -/// Does the user who sent this member event have required power levels to do -/// so. -/// -/// * `user` - Information about the membership event and user making the -/// request. -/// * `auth_events` - The set of auth events that relate to a membership event. -/// -/// This is generated by calling `auth_types_for_event` with the membership -/// event and the current State. 
-#[allow(clippy::too_many_arguments)] -#[allow(clippy::cognitive_complexity)] -fn valid_membership_change( - room_version: &RoomVersion, - target_user: &UserId, - target_user_membership_event: Option<&impl Event>, - sender: &UserId, - sender_membership_event: Option<&impl Event>, - current_event: impl Event, - current_third_party_invite: Option<&impl Event>, - power_levels_event: Option<&impl Event>, - join_rules_event: Option<&impl Event>, - user_for_join_auth: Option<&UserId>, - user_for_join_auth_membership: &MembershipState, - create_room: &impl Event, -) -> Result { - #[derive(Deserialize)] - struct GetThirdPartyInvite { - third_party_invite: Option>, - } - let content = current_event.content(); - - let target_membership = from_json_str::(content.get())?.membership; - let third_party_invite = - from_json_str::(content.get())?.third_party_invite; - - let sender_membership = match &sender_membership_event { - | Some(pdu) => from_json_str::(pdu.content().get())?.membership, - | None => MembershipState::Leave, - }; - let sender_is_joined = sender_membership == MembershipState::Join; - - let target_user_current_membership = match &target_user_membership_event { - | Some(pdu) => from_json_str::(pdu.content().get())?.membership, - | None => MembershipState::Leave, - }; - - let power_levels: RoomPowerLevelsEventContent = match &power_levels_event { - | Some(ev) => from_json_str(ev.content().get())?, - | None => RoomPowerLevelsEventContent::default(), - }; - - let sender_power = power_levels - .users - .get(sender) - .or_else(|| sender_is_joined.then_some(&power_levels.users_default)); - - let target_power = power_levels.users.get(target_user).or_else(|| { - (target_membership == MembershipState::Join).then_some(&power_levels.users_default) - }); - - let mut join_rules = JoinRule::Invite; - if let Some(jr) = &join_rules_event { - join_rules = from_json_str::(jr.content().get())?.join_rule; - } - - let power_levels_event_id = 
power_levels_event.as_ref().map(Event::event_id); - let sender_membership_event_id = sender_membership_event.as_ref().map(Event::event_id); - let target_user_membership_event_id = - target_user_membership_event.as_ref().map(Event::event_id); - - let user_for_join_auth_is_valid = if let Some(user_for_join_auth) = user_for_join_auth { - // Is the authorised user allowed to invite users into this room - let (auth_user_pl, invite_level) = if let Some(pl) = &power_levels_event { - // TODO Refactor all powerlevel parsing - let invite = - deserialize_power_levels_content_invite(pl.content().get(), room_version)?.invite; - - let content = - deserialize_power_levels_content_fields(pl.content().get(), room_version)?; - let user_pl = match content.get_user_power(user_for_join_auth) { - | Some(level) => *level, - | _ => content.users_default, - }; - - (user_pl, invite) - } else { - (int!(0), int!(0)) - }; - (user_for_join_auth_membership == &MembershipState::Join) - && (auth_user_pl >= invite_level) - } else { - // No auth user was given - false - }; - - Ok(match target_membership { - | MembershipState::Join => { - // 1. If the only previous event is an m.room.create and the state_key is the - // creator, - // allow - let mut prev_events = current_event.prev_events(); - - let prev_event_is_create_event = prev_events - .next() - .is_some_and(|event_id| event_id.borrow() == create_room.event_id().borrow()); - let no_more_prev_events = prev_events.next().is_none(); - - if prev_event_is_create_event && no_more_prev_events { - let is_creator = if room_version.use_room_create_sender { - let creator = create_room.sender(); - - creator == sender && creator == target_user - } else { - #[allow(deprecated)] - let creator = from_json_str::(create_room.content().get())? 
- .creator - .ok_or_else(|| serde_json::Error::missing_field("creator"))?; - - creator == sender && creator == target_user - }; - - if is_creator { - return Ok(true); - } - } - - if sender != target_user { - // If the sender does not match state_key, reject. - warn!("Can't make other user join"); - false - } else if target_user_current_membership == MembershipState::Ban { - // If the sender is banned, reject. - warn!(?target_user_membership_event_id, "Banned user can't join"); - false - } else if (join_rules == JoinRule::Invite - || room_version.allow_knocking && join_rules == JoinRule::Knock) - // If the join_rule is invite then allow if membership state is invite or join - && (target_user_current_membership == MembershipState::Join - || target_user_current_membership == MembershipState::Invite) - { - true - } else if room_version.restricted_join_rules - && matches!(join_rules, JoinRule::Restricted(_)) - || room_version.knock_restricted_join_rule - && matches!(join_rules, JoinRule::KnockRestricted(_)) - { - // If the join_rule is restricted or knock_restricted - if matches!( - target_user_current_membership, - MembershipState::Invite | MembershipState::Join - ) { - // If membership state is join or invite, allow. - true - } else { - // If the join_authorised_via_users_server key in content is not a user with - // sufficient permission to invite other users, reject. - // Otherwise, allow. - user_for_join_auth_is_valid - } - } else { - // If the join_rule is public, allow. - // Otherwise, reject. 
- join_rules == JoinRule::Public - } - }, - | MembershipState::Invite => { - // If content has third_party_invite key - match third_party_invite.and_then(|i| i.deserialize().ok()) { - | Some(tp_id) => - if target_user_current_membership == MembershipState::Ban { - warn!(?target_user_membership_event_id, "Can't invite banned user"); - false - } else { - let allow = verify_third_party_invite( - Some(target_user), - sender, - &tp_id, - current_third_party_invite, - ); - if !allow { - warn!("Third party invite invalid"); - } - allow - }, - | _ => { - if !sender_is_joined - || target_user_current_membership == MembershipState::Join - || target_user_current_membership == MembershipState::Ban - { - warn!( - ?target_user_membership_event_id, - ?sender_membership_event_id, - "Can't invite user if sender not joined or the user is currently \ - joined or banned", - ); - false - } else { - let allow = sender_power - .filter(|&p| p >= &power_levels.invite) - .is_some(); - if !allow { - warn!( - ?target_user_membership_event_id, - ?power_levels_event_id, - "User does not have enough power to invite", - ); - } - allow - } - }, - } - }, - | MembershipState::Leave => - if sender == target_user { - let allow = target_user_current_membership == MembershipState::Join - || target_user_current_membership == MembershipState::Invite - || target_user_current_membership == MembershipState::Knock; - if !allow { - warn!( - ?target_user_membership_event_id, - ?target_user_current_membership, - "Can't leave if sender is not already invited, knocked, or joined" - ); - } - allow - } else if !sender_is_joined - || target_user_current_membership == MembershipState::Ban - && sender_power.filter(|&p| p < &power_levels.ban).is_some() - { - warn!( - ?target_user_membership_event_id, - ?sender_membership_event_id, - "Can't kick if sender not joined or user is already banned", - ); - false - } else { - let allow = sender_power.filter(|&p| p >= &power_levels.kick).is_some() - && target_power < 
sender_power; - if !allow { - warn!( - ?target_user_membership_event_id, - ?power_levels_event_id, - "User does not have enough power to kick", - ); - } - allow - }, - | MembershipState::Ban => - if !sender_is_joined { - warn!(?sender_membership_event_id, "Can't ban user if sender is not joined"); - false - } else { - let allow = sender_power.filter(|&p| p >= &power_levels.ban).is_some() - && target_power < sender_power; - if !allow { - warn!( - ?target_user_membership_event_id, - ?power_levels_event_id, - "User does not have enough power to ban", - ); - } - allow - }, - | MembershipState::Knock if room_version.allow_knocking => { - // 1. If the `join_rule` is anything other than `knock` or `knock_restricted`, - // reject. - if !matches!(join_rules, JoinRule::KnockRestricted(_) | JoinRule::Knock) { - warn!( - "Join rule is not set to knock or knock_restricted, knocking is not allowed" - ); - false - } else if matches!(join_rules, JoinRule::KnockRestricted(_)) - && !room_version.knock_restricted_join_rule - { - // 2. If the `join_rule` is `knock_restricted`, but the room does not support - // `knock_restricted`, reject. - warn!( - "Join rule is set to knock_restricted but room version does not support \ - knock_restricted, knocking is not allowed" - ); - false - } else if sender != target_user { - // 3. If `sender` does not match `state_key`, reject. - warn!( - ?sender, - ?target_user, - "Can't make another user knock, sender did not match target" - ); - false - } else if matches!( - sender_membership, - MembershipState::Ban | MembershipState::Invite | MembershipState::Join - ) { - // 4. If the `sender`'s current membership is not `ban`, `invite`, or `join`, - // allow. - // 5. Otherwise, reject. 
- warn!( - ?target_user_membership_event_id, - "Knocking with a membership state of ban, invite or join is invalid", - ); - false - } else { - true - } - }, - | _ => { - warn!("Unknown membership transition"); - false - }, - }) -} - -/// Is the user allowed to send a specific event based on the rooms power -/// levels. -/// -/// Does the event have the correct userId as its state_key if it's not the "" -/// state_key. -fn can_send_event(event: impl Event, ple: Option, user_level: Int) -> bool { - let event_type_power_level = get_send_level(event.event_type(), event.state_key(), ple); - - debug!( - required_level = i64::from(event_type_power_level), - user_level = i64::from(user_level), - state_key = ?event.state_key(), - "permissions factors", - ); - - if user_level < event_type_power_level { - return false; - } - - if event.state_key().is_some_and(|k| k.starts_with('@')) - && event.state_key() != Some(event.sender().as_str()) - { - return false; // permission required to post in this room - } - - true -} - -/// Confirm that the event sender has the required power levels. -fn check_power_levels( - room_version: &RoomVersion, - power_event: impl Event, - previous_power_event: Option, - user_level: Int, -) -> Option { - match power_event.state_key() { - | Some("") => {}, - | Some(key) => { - error!(state_key = key, "m.room.power_levels event has non-empty state key"); - return None; - }, - | None => { - error!("check_power_levels requires an m.room.power_levels *state* event argument"); - return None; - }, - } - - // - If any of the keys users_default, events_default, state_default, ban, - // redact, kick, or invite in content are present and not an integer, reject. - // - If either of the keys events or notifications in content are present and - // not a dictionary with values that are integers, reject. - // - If users key in content is not a dictionary with keys that are valid user - // IDs with values that are integers, reject. 
- let user_content: RoomPowerLevelsEventContent = - deserialize_power_levels(power_event.content().get(), room_version)?; - - // Validation of users is done in Ruma, synapse for loops validating user_ids - // and integers here - debug!("validation of power event finished"); - - #[allow(clippy::manual_let_else)] - let current_state = match previous_power_event { - | Some(current_state) => current_state, - // If there is no previous m.room.power_levels event in the room, allow - | None => return Some(true), - }; - - let current_content: RoomPowerLevelsEventContent = - deserialize_power_levels(current_state.content().get(), room_version)?; - - let mut user_levels_to_check = BTreeSet::new(); - let old_list = ¤t_content.users; - let user_list = &user_content.users; - for user in old_list.keys().chain(user_list.keys()) { - let user: &UserId = user; - user_levels_to_check.insert(user); - } - - trace!(set = ?user_levels_to_check, "user levels to check"); - - let mut event_levels_to_check = BTreeSet::new(); - let old_list = ¤t_content.events; - let new_list = &user_content.events; - for ev_id in old_list.keys().chain(new_list.keys()) { - event_levels_to_check.insert(ev_id); - } - - trace!(set = ?event_levels_to_check, "event levels to check"); - - let old_state = ¤t_content; - let new_state = &user_content; - - // synapse does not have to split up these checks since we can't combine UserIds - // and EventTypes we do 2 loops - - // UserId loop - for user in user_levels_to_check { - let old_level = old_state.users.get(user); - let new_level = new_state.users.get(user); - if old_level.is_some() && new_level.is_some() && old_level == new_level { - continue; - } - - // If the current value is equal to the sender's current power level, reject - if user != power_event.sender() && old_level == Some(&user_level) { - warn!("m.room.power_level cannot remove ops == to own"); - return Some(false); // cannot remove ops level == to own - } - - // If the current value is higher than the 
sender's current power level, reject - // If the new value is higher than the sender's current power level, reject - let old_level_too_big = old_level > Some(&user_level); - let new_level_too_big = new_level > Some(&user_level); - if old_level_too_big || new_level_too_big { - warn!("m.room.power_level failed to add ops > than own"); - return Some(false); // cannot add ops greater than own - } - } - - // EventType loop - for ev_type in event_levels_to_check { - let old_level = old_state.events.get(ev_type); - let new_level = new_state.events.get(ev_type); - if old_level.is_some() && new_level.is_some() && old_level == new_level { - continue; - } - - // If the current value is higher than the sender's current power level, reject - // If the new value is higher than the sender's current power level, reject - let old_level_too_big = old_level > Some(&user_level); - let new_level_too_big = new_level > Some(&user_level); - if old_level_too_big || new_level_too_big { - warn!("m.room.power_level failed to add ops > than own"); - return Some(false); // cannot add ops greater than own - } - } - - // Notifications, currently there is only @room - if room_version.limit_notifications_power_levels { - let old_level = old_state.notifications.room; - let new_level = new_state.notifications.room; - if old_level != new_level { - // If the current value is higher than the sender's current power level, reject - // If the new value is higher than the sender's current power level, reject - let old_level_too_big = old_level > user_level; - let new_level_too_big = new_level > user_level; - if old_level_too_big || new_level_too_big { - warn!("m.room.power_level failed to add ops > than own"); - return Some(false); // cannot add ops greater than own - } - } - } - - let levels = [ - "users_default", - "events_default", - "state_default", - "ban", - "redact", - "kick", - "invite", - ]; - let old_state = serde_json::to_value(old_state).unwrap(); - let new_state = 
serde_json::to_value(new_state).unwrap(); - for lvl_name in &levels { - if let Some((old_lvl, new_lvl)) = get_deserialize_levels(&old_state, &new_state, lvl_name) - { - let old_level_too_big = old_lvl > user_level; - let new_level_too_big = new_lvl > user_level; - - if old_level_too_big || new_level_too_big { - warn!("cannot add ops > than own"); - return Some(false); - } - } - } - - Some(true) -} - -fn get_deserialize_levels( - old: &serde_json::Value, - new: &serde_json::Value, - name: &str, -) -> Option<(Int, Int)> { - Some(( - serde_json::from_value(old.get(name)?.clone()).ok()?, - serde_json::from_value(new.get(name)?.clone()).ok()?, - )) -} - -/// Does the event redacting come from a user with enough power to redact the -/// given event. -fn check_redaction( - _room_version: &RoomVersion, - redaction_event: impl Event, - user_level: Int, - redact_level: Int, -) -> Result { - if user_level >= redact_level { - debug!("redaction allowed via power levels"); - return Ok(true); - } - - // If the domain of the event_id of the event being redacted is the same as the - // domain of the event_id of the m.room.redaction, allow - if redaction_event.event_id().borrow().server_name() - == redaction_event - .redacts() - .as_ref() - .and_then(|&id| id.borrow().server_name()) - { - debug!("redaction event allowed via room version 1 rules"); - return Ok(true); - } - - Ok(false) -} - -/// Helper function to fetch the power level needed to send an event of type -/// `e_type` based on the rooms "m.room.power_level" event. 
-fn get_send_level( - e_type: &TimelineEventType, - state_key: Option<&str>, - power_lvl: Option, -) -> Int { - power_lvl - .and_then(|ple| { - from_json_str::(ple.content().get()) - .map(|content| { - content.events.get(e_type).copied().unwrap_or_else(|| { - if state_key.is_some() { - content.state_default - } else { - content.events_default - } - }) - }) - .ok() - }) - .unwrap_or_else(|| if state_key.is_some() { int!(50) } else { int!(0) }) -} - -fn verify_third_party_invite( - target_user: Option<&UserId>, - sender: &UserId, - tp_id: &ThirdPartyInvite, - current_third_party_invite: Option, -) -> bool { - // 1. Check for user being banned happens before this is called - // checking for mxid and token keys is done by ruma when deserializing - - // The state key must match the invitee - if target_user != Some(&tp_id.signed.mxid) { - return false; - } - - // If there is no m.room.third_party_invite event in the current room state with - // state_key matching token, reject - #[allow(clippy::manual_let_else)] - let current_tpid = match current_third_party_invite { - | Some(id) => id, - | None => return false, - }; - - if current_tpid.state_key() != Some(&tp_id.signed.token) { - return false; - } - - if sender != current_tpid.sender() { - return false; - } - - // If any signature in signed matches any public key in the - // m.room.third_party_invite event, allow - #[allow(clippy::manual_let_else)] - let tpid_ev = - match from_json_str::(current_tpid.content().get()) { - | Ok(ev) => ev, - | Err(_) => return false, - }; - - #[allow(clippy::manual_let_else)] - let decoded_invite_token = match Base64::parse(&tp_id.signed.token) { - | Ok(tok) => tok, - // FIXME: Log a warning? 
- | Err(_) => return false, - }; - - // A list of public keys in the public_keys field - for key in tpid_ev.public_keys.unwrap_or_default() { - if key.public_key == decoded_invite_token { - return true; - } - } - - // A single public key in the public_key field - tpid_ev.public_key == decoded_invite_token -} - -#[cfg(test)] -mod tests { - use ruma::events::{ - StateEventType, TimelineEventType, - room::{ - join_rules::{ - AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, RoomMembership, - }, - member::{MembershipState, RoomMemberEventContent}, - }, - }; - use serde_json::value::to_raw_value as to_raw_json_value; - - use crate::state_res::{ - Event, EventTypeExt, RoomVersion, StateMap, - event_auth::valid_membership_change, - test_utils::{ - INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, PduEvent, alice, charlie, ella, event_id, - member_content_ban, member_content_join, room_id, to_pdu_event, - }, - }; - - #[test] - fn test_ban_pass() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - let events = INITIAL_EVENTS(); - - let auth_events = events - .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) - .collect::>(); - - let requester = to_pdu_event( - "HELLO", - alice(), - TimelineEventType::RoomMember, - Some(charlie().as_str()), - member_content_ban(), - &[], - &["IMC"], - ); - - let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); - let target_user = charlie(); - let sender = alice(); - - assert!( - valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - 
&fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap() - ); - } - - #[test] - fn test_join_non_creator() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - let events = INITIAL_EVENTS_CREATE_ROOM(); - - let auth_events = events - .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) - .collect::>(); - - let requester = to_pdu_event( - "HELLO", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().as_str()), - member_content_join(), - &["CREATE"], - &["CREATE"], - ); - - let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); - let target_user = charlie(); - let sender = charlie(); - - assert!( - !valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap() - ); - } - - #[test] - fn test_join_creator() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - let events = INITIAL_EVENTS_CREATE_ROOM(); - - let auth_events = events - .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) - .collect::>(); - - let requester = to_pdu_event( - "HELLO", - alice(), - TimelineEventType::RoomMember, - Some(alice().as_str()), - member_content_join(), - &["CREATE"], - &["CREATE"], - ); - - let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); - let target_user = alice(); - let sender = alice(); - - assert!( - valid_membership_change( - &RoomVersion::V6, - target_user, - 
fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap() - ); - } - - #[test] - fn test_ban_fail() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - let events = INITIAL_EVENTS(); - - let auth_events = events - .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) - .collect::>(); - - let requester = to_pdu_event( - "HELLO", - charlie(), - TimelineEventType::RoomMember, - Some(alice().as_str()), - member_content_ban(), - &[], - &["IMC"], - ); - - let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); - let target_user = alice(); - let sender = charlie(); - - assert!( - !valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap() - ); - } - - #[test] - fn test_restricted_join_rule() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - let mut events = INITIAL_EVENTS(); - *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Restricted( - 
Restricted::new(vec![AllowRule::RoomMembership(RoomMembership::new( - room_id().to_owned(), - ))]), - ))) - .unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["IPOWER"], - ); - - let mut member = RoomMemberEventContent::new(MembershipState::Join); - member.join_authorized_via_users_server = Some(alice().to_owned()); - - let auth_events = events - .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) - .collect::>(); - - let requester = to_pdu_event( - "HELLO", - ella(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap(), - &["CREATE", "IJR", "IPOWER", "new"], - &["new"], - ); - - let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); - let target_user = ella(); - let sender = ella(); - - assert!( - valid_membership_change( - &RoomVersion::V9, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - Some(alice()), - &MembershipState::Join, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap() - ); - - assert!( - !valid_membership_change( - &RoomVersion::V9, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - Some(ella()), - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap() - ); - } - - #[test] - fn test_knock() { - let _ = tracing::subscriber::set_default( - 
tracing_subscriber::fmt().with_test_writer().finish(), - ); - let mut events = INITIAL_EVENTS(); - *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Knock)).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["IPOWER"], - ); - - let auth_events = events - .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) - .collect::>(); - - let requester = to_pdu_event( - "HELLO", - ella(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Knock)).unwrap(), - &[], - &["IMC"], - ); - - let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); - let target_user = ella(); - let sender = ella(); - - assert!( - valid_membership_change( - &RoomVersion::V7, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap() - ); - } -} diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs deleted file mode 100644 index 2ab7cb64..00000000 --- a/src/core/matrix/state_res/mod.rs +++ /dev/null @@ -1,1654 +0,0 @@ -#![cfg_attr(test, allow(warnings))] - -pub(crate) mod error; -pub mod event_auth; -mod power_levels; -mod room_version; - -#[cfg(test)] -mod test_utils; - -#[cfg(test)] -mod benches; - -use std::{ - borrow::Borrow, - cmp::{Ordering, Reverse}, - collections::{BinaryHeap, HashMap, HashSet}, - hash::{BuildHasher, Hash}, -}; - -use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future}; 
-use ruma::{ - EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, - events::{ - StateEventType, TimelineEventType, - room::member::{MembershipState, RoomMemberEventContent}, - }, - int, -}; -use serde_json::from_str as from_json_str; - -pub(crate) use self::error::Error; -use self::power_levels::PowerLevelsContentFields; -pub use self::{ - event_auth::{auth_check, auth_types_for_event}, - room_version::RoomVersion, -}; -use crate::{ - debug, debug_error, - matrix::{event::Event, pdu::StateKey}, - trace, - utils::stream::{ - BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryReadyExt, WidebandExt, - }, - warn, -}; - -/// A mapping of event type and state_key to some value `T`, usually an -/// `EventId`. -pub type StateMap = HashMap; -pub type StateMapItem = (TypeStateKey, T); -pub type TypeStateKey = (StateEventType, StateKey); - -type Result = crate::Result; - -/// Resolve sets of state events as they come in. -/// -/// Internally `StateResolution` builds a graph and an auth chain to allow for -/// state conflict resolution. -/// -/// ## Arguments -/// -/// * `state_sets` - The incoming state to resolve. Each `StateMap` represents a -/// possible fork in the state of a room. -/// -/// * `auth_chain_sets` - The full recursive set of `auth_events` for each event -/// in the `state_sets`. -/// -/// * `event_fetch` - Any event not found in the `event_map` will defer to this -/// closure to find the event. -/// -/// * `parallel_fetches` - The number of asynchronous fetch requests in-flight -/// for any given operation. -/// -/// ## Invariants -/// -/// The caller of `resolve` must ensure that all the events are from the same -/// room. Although this function takes a `RoomId` it does not check that each -/// event is part of the same room. 
-//#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, -//#[tracing::instrument(level event_fetch))] -pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( - room_version: &RoomVersionId, - state_sets: Sets, - auth_chain_sets: &'a [HashSet], - event_fetch: &Fetch, - event_exists: &Exists, - parallel_fetches: usize, -) -> Result> -where - Fetch: Fn(E::Id) -> FetchFut + Sync, - FetchFut: Future> + Send, - Exists: Fn(E::Id) -> ExistsFut + Sync, - ExistsFut: Future + Send, - Sets: IntoIterator + Send, - SetIter: Iterator> + Clone + Send, - Hasher: BuildHasher + Send + Sync, - E: Event + Clone + Send + Sync, - E::Id: Borrow + Send + Sync, - for<'b> &'b E: Send, -{ - debug!("State resolution starting"); - - // Split non-conflicting and conflicting state - let (clean, conflicting) = separate(state_sets.into_iter()); - - debug!(count = clean.len(), "non-conflicting events"); - trace!(map = ?clean, "non-conflicting events"); - - if conflicting.is_empty() { - debug!("no conflicting state found"); - return Ok(clean); - } - - debug!(count = conflicting.len(), "conflicting events"); - trace!(map = ?conflicting, "conflicting events"); - - let conflicting_values = conflicting.into_values().flatten().stream(); - - // `all_conflicted` contains unique items - // synapse says `full_set = {eid for eid in full_conflicted_set if eid in - // event_map}` - let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets) - .chain(conflicting_values) - .broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id)) - .collect() - .await; - - debug!(count = all_conflicted.len(), "full conflicted set"); - trace!(set = ?all_conflicted, "full conflicted set"); - - // We used to check that all events are events from the correct room - // this is now a check the caller of `resolve` must make. 
- - // Get only the control events with a state_key: "" or ban/kick event (sender != - // state_key) - let control_events: Vec<_> = all_conflicted - .iter() - .stream() - .wide_filter_map(async |id| { - is_power_event_id(id, &event_fetch) - .await - .then_some(id.clone()) - }) - .collect() - .await; - - // Sort the control events based on power_level/clock/event_id and - // outgoing/incoming edges - let sorted_control_levels = reverse_topological_power_sort( - control_events, - &all_conflicted, - &event_fetch, - parallel_fetches, - ) - .await?; - - debug!(count = sorted_control_levels.len(), "power events"); - trace!(list = ?sorted_control_levels, "sorted power events"); - - let room_version = RoomVersion::new(room_version)?; - // Sequentially auth check each control event. - let resolved_control = iterative_auth_check( - &room_version, - sorted_control_levels.iter().stream(), - clean.clone(), - &event_fetch, - ) - .await?; - - debug!(count = resolved_control.len(), "resolved power events"); - trace!(map = ?resolved_control, "resolved power events"); - - // At this point the control_events have been resolved we now have to - // sort the remaining events using the mainline of the resolved power level. 
- let deduped_power_ev: HashSet<_> = sorted_control_levels.into_iter().collect(); - - // This removes the control events that passed auth and more importantly those - // that failed auth - let events_to_resolve: Vec<_> = all_conflicted - .iter() - .filter(|&id| !deduped_power_ev.contains(id.borrow())) - .cloned() - .collect(); - - debug!(count = events_to_resolve.len(), "events left to resolve"); - trace!(list = ?events_to_resolve, "events left to resolve"); - - // This "epochs" power level event - let power_levels_ty_sk = (StateEventType::RoomPowerLevels, StateKey::new()); - let power_event = resolved_control.get(&power_levels_ty_sk); - - debug!(event_id = ?power_event, "power event"); - - let sorted_left_events = - mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?; - - trace!(list = ?sorted_left_events, "events left, sorted"); - - let mut resolved_state = iterative_auth_check( - &room_version, - sorted_left_events.iter().stream(), - resolved_control, // The control events are added to the final resolved state - &event_fetch, - ) - .await?; - - // Add unconflicted state to the resolved state - // We priorities the unconflicting state - resolved_state.extend(clean); - - debug!("state resolution finished"); - - Ok(resolved_state) -} - -/// Split the events that have no conflicts from those that are conflicting. -/// -/// The return tuple looks like `(unconflicted, conflicted)`. -/// -/// State is determined to be conflicting if for the given key (StateEventType, -/// StateKey) there is not exactly one event ID. This includes missing events, -/// if one state_set includes an event that none of the other have this is a -/// conflicting event. 
-fn separate<'a, Id>( - state_sets_iter: impl Iterator>, -) -> (StateMap, StateMap>) -where - Id: Clone + Eq + Hash + 'a, -{ - let mut state_set_count: usize = 0; - let mut occurrences = HashMap::<_, HashMap<_, _>>::new(); - - let state_sets_iter = - state_sets_iter.inspect(|_| state_set_count = state_set_count.saturating_add(1)); - for (k, v) in state_sets_iter.flatten() { - occurrences - .entry(k) - .or_default() - .entry(v) - .and_modify(|x: &mut usize| *x = x.saturating_add(1)) - .or_insert(1); - } - - let mut unconflicted_state = StateMap::new(); - let mut conflicted_state = StateMap::new(); - - for (k, v) in occurrences { - for (id, occurrence_count) in v { - if occurrence_count == state_set_count { - unconflicted_state.insert((k.0.clone(), k.1.clone()), id.clone()); - } else { - conflicted_state - .entry((k.0.clone(), k.1.clone())) - .and_modify(|x: &mut Vec<_>| x.push(id.clone())) - .or_insert_with(|| vec![id.clone()]); - } - } - } - - (unconflicted_state, conflicted_state) -} - -/// Returns a Vec of deduped EventIds that appear in some chains but not others. -#[allow(clippy::arithmetic_side_effects)] -fn get_auth_chain_diff( - auth_chain_sets: &[HashSet], -) -> impl Stream + Send + use -where - Id: Clone + Eq + Hash + Send, - Hasher: BuildHasher + Send + Sync, -{ - let num_sets = auth_chain_sets.len(); - let mut id_counts: HashMap = HashMap::new(); - for id in auth_chain_sets.iter().flatten() { - *id_counts.entry(id.clone()).or_default() += 1; - } - - id_counts - .into_iter() - .filter_map(move |(id, count)| (count < num_sets).then_some(id)) - .stream() -} - -/// Events are sorted from "earliest" to "latest". -/// -/// They are compared using the negative power level (reverse topological -/// ordering), the origin server timestamp and in case of a tie the `EventId`s -/// are compared lexicographically. -/// -/// The power level is negative because a higher power level is equated to an -/// earlier (further back in time) origin server timestamp. 
-#[tracing::instrument(level = "debug", skip_all)] -async fn reverse_topological_power_sort( - events_to_sort: Vec, - auth_diff: &HashSet, - fetch_event: &F, - parallel_fetches: usize, -) -> Result> -where - F: Fn(E::Id) -> Fut + Sync, - Fut: Future> + Send, - E: Event + Send + Sync, - E::Id: Borrow + Send + Sync, -{ - debug!("reverse topological sort of power events"); - - let mut graph = HashMap::new(); - for event_id in events_to_sort { - add_event_and_auth_chain_to_graph(&mut graph, event_id, auth_diff, fetch_event).await; - } - - // This is used in the `key_fn` passed to the lexico_topo_sort fn - let event_to_pl = graph - .keys() - .stream() - .map(|event_id| { - get_power_level_for_sender(event_id.clone(), fetch_event) - .map(move |res| res.map(|pl| (event_id, pl))) - }) - .buffer_unordered(parallel_fetches) - .ready_try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { - debug!( - event_id = event_id.borrow().as_str(), - power_level = i64::from(pl), - "found the power level of an event's sender", - ); - - event_to_pl.insert(event_id.clone(), pl); - Ok(event_to_pl) - }) - .boxed() - .await?; - - let event_to_pl = &event_to_pl; - let fetcher = |event_id: E::Id| async move { - let pl = *event_to_pl - .get(event_id.borrow()) - .ok_or_else(|| Error::NotFound(String::new()))?; - let ev = fetch_event(event_id) - .await - .ok_or_else(|| Error::NotFound(String::new()))?; - Ok((pl, ev.origin_server_ts())) - }; - - lexicographical_topological_sort(&graph, &fetcher).await -} - -/// Sorts the event graph based on number of outgoing/incoming edges. -/// -/// `key_fn` is used as to obtain the power level and age of an event for -/// breaking ties (together with the event ID). 
-#[tracing::instrument(level = "debug", skip_all)] -pub async fn lexicographical_topological_sort( - graph: &HashMap>, - key_fn: &F, -) -> Result> -where - F: Fn(Id) -> Fut + Sync, - Fut: Future> + Send, - Id: Borrow + Clone + Eq + Hash + Ord + Send + Sync, - Hasher: BuildHasher + Default + Clone + Send + Sync, -{ - #[derive(PartialEq, Eq)] - struct TieBreaker<'a, Id> { - power_level: Int, - origin_server_ts: MilliSecondsSinceUnixEpoch, - event_id: &'a Id, - } - - impl Ord for TieBreaker<'_, Id> - where - Id: Ord, - { - fn cmp(&self, other: &Self) -> Ordering { - // NOTE: the power level comparison is "backwards" intentionally. - // See the "Mainline ordering" section of the Matrix specification - // around where it says the following: - // - // > for events `x` and `y`, `x < y` if [...] - // - // - other - .power_level - .cmp(&self.power_level) - .then(self.origin_server_ts.cmp(&other.origin_server_ts)) - .then(self.event_id.cmp(other.event_id)) - } - } - - impl PartialOrd for TieBreaker<'_, Id> - where - Id: Ord, - { - fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } - } - - debug!("starting lexicographical topological sort"); - - // NOTE: an event that has no incoming edges happened most recently, - // and an event that has no outgoing edges happened least recently. - - // NOTE: this is basically Kahn's algorithm except we look at nodes with no - // outgoing edges, c.f. - // https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm - - // outdegree_map is an event referring to the events before it, the - // more outdegree's the more recent the event. - let mut outdegree_map = graph.clone(); - - // The number of events that depend on the given event (the EventId key) - // How many events reference this event in the DAG as a parent - let mut reverse_graph: HashMap<_, HashSet<_, Hasher>> = HashMap::new(); - - // Vec of nodes that have zero out degree, least recent events. 
- let mut zero_outdegree = Vec::new(); - - for (node, edges) in graph { - if edges.is_empty() { - let (power_level, origin_server_ts) = key_fn(node.clone()).await?; - // The `Reverse` is because rusts `BinaryHeap` sorts largest -> smallest we need - // smallest -> largest - zero_outdegree.push(Reverse(TieBreaker { - power_level, - origin_server_ts, - event_id: node, - })); - } - - reverse_graph.entry(node).or_default(); - for edge in edges { - reverse_graph.entry(edge).or_default().insert(node); - } - } - - let mut heap = BinaryHeap::from(zero_outdegree); - - // We remove the oldest node (most incoming edges) and check against all other - let mut sorted = vec![]; - // Destructure the `Reverse` and take the smallest `node` each time - while let Some(Reverse(item)) = heap.pop() { - let node = item.event_id; - - for &parent in reverse_graph - .get(node) - .expect("EventId in heap is also in reverse_graph") - { - // The number of outgoing edges this node has - let out = outdegree_map - .get_mut(parent.borrow()) - .expect("outdegree_map knows of all referenced EventIds"); - - // Only push on the heap once older events have been cleared - out.remove(node.borrow()); - if out.is_empty() { - let (power_level, origin_server_ts) = key_fn(parent.clone()).await?; - heap.push(Reverse(TieBreaker { - power_level, - origin_server_ts, - event_id: parent, - })); - } - } - - // synapse yields we push then return the vec - sorted.push(node.clone()); - } - - Ok(sorted) -} - -/// Find the power level for the sender of `event_id` or return a default value -/// of zero. -/// -/// Do NOT use this any where but topological sort, we find the power level for -/// the eventId at the eventId's generation (we walk backwards to `EventId`s -/// most recent previous power level event). 
-async fn get_power_level_for_sender( - event_id: E::Id, - fetch_event: &F, -) -> serde_json::Result -where - F: Fn(E::Id) -> Fut + Sync, - Fut: Future> + Send, - E: Event + Send, - E::Id: Borrow + Send, -{ - debug!("fetch event ({event_id}) senders power level"); - - let event = fetch_event(event_id).await; - - let auth_events = event.as_ref().map(Event::auth_events); - - let pl = auth_events - .into_iter() - .flatten() - .stream() - .broadn_filter_map(5, |aid| fetch_event(aid.clone())) - .ready_find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")) - .await; - - let content: PowerLevelsContentFields = match pl { - | None => return Ok(int!(0)), - | Some(ev) => from_json_str(ev.content().get())?, - }; - - if let Some(ev) = event { - if let Some(&user_level) = content.get_user_power(ev.sender()) { - debug!("found {} at power_level {user_level}", ev.sender()); - return Ok(user_level); - } - } - - Ok(content.users_default) -} - -/// Check the that each event is authenticated based on the events before it. -/// -/// ## Returns -/// -/// The `unconflicted_state` combined with the newly auth'ed events. So any -/// event that fails the `event_auth::auth_check` will be excluded from the -/// returned state map. -/// -/// For each `events_to_check` event we gather the events needed to auth it from -/// the the `fetch_event` closure and verify each event using the -/// `event_auth::auth_check` function. 
-async fn iterative_auth_check<'a, E, F, Fut, S>( - room_version: &RoomVersion, - events_to_check: S, - unconflicted_state: StateMap, - fetch_event: &F, -) -> Result> -where - F: Fn(E::Id) -> Fut + Sync, - Fut: Future> + Send, - E::Id: Borrow + Clone + Eq + Ord + Send + Sync + 'a, - S: Stream + Send + 'a, - E: Event + Clone + Send + Sync, -{ - debug!("starting iterative auth check"); - - let events_to_check: Vec<_> = events_to_check - .map(Result::Ok) - .broad_and_then(async |event_id| { - fetch_event(event_id.clone()) - .await - .ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) - }) - .try_collect() - .boxed() - .await?; - - let auth_event_ids: HashSet = events_to_check - .iter() - .flat_map(|event: &E| event.auth_events().map(Clone::clone)) - .collect(); - - let auth_events: HashMap = auth_event_ids - .into_iter() - .stream() - .broad_filter_map(fetch_event) - .map(|auth_event| (auth_event.event_id().clone(), auth_event)) - .collect() - .boxed() - .await; - - let auth_events = &auth_events; - let mut resolved_state = unconflicted_state; - for event in &events_to_check { - let state_key = event - .state_key() - .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?; - - let auth_types = auth_types_for_event( - event.event_type(), - event.sender(), - Some(state_key), - event.content(), - )?; - - let mut auth_state = StateMap::new(); - for aid in event.auth_events() { - if let Some(ev) = auth_events.get(aid.borrow()) { - //TODO: synapse checks "rejected_reason" which is most likely related to - // soft-failing - auth_state.insert( - ev.event_type() - .with_state_key(ev.state_key().ok_or_else(|| { - Error::InvalidPdu("State event had no state key".to_owned()) - })?), - ev.clone(), - ); - } else { - warn!(event_id = aid.borrow().as_str(), "missing auth event"); - } - } - - auth_types - .iter() - .stream() - .ready_filter_map(|key| Some((key, resolved_state.get(key)?))) - .filter_map(|(key, ev_id)| async move { - if let 
Some(event) = auth_events.get(ev_id.borrow()) { - Some((key, event.clone())) - } else { - Some((key, fetch_event(ev_id.clone()).await?)) - } - }) - .ready_for_each(|(key, event)| { - //TODO: synapse checks "rejected_reason" is None here - auth_state.insert(key.to_owned(), event); - }) - .await; - - debug!("event to check {:?}", event.event_id()); - - // The key for this is (eventType + a state_key of the signed token not sender) - // so search for it - let current_third_party = auth_state.iter().find_map(|(_, pdu)| { - (*pdu.event_type() == TimelineEventType::RoomThirdPartyInvite).then_some(pdu) - }); - - let fetch_state = |ty: &StateEventType, key: &str| { - future::ready(auth_state.get(&ty.with_state_key(key))) - }; - - let auth_result = - auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await; - - match auth_result { - | Ok(true) => { - // add event to resolved state map - resolved_state.insert( - event.event_type().with_state_key(state_key), - event.event_id().clone(), - ); - }, - | Ok(false) => { - // synapse passes here on AuthError. We do not add this event to resolved_state. - warn!("event {} failed the authentication check", event.event_id()); - }, - | Err(e) => { - debug_error!("event {} failed the authentication check: {e}", event.event_id()); - return Err(e); - }, - } - } - - Ok(resolved_state) -} - -/// Returns the sorted `to_sort` list of `EventId`s based on a mainline sort -/// using the depth of `resolved_power_level`, the server timestamp, and the -/// eventId. -/// -/// The depth of the given event is calculated based on the depth of it's -/// closest "parent" power_level event. If there have been two power events the -/// after the most recent are depth 0, the events before (with the first power -/// level as a parent) will be marked as depth 1. depth 1 is "older" than depth -/// 0. 
-async fn mainline_sort( - to_sort: &[E::Id], - resolved_power_level: Option, - fetch_event: &F, -) -> Result> -where - F: Fn(E::Id) -> Fut + Sync, - Fut: Future> + Send, - E: Event + Clone + Send + Sync, - E::Id: Borrow + Clone + Send + Sync, -{ - debug!("mainline sort of events"); - - // There are no EventId's to sort, bail. - if to_sort.is_empty() { - return Ok(vec![]); - } - - let mut mainline = vec![]; - let mut pl = resolved_power_level; - while let Some(p) = pl { - mainline.push(p.clone()); - - let event = fetch_event(p.clone()) - .await - .ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?; - - pl = None; - for aid in event.auth_events() { - let ev = fetch_event(aid.clone()) - .await - .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; - - if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") { - pl = Some(aid.to_owned()); - break; - } - } - } - - let mainline_map: HashMap<_, _> = mainline - .iter() - .rev() - .enumerate() - .map(|(idx, eid)| ((*eid).clone(), idx)) - .collect(); - - let order_map: HashMap<_, _> = to_sort - .iter() - .stream() - .broad_filter_map(async |ev_id| { - fetch_event(ev_id.clone()).await.map(|event| (event, ev_id)) - }) - .broad_filter_map(|(event, ev_id)| { - get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event) - .map_ok(move |depth| (ev_id, (depth, event.origin_server_ts(), ev_id))) - .map(Result::ok) - }) - .collect() - .boxed() - .await; - - // Sort the event_ids by their depth, timestamp and EventId - // unwrap is OK order map and sort_event_ids are from to_sort (the same Vec) - let mut sort_event_ids: Vec<_> = order_map.keys().map(|&k| k.clone()).collect(); - - sort_event_ids.sort_by_key(|sort_id| &order_map[sort_id]); - - Ok(sort_event_ids) -} - -/// Get the mainline depth from the `mainline_map` or finds a power_level event -/// that has an associated mainline depth. 
-async fn get_mainline_depth( - mut event: Option, - mainline_map: &HashMap, - fetch_event: &F, -) -> Result -where - F: Fn(E::Id) -> Fut + Sync, - Fut: Future> + Send, - E: Event + Send + Sync, - E::Id: Borrow + Send + Sync, -{ - while let Some(sort_ev) = event { - debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); - - let id = sort_ev.event_id(); - if let Some(depth) = mainline_map.get(id.borrow()) { - return Ok(*depth); - } - - event = None; - for aid in sort_ev.auth_events() { - let aev = fetch_event(aid.clone()) - .await - .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; - - if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") { - event = Some(aev); - break; - } - } - } - // Did not find a power level event so we default to zero - Ok(0) -} - -async fn add_event_and_auth_chain_to_graph( - graph: &mut HashMap>, - event_id: E::Id, - auth_diff: &HashSet, - fetch_event: &F, -) where - F: Fn(E::Id) -> Fut + Sync, - Fut: Future> + Send, - E: Event + Send + Sync, - E::Id: Borrow + Clone + Send + Sync, -{ - let mut state = vec![event_id]; - while let Some(eid) = state.pop() { - graph.entry(eid.clone()).or_default(); - let event = fetch_event(eid.clone()).await; - let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); - - // Prefer the store to event as the store filters dedups the events - for aid in auth_events { - if auth_diff.contains(aid.borrow()) { - if !graph.contains_key(aid.borrow()) { - state.push(aid.to_owned()); - } - - // We just inserted this at the start of the while loop - graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned()); - } - } - } -} - -async fn is_power_event_id(event_id: &E::Id, fetch: &F) -> bool -where - F: Fn(E::Id) -> Fut + Sync, - Fut: Future> + Send, - E: Event + Send, - E::Id: Borrow + Send + Sync, -{ - match fetch(event_id.clone()).await.as_ref() { - | Some(state) => is_power_event(state), - | _ => false, - } -} - -fn is_type_and_key(ev: impl Event, ev_type: 
&TimelineEventType, state_key: &str) -> bool { - ev.event_type() == ev_type && ev.state_key() == Some(state_key) -} - -fn is_power_event(event: impl Event) -> bool { - match event.event_type() { - | TimelineEventType::RoomPowerLevels - | TimelineEventType::RoomJoinRules - | TimelineEventType::RoomCreate => event.state_key() == Some(""), - | TimelineEventType::RoomMember => { - if let Ok(content) = from_json_str::(event.content().get()) { - if [MembershipState::Leave, MembershipState::Ban].contains(&content.membership) { - return Some(event.sender().as_str()) != event.state_key(); - } - } - - false - }, - | _ => false, - } -} - -/// Convenience trait for adding event type plus state key to state maps. -pub trait EventTypeExt { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey); -} - -impl EventTypeExt for StateEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { - (self, state_key.into()) - } -} - -impl EventTypeExt for TimelineEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { - (self.into(), state_key.into()) - } -} - -impl EventTypeExt for &T -where - T: EventTypeExt + Clone, -{ - fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { - self.to_owned().with_state_key(state_key) - } -} - -#[cfg(test)] -mod tests { - use std::collections::{HashMap, HashSet}; - - use maplit::{hashmap, hashset}; - use rand::seq::SliceRandom; - use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, - events::{ - StateEventType, TimelineEventType, - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, - }, - int, uint, - }; - use serde_json::{json, value::to_raw_value as to_raw_json_value}; - - use super::{ - Event, EventTypeExt, StateMap, is_power_event, - room_version::RoomVersion, - test_utils::{ - INITIAL_EVENTS, PduEvent, TestStore, alice, bob, charlie, do_check, ella, event_id, - member_content_ban, member_content_join, room_id, 
to_init_pdu_event, to_pdu_event, - zara, - }, - }; - use crate::{debug, utils::stream::IterStream}; - - async fn test_event_sort() { - use futures::future::ready; - - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - let events = INITIAL_EVENTS(); - - let event_map = events - .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) - .collect::>(); - - let auth_chain: HashSet = HashSet::new(); - - let power_events = event_map - .values() - .filter(|&pdu| is_power_event(&*pdu)) - .map(|pdu| pdu.event_id.clone()) - .collect::>(); - - let fetcher = |id| ready(events.get(&id).cloned()); - let sorted_power_events = - super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) - .await - .unwrap(); - - let resolved_power = super::iterative_auth_check( - &RoomVersion::V6, - sorted_power_events.iter().stream(), - HashMap::new(), // unconflicted events - &fetcher, - ) - .await - .expect("iterative auth check failed on resolved events"); - - // don't remove any events so we know it sorts them all correctly - let mut events_to_sort = events.keys().cloned().collect::>(); - - events_to_sort.shuffle(&mut rand::thread_rng()); - - let power_level = resolved_power - .get(&(StateEventType::RoomPowerLevels, "".into())) - .cloned(); - - let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher) - .await - .unwrap(); - - assert_eq!( - vec![ - "$CREATE:foo", - "$IMA:foo", - "$IPOWER:foo", - "$IJR:foo", - "$IMB:foo", - "$IMC:foo", - "$START:foo", - "$END:foo" - ], - sorted_event_ids - .iter() - .map(|id| id.to_string()) - .collect::>() - ); - } - - #[tokio::test] - async fn test_sort() { - for _ in 0..20 { - // since we shuffle the eventIds before we sort them introducing randomness - // seems like we should test this a few times - test_event_sort().await; - } - } - - #[tokio::test] - async fn ban_vs_power_level() { - let _ = 
tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - - let events = &[ - to_init_pdu_event( - "PA", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - ), - to_init_pdu_event( - "MA", - alice(), - TimelineEventType::RoomMember, - Some(alice().to_string().as_str()), - member_content_join(), - ), - to_init_pdu_event( - "MB", - alice(), - TimelineEventType::RoomMember, - Some(bob().to_string().as_str()), - member_content_ban(), - ), - to_init_pdu_event( - "PB", - bob(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - ), - ]; - - let edges = vec![vec!["END", "MB", "MA", "PA", "START"], vec!["END", "PA", "PB"]] - .into_iter() - .map(|list| list.into_iter().map(event_id).collect::>()) - .collect::>(); - - let expected_state_ids = vec!["PA", "MA", "MB"] - .into_iter() - .map(event_id) - .collect::>(); - - do_check(events, edges, expected_state_ids).await; - } - - #[tokio::test] - async fn topic_basic() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - - let events = &[ - to_init_pdu_event( - "T1", - alice(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - to_init_pdu_event( - "PA1", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - ), - to_init_pdu_event( - "T2", - alice(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - to_init_pdu_event( - "PA2", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), - ), - to_init_pdu_event( - "PB", - bob(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { 
alice(): 100, bob(): 50 } })).unwrap(), - ), - to_init_pdu_event( - "T3", - bob(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - ]; - - let edges = - vec![vec!["END", "PA2", "T2", "PA1", "T1", "START"], vec!["END", "T3", "PB", "PA1"]] - .into_iter() - .map(|list| list.into_iter().map(event_id).collect::>()) - .collect::>(); - - let expected_state_ids = vec!["PA2", "T2"] - .into_iter() - .map(event_id) - .collect::>(); - - do_check(events, edges, expected_state_ids).await; - } - - #[tokio::test] - async fn topic_reset() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - - let events = &[ - to_init_pdu_event( - "T1", - alice(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - to_init_pdu_event( - "PA", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - ), - to_init_pdu_event( - "T2", - bob(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - to_init_pdu_event( - "MB", - alice(), - TimelineEventType::RoomMember, - Some(bob().to_string().as_str()), - member_content_ban(), - ), - ]; - - let edges = vec![vec!["END", "MB", "T2", "PA", "T1", "START"], vec!["END", "T1"]] - .into_iter() - .map(|list| list.into_iter().map(event_id).collect::>()) - .collect::>(); - - let expected_state_ids = vec!["T1", "MB", "PA"] - .into_iter() - .map(event_id) - .collect::>(); - - do_check(events, edges, expected_state_ids).await; - } - - #[tokio::test] - async fn join_rule_evasion() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - - let events = &[ - to_init_pdu_event( - "JR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Private)).unwrap(), - ), - to_init_pdu_event( - "ME", - 
ella(), - TimelineEventType::RoomMember, - Some(ella().to_string().as_str()), - member_content_join(), - ), - ]; - - let edges = vec![vec!["END", "JR", "START"], vec!["END", "ME", "START"]] - .into_iter() - .map(|list| list.into_iter().map(event_id).collect::>()) - .collect::>(); - - let expected_state_ids = vec![event_id("JR")]; - - do_check(events, edges, expected_state_ids).await; - } - - #[tokio::test] - async fn offtopic_power_level() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - - let events = &[ - to_init_pdu_event( - "PA", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - ), - to_init_pdu_event( - "PB", - bob(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value( - &json!({ "users": { alice(): 100, bob(): 50, charlie(): 50 } }), - ) - .unwrap(), - ), - to_init_pdu_event( - "PC", - charlie(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50, charlie(): 0 } })) - .unwrap(), - ), - ]; - - let edges = vec![vec!["END", "PC", "PB", "PA", "START"], vec!["END", "PA"]] - .into_iter() - .map(|list| list.into_iter().map(event_id).collect::>()) - .collect::>(); - - let expected_state_ids = vec!["PC"].into_iter().map(event_id).collect::>(); - - do_check(events, edges, expected_state_ids).await; - } - - #[tokio::test] - async fn topic_setting() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - - let events = &[ - to_init_pdu_event( - "T1", - alice(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - to_init_pdu_event( - "PA1", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - ), - to_init_pdu_event( - "T2", - alice(), - 
TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - to_init_pdu_event( - "PA2", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), - ), - to_init_pdu_event( - "PB", - bob(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - ), - to_init_pdu_event( - "T3", - bob(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - to_init_pdu_event( - "MZ1", - zara(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - to_init_pdu_event( - "T4", - alice(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - ), - ]; - - let edges = vec![vec!["END", "T4", "MZ1", "PA2", "T2", "PA1", "T1", "START"], vec![ - "END", "MZ1", "T3", "PB", "PA1", - ]] - .into_iter() - .map(|list| list.into_iter().map(event_id).collect::>()) - .collect::>(); - - let expected_state_ids = vec!["T4", "PA2"] - .into_iter() - .map(event_id) - .collect::>(); - - do_check(events, edges, expected_state_ids).await; - } - - #[tokio::test] - async fn test_event_map_none() { - use futures::future::ready; - - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - - let mut store = TestStore::(hashmap! 
{}); - - // build up the DAG - let (state_at_bob, state_at_charlie, expected) = store.set_up(); - - let ev_map = store.0.clone(); - let fetcher = |id| ready(ev_map.get(&id).cloned()); - - let exists = |id: ::Id| ready(ev_map.get(&*id).is_some()); - - let state_sets = [state_at_bob, state_at_charlie]; - let auth_chain: Vec<_> = state_sets - .iter() - .map(|map| { - store - .auth_event_ids(room_id(), map.values().cloned().collect()) - .unwrap() - }) - .collect(); - - let resolved = match super::resolve( - &RoomVersionId::V2, - &state_sets, - &auth_chain, - &fetcher, - &exists, - 1, - ) - .await - { - | Ok(state) => state, - | Err(e) => panic!("{e}"), - }; - - assert_eq!(expected, resolved); - } - - #[tokio::test] - async fn test_lexicographical_sort() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - - let graph = hashmap! { - event_id("l") => hashset![event_id("o")], - event_id("m") => hashset![event_id("n"), event_id("o")], - event_id("n") => hashset![event_id("o")], - event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges - event_id("p") => hashset![event_id("o")], - }; - - let res = super::lexicographical_topological_sort(&graph, &|_id| async { - Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) - }) - .await - .unwrap(); - - assert_eq!( - vec!["o", "l", "n", "m", "p"], - res.iter() - .map(ToString::to_string) - .map(|s| s.replace('$', "").replace(":foo", "")) - .collect::>() - ); - } - - #[tokio::test] - async fn ban_with_auth_chains() { - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - let ban = BAN_STATE_SET(); - - let edges = vec![vec!["END", "MB", "PA", "START"], vec!["END", "IME", "MB"]] - .into_iter() - .map(|list| list.into_iter().map(event_id).collect::>()) - .collect::>(); - - let expected_state_ids = vec!["PA", "MB"] - .into_iter() - .map(event_id) - .collect::>(); - - 
do_check(&ban.values().cloned().collect::>(), edges, expected_state_ids).await; - } - - #[tokio::test] - async fn ban_with_auth_chains2() { - use futures::future::ready; - - let _ = tracing::subscriber::set_default( - tracing_subscriber::fmt().with_test_writer().finish(), - ); - let init = INITIAL_EVENTS(); - let ban = BAN_STATE_SET(); - - let mut inner = init.clone(); - inner.extend(ban); - let store = TestStore(inner.clone()); - - let state_set_a = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("MB")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) - .collect::>(); - - let state_set_b = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("IME")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) - .collect::>(); - - let ev_map = &store.0; - let state_sets = [state_set_a, state_set_b]; - let auth_chain: Vec<_> = state_sets - .iter() - .map(|map| { - store - .auth_event_ids(room_id(), map.values().cloned().collect()) - .unwrap() - }) - .collect(); - - let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); - let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); - let resolved = match super::resolve( - &RoomVersionId::V6, - &state_sets, - &auth_chain, - &fetcher, - &exists, - 1, - ) - .await - { - | Ok(state) => state, - | Err(e) => panic!("{e}"), - }; - - debug!( - resolved = ?resolved - .iter() - .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) - .collect::>(), - "resolved state", - ); - - let 
expected = [ - "$CREATE:foo", - "$IJR:foo", - "$PA:foo", - "$IMA:foo", - "$IMB:foo", - "$IMC:foo", - "$MB:foo", - ]; - - for id in expected.iter().map(|i| event_id(i)) { - // make sure our resolved events are equal to the expected list - assert!(resolved.values().any(|eid| eid == &id) || init.contains_key(&id), "{id}"); - } - assert_eq!(expected.len(), resolved.len()); - } - - #[tokio::test] - async fn join_rule_with_auth_chain() { - let join_rule = JOIN_RULE(); - - let edges = vec![vec!["END", "JR", "START"], vec!["END", "IMZ", "START"]] - .into_iter() - .map(|list| list.into_iter().map(event_id).collect::>()) - .collect::>(); - - let expected_state_ids = vec!["JR"].into_iter().map(event_id).collect::>(); - - do_check(&join_rule.values().cloned().collect::>(), edges, expected_state_ids) - .await; - } - - #[allow(non_snake_case)] - fn BAN_STATE_SET() -> HashMap { - vec![ - to_pdu_event( - "PA", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], // auth_events - &["START"], // prev_events - ), - to_pdu_event( - "PB", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["END"], - ), - to_pdu_event( - "MB", - alice(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_ban(), - &["CREATE", "IMA", "PB"], - &["PA"], - ), - to_pdu_event( - "IME", - ella(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_join(), - &["CREATE", "IJR", "PA"], - &["MB"], - ), - ] - .into_iter() - .map(|ev| (ev.event_id.clone(), ev)) - .collect() - } - - #[allow(non_snake_case)] - fn JOIN_RULE() -> HashMap { - vec![ - to_pdu_event( - "JR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&json!({ "join_rule": "invite" })).unwrap(), - &["CREATE", "IMA", "IPOWER"], - 
&["START"], - ), - to_pdu_event( - "IMZ", - zara(), - TimelineEventType::RoomPowerLevels, - Some(zara().as_str()), - member_content_join(), - &["CREATE", "JR", "IPOWER"], - &["START"], - ), - ] - .into_iter() - .map(|ev| (ev.event_id.clone(), ev)) - .collect() - } - - macro_rules! state_set { - ($($kind:expr_2021 => $key:expr_2021 => $id:expr_2021),* $(,)?) => {{ - #[allow(unused_mut)] - let mut x = StateMap::new(); - $( - x.insert(($kind, $key.into()), $id); - )* - x - }}; - } - - #[test] - fn separate_unique_conflicted() { - let (unconflicted, conflicted) = super::separate( - [ - state_set![StateEventType::RoomMember => "@a:hs1" => 0], - state_set![StateEventType::RoomMember => "@b:hs1" => 1], - state_set![StateEventType::RoomMember => "@c:hs1" => 2], - ] - .iter(), - ); - - assert_eq!(unconflicted, StateMap::new()); - assert_eq!(conflicted, state_set![ - StateEventType::RoomMember => "@a:hs1" => vec![0], - StateEventType::RoomMember => "@b:hs1" => vec![1], - StateEventType::RoomMember => "@c:hs1" => vec![2], - ],); - } - - #[test] - fn separate_conflicted() { - let (unconflicted, mut conflicted) = super::separate( - [ - state_set![StateEventType::RoomMember => "@a:hs1" => 0], - state_set![StateEventType::RoomMember => "@a:hs1" => 1], - state_set![StateEventType::RoomMember => "@a:hs1" => 2], - ] - .iter(), - ); - - // HashMap iteration order is random, so sort this before asserting on it - for v in conflicted.values_mut() { - v.sort_unstable(); - } - - assert_eq!(unconflicted, StateMap::new()); - assert_eq!(conflicted, state_set![ - StateEventType::RoomMember => "@a:hs1" => vec![0, 1, 2], - ],); - } - - #[test] - fn separate_unconflicted() { - let (unconflicted, conflicted) = super::separate( - [ - state_set![StateEventType::RoomMember => "@a:hs1" => 0], - state_set![StateEventType::RoomMember => "@a:hs1" => 0], - state_set![StateEventType::RoomMember => "@a:hs1" => 0], - ] - .iter(), - ); - - assert_eq!(unconflicted, state_set![ - StateEventType::RoomMember => 
"@a:hs1" => 0, - ],); - assert_eq!(conflicted, StateMap::new()); - } - - #[test] - fn separate_mixed() { - let (unconflicted, conflicted) = super::separate( - [ - state_set![StateEventType::RoomMember => "@a:hs1" => 0], - state_set![ - StateEventType::RoomMember => "@a:hs1" => 0, - StateEventType::RoomMember => "@b:hs1" => 1, - ], - state_set![ - StateEventType::RoomMember => "@a:hs1" => 0, - StateEventType::RoomMember => "@c:hs1" => 2, - ], - ] - .iter(), - ); - - assert_eq!(unconflicted, state_set![ - StateEventType::RoomMember => "@a:hs1" => 0, - ],); - assert_eq!(conflicted, state_set![ - StateEventType::RoomMember => "@b:hs1" => vec![1], - StateEventType::RoomMember => "@c:hs1" => vec![2], - ],); - } -} diff --git a/src/core/matrix/state_res/outcomes.txt b/src/core/matrix/state_res/outcomes.txt deleted file mode 100644 index 0fa1c734..00000000 --- a/src/core/matrix/state_res/outcomes.txt +++ /dev/null @@ -1,104 +0,0 @@ -11/29/2020 BRANCH: timo-spec-comp REV: d2a85669cc6056679ce6ca0fde4658a879ad2b08 -lexicographical topological sort - time: [1.7123 us 1.7157 us 1.7199 us] - change: [-1.7584% -1.5433% -1.3205%] (p = 0.00 < 0.05) - Performance has improved. -Found 8 outliers among 100 measurements (8.00%) - 2 (2.00%) low mild - 5 (5.00%) high mild - 1 (1.00%) high severe - -resolve state of 5 events one fork - time: [10.981 us 10.998 us 11.020 us] -Found 3 outliers among 100 measurements (3.00%) - 3 (3.00%) high mild - -resolve state of 10 events 3 conflicting - time: [26.858 us 26.946 us 27.037 us] - -11/29/2020 BRANCH: event-trait REV: f0eb1310efd49d722979f57f20bd1ac3592b0479 -lexicographical topological sort - time: [1.7686 us 1.7738 us 1.7810 us] - change: [-3.2752% -2.4634% -1.7635%] (p = 0.00 < 0.05) - Performance has improved. 
-Found 1 outliers among 100 measurements (1.00%) - 1 (1.00%) high severe - -resolve state of 5 events one fork - time: [10.643 us 10.656 us 10.669 us] - change: [-4.9990% -3.8078% -2.8319%] (p = 0.00 < 0.05) - Performance has improved. -Found 1 outliers among 100 measurements (1.00%) - 1 (1.00%) high severe - -resolve state of 10 events 3 conflicting - time: [29.149 us 29.252 us 29.375 us] - change: [-0.8433% -0.3270% +0.2656%] (p = 0.25 > 0.05) - No change in performance detected. -Found 1 outliers among 100 measurements (1.00%) - 1 (1.00%) high mild - -4/26/2020 BRANCH: fix-test-serde REV: -lexicographical topological sort - time: [1.6793 us 1.6823 us 1.6857 us] -Found 9 outliers among 100 measurements (9.00%) - 1 (1.00%) low mild - 4 (4.00%) high mild - 4 (4.00%) high severe - -resolve state of 5 events one fork - time: [9.9993 us 10.062 us 10.159 us] -Found 9 outliers among 100 measurements (9.00%) - 7 (7.00%) high mild - 2 (2.00%) high severe - -resolve state of 10 events 3 conflicting - time: [26.004 us 26.092 us 26.195 us] -Found 16 outliers among 100 measurements (16.00%) - 11 (11.00%) high mild - 5 (5.00%) high severe - -6/30/2021 BRANCH: state-closure REV: 174c3e2a72232ad75b3fb14b3551f5f746f4fe84 -lexicographical topological sort - time: [1.5496 us 1.5536 us 1.5586 us] -Found 9 outliers among 100 measurements (9.00%) - 1 (1.00%) low mild - 1 (1.00%) high mild - 7 (7.00%) high severe - -resolve state of 5 events one fork - time: [10.319 us 10.333 us 10.347 us] -Found 2 outliers among 100 measurements (2.00%) - 2 (2.00%) high severe - -resolve state of 10 events 3 conflicting - time: [25.770 us 25.805 us 25.839 us] -Found 7 outliers among 100 measurements (7.00%) - 5 (5.00%) high mild - 2 (2.00%) high severe - -7/20/2021 BRANCH stateres-result REV: -This marks the switch to HashSet/Map -lexicographical topological sort - time: [1.8122 us 1.8177 us 1.8233 us] - change: [+15.205% +15.919% +16.502%] (p = 0.00 < 0.05) - Performance has regressed. 
-Found 7 outliers among 100 measurements (7.00%) - 5 (5.00%) high mild - 2 (2.00%) high severe - -resolve state of 5 events one fork - time: [11.966 us 12.010 us 12.059 us] - change: [+16.089% +16.730% +17.469%] (p = 0.00 < 0.05) - Performance has regressed. -Found 7 outliers among 100 measurements (7.00%) - 3 (3.00%) high mild - 4 (4.00%) high severe - -resolve state of 10 events 3 conflicting - time: [29.092 us 29.201 us 29.311 us] - change: [+12.447% +12.847% +13.280%] (p = 0.00 < 0.05) - Performance has regressed. -Found 9 outliers among 100 measurements (9.00%) - 6 (6.00%) high mild - 3 (3.00%) high severe diff --git a/src/core/matrix/state_res/power_levels.rs b/src/core/matrix/state_res/power_levels.rs deleted file mode 100644 index 19ba8fb9..00000000 --- a/src/core/matrix/state_res/power_levels.rs +++ /dev/null @@ -1,256 +0,0 @@ -use std::collections::BTreeMap; - -use ruma::{ - Int, OwnedUserId, UserId, - events::{TimelineEventType, room::power_levels::RoomPowerLevelsEventContent}, - power_levels::{NotificationPowerLevels, default_power_level}, - serde::{ - deserialize_v1_powerlevel, vec_deserialize_int_powerlevel_values, - vec_deserialize_v1_powerlevel_values, - }, -}; -use serde::Deserialize; -use serde_json::{Error, from_str as from_json_str}; - -use super::{Result, RoomVersion}; -use crate::error; - -#[derive(Deserialize)] -struct IntRoomPowerLevelsEventContent { - #[serde(default = "default_power_level")] - ban: Int, - - #[serde(default)] - events: BTreeMap, - - #[serde(default)] - events_default: Int, - - #[serde(default)] - invite: Int, - - #[serde(default = "default_power_level")] - kick: Int, - - #[serde(default = "default_power_level")] - redact: Int, - - #[serde(default = "default_power_level")] - state_default: Int, - - #[serde(default)] - users: BTreeMap, - - #[serde(default)] - users_default: Int, - - #[serde(default)] - notifications: IntNotificationPowerLevels, -} - -impl From for RoomPowerLevelsEventContent { - fn from(int_pl: 
IntRoomPowerLevelsEventContent) -> Self { - let IntRoomPowerLevelsEventContent { - ban, - events, - events_default, - invite, - kick, - redact, - state_default, - users, - users_default, - notifications, - } = int_pl; - - let mut pl = Self::new(); - pl.ban = ban; - pl.events = events; - pl.events_default = events_default; - pl.invite = invite; - pl.kick = kick; - pl.redact = redact; - pl.state_default = state_default; - pl.users = users; - pl.users_default = users_default; - pl.notifications = notifications.into(); - - pl - } -} - -#[derive(Deserialize)] -struct IntNotificationPowerLevels { - #[serde(default = "default_power_level")] - room: Int, -} - -impl Default for IntNotificationPowerLevels { - fn default() -> Self { Self { room: default_power_level() } } -} - -impl From for NotificationPowerLevels { - fn from(int_notif: IntNotificationPowerLevels) -> Self { - let mut notif = Self::new(); - notif.room = int_notif.room; - - notif - } -} - -#[inline] -pub(crate) fn deserialize_power_levels( - content: &str, - room_version: &RoomVersion, -) -> Option { - if room_version.integer_power_levels { - deserialize_integer_power_levels(content) - } else { - deserialize_legacy_power_levels(content) - } -} - -fn deserialize_integer_power_levels(content: &str) -> Option { - match from_json_str::(content) { - | Ok(content) => Some(content.into()), - | Err(_) => { - error!("m.room.power_levels event is not valid with integer values"); - None - }, - } -} - -fn deserialize_legacy_power_levels(content: &str) -> Option { - match from_json_str(content) { - | Ok(content) => Some(content), - | Err(_) => { - error!( - "m.room.power_levels event is not valid with integer or string integer values" - ); - None - }, - } -} - -#[derive(Deserialize)] -pub(crate) struct PowerLevelsContentFields { - #[serde(default, deserialize_with = "vec_deserialize_v1_powerlevel_values")] - pub(crate) users: Vec<(OwnedUserId, Int)>, - - #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] - 
pub(crate) users_default: Int, -} - -impl PowerLevelsContentFields { - pub(crate) fn get_user_power(&self, user_id: &UserId) -> Option<&Int> { - let comparator = |item: &(OwnedUserId, Int)| { - let item: &UserId = &item.0; - item.cmp(user_id) - }; - - self.users - .binary_search_by(comparator) - .ok() - .and_then(|idx| self.users.get(idx).map(|item| &item.1)) - } -} - -#[derive(Deserialize)] -struct IntPowerLevelsContentFields { - #[serde(default, deserialize_with = "vec_deserialize_int_powerlevel_values")] - users: Vec<(OwnedUserId, Int)>, - - #[serde(default)] - users_default: Int, -} - -impl From for PowerLevelsContentFields { - fn from(pl: IntPowerLevelsContentFields) -> Self { - let IntPowerLevelsContentFields { users, users_default } = pl; - Self { users, users_default } - } -} - -#[inline] -pub(crate) fn deserialize_power_levels_content_fields( - content: &str, - room_version: &RoomVersion, -) -> Result { - if room_version.integer_power_levels { - deserialize_integer_power_levels_content_fields(content) - } else { - deserialize_legacy_power_levels_content_fields(content) - } -} - -fn deserialize_integer_power_levels_content_fields( - content: &str, -) -> Result { - from_json_str::(content).map(Into::into) -} - -fn deserialize_legacy_power_levels_content_fields( - content: &str, -) -> Result { - from_json_str(content) -} - -#[derive(Deserialize)] -pub(crate) struct PowerLevelsContentInvite { - #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] - pub(crate) invite: Int, -} - -#[derive(Deserialize)] -struct IntPowerLevelsContentInvite { - #[serde(default)] - invite: Int, -} - -impl From for PowerLevelsContentInvite { - fn from(pl: IntPowerLevelsContentInvite) -> Self { - let IntPowerLevelsContentInvite { invite } = pl; - Self { invite } - } -} - -pub(crate) fn deserialize_power_levels_content_invite( - content: &str, - room_version: &RoomVersion, -) -> Result { - if room_version.integer_power_levels { - from_json_str::(content).map(Into::into) - 
} else { - from_json_str(content) - } -} - -#[derive(Deserialize)] -pub(crate) struct PowerLevelsContentRedact { - #[serde(default = "default_power_level", deserialize_with = "deserialize_v1_powerlevel")] - pub(crate) redact: Int, -} - -#[derive(Deserialize)] -pub(crate) struct IntPowerLevelsContentRedact { - #[serde(default = "default_power_level")] - redact: Int, -} - -impl From for PowerLevelsContentRedact { - fn from(pl: IntPowerLevelsContentRedact) -> Self { - let IntPowerLevelsContentRedact { redact } = pl; - Self { redact } - } -} - -pub(crate) fn deserialize_power_levels_content_redact( - content: &str, - room_version: &RoomVersion, -) -> Result { - if room_version.integer_power_levels { - from_json_str::(content).map(Into::into) - } else { - from_json_str(content) - } -} diff --git a/src/core/matrix/state_res/room_version.rs b/src/core/matrix/state_res/room_version.rs deleted file mode 100644 index 8dfd6cde..00000000 --- a/src/core/matrix/state_res/room_version.rs +++ /dev/null @@ -1,150 +0,0 @@ -use ruma::RoomVersionId; - -use super::{Error, Result}; - -#[derive(Debug)] -#[allow(clippy::exhaustive_enums)] -pub enum RoomDisposition { - /// A room version that has a stable specification. - Stable, - /// A room version that is not yet fully specified. - Unstable, -} - -#[derive(Debug)] -#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] -pub enum EventFormatVersion { - /// $id:server event id format - V1, - /// MSC1659-style $hash event id format: introduced for room v3 - V2, - /// MSC1884-style $hash format: introduced for room v4 - V3, -} - -#[derive(Debug)] -#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] -pub enum StateResolutionVersion { - /// State resolution for rooms at version 1. - V1, - /// State resolution for room at version 2 or later. 
- V2, -} - -#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] -#[allow(clippy::struct_excessive_bools)] -pub struct RoomVersion { - /// The stability of this room. - pub disposition: RoomDisposition, - /// The format of the EventId. - pub event_format: EventFormatVersion, - /// Which state resolution algorithm is used. - pub state_res: StateResolutionVersion, - // FIXME: not sure what this one means? - pub enforce_key_validity: bool, - - /// `m.room.aliases` had special auth rules and redaction rules - /// before room version 6. - /// - /// before MSC2261/MSC2432, - pub special_case_aliases_auth: bool, - /// Strictly enforce canonical json, do not allow: - /// * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1] - /// * Floats - /// * NaN, Infinity, -Infinity - pub strict_canonicaljson: bool, - /// Verify notifications key while checking m.room.power_levels. - /// - /// bool: MSC2209: Check 'notifications' - pub limit_notifications_power_levels: bool, - /// Extra rules when verifying redaction events. - pub extra_redaction_checks: bool, - /// Allow knocking in event authentication. - /// - /// See [room v7 specification](https://spec.matrix.org/latest/rooms/v7/) for more information. - pub allow_knocking: bool, - /// Adds support for the restricted join rule. - /// - /// See: [MSC3289](https://github.com/matrix-org/matrix-spec-proposals/pull/3289) for more information. - pub restricted_join_rules: bool, - /// Adds support for the knock_restricted join rule. - /// - /// See: [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) for more information. - pub knock_restricted_join_rule: bool, - /// Enforces integer power levels. - /// - /// See: [MSC3667](https://github.com/matrix-org/matrix-spec-proposals/pull/3667) for more information. - pub integer_power_levels: bool, - /// Determine the room creator using the `m.room.create` event's `sender`, - /// instead of the event content's `creator` field. 
- /// - /// See: [MSC2175](https://github.com/matrix-org/matrix-spec-proposals/pull/2175) for more information. - pub use_room_create_sender: bool, -} - -impl RoomVersion { - pub const V1: Self = Self { - disposition: RoomDisposition::Stable, - event_format: EventFormatVersion::V1, - state_res: StateResolutionVersion::V1, - enforce_key_validity: false, - special_case_aliases_auth: true, - strict_canonicaljson: false, - limit_notifications_power_levels: false, - extra_redaction_checks: true, - allow_knocking: false, - restricted_join_rules: false, - knock_restricted_join_rule: false, - integer_power_levels: false, - use_room_create_sender: false, - }; - pub const V10: Self = Self { - knock_restricted_join_rule: true, - integer_power_levels: true, - ..Self::V9 - }; - pub const V11: Self = Self { - use_room_create_sender: true, - ..Self::V10 - }; - pub const V2: Self = Self { - state_res: StateResolutionVersion::V2, - ..Self::V1 - }; - pub const V3: Self = Self { - event_format: EventFormatVersion::V2, - extra_redaction_checks: false, - ..Self::V2 - }; - pub const V4: Self = Self { - event_format: EventFormatVersion::V3, - ..Self::V3 - }; - pub const V5: Self = Self { enforce_key_validity: true, ..Self::V4 }; - pub const V6: Self = Self { - special_case_aliases_auth: false, - strict_canonicaljson: true, - limit_notifications_power_levels: true, - ..Self::V5 - }; - pub const V7: Self = Self { allow_knocking: true, ..Self::V6 }; - pub const V8: Self = Self { restricted_join_rules: true, ..Self::V7 }; - pub const V9: Self = Self::V8; - - pub fn new(version: &RoomVersionId) -> Result { - Ok(match version { - | RoomVersionId::V1 => Self::V1, - | RoomVersionId::V2 => Self::V2, - | RoomVersionId::V3 => Self::V3, - | RoomVersionId::V4 => Self::V4, - | RoomVersionId::V5 => Self::V5, - | RoomVersionId::V6 => Self::V6, - | RoomVersionId::V7 => Self::V7, - | RoomVersionId::V8 => Self::V8, - | RoomVersionId::V9 => Self::V9, - | RoomVersionId::V10 => Self::V10, - | 
RoomVersionId::V11 => Self::V11, - | ver => return Err(Error::Unsupported(format!("found version `{ver}`"))), - }) - } -} diff --git a/src/core/matrix/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs deleted file mode 100644 index a666748a..00000000 --- a/src/core/matrix/state_res/test_utils.rs +++ /dev/null @@ -1,691 +0,0 @@ -use std::{ - borrow::Borrow, - collections::{BTreeMap, HashMap, HashSet}, - sync::atomic::{AtomicU64, Ordering::SeqCst}, -}; - -use futures::future::ready; -use ruma::{ - EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, ServerSignatures, - UserId, event_id, - events::{ - TimelineEventType, - pdu::{EventHash, Pdu, RoomV3Pdu}, - room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - }, - }, - int, room_id, uint, user_id, -}; -use serde_json::{ - json, - value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, -}; - -pub(crate) use self::event::PduEvent; -use super::auth_types_for_event; -use crate::{ - Result, info, - matrix::{Event, EventTypeExt, StateMap}, -}; - -static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); - -pub(crate) async fn do_check( - events: &[PduEvent], - edges: Vec>, - expected_state_ids: Vec, -) { - // To activate logging use `RUST_LOG=debug cargo t` - - let init_events = INITIAL_EVENTS(); - - let mut store = TestStore( - init_events - .values() - .chain(events) - .map(|ev| (ev.event_id().to_owned(), ev.clone())) - .collect(), - ); - - // This will be lexi_topo_sorted for resolution - let mut graph = HashMap::new(); - // This is the same as in `resolve` event_id -> OriginalStateEvent - let mut fake_event_map = HashMap::new(); - - // Create the DB of events that led up to this point - // TODO maybe clean up some of these clones it is just tests but... 
- for ev in init_events.values().chain(events) { - graph.insert(ev.event_id().to_owned(), HashSet::new()); - fake_event_map.insert(ev.event_id().to_owned(), ev.clone()); - } - - for pair in INITIAL_EDGES().windows(2) { - if let [a, b] = &pair { - graph - .entry(a.to_owned()) - .or_insert_with(HashSet::new) - .insert(b.clone()); - } - } - - for edge_list in edges { - for pair in edge_list.windows(2) { - if let [a, b] = &pair { - graph - .entry(a.to_owned()) - .or_insert_with(HashSet::new) - .insert(b.clone()); - } - } - } - - // event_id -> PduEvent - let mut event_map: HashMap = HashMap::new(); - // event_id -> StateMap - let mut state_at_event: HashMap> = HashMap::new(); - - // Resolve the current state and add it to the state_at_event map then continue - // on in "time" - for node in super::lexicographical_topological_sort(&graph, &|_id| async { - Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) - }) - .await - .unwrap() - { - let fake_event = fake_event_map.get(&node).unwrap(); - let event_id = fake_event.event_id().to_owned(); - - let prev_events = graph.get(&node).unwrap(); - - let state_before: StateMap = if prev_events.is_empty() { - HashMap::new() - } else if prev_events.len() == 1 { - state_at_event - .get(prev_events.iter().next().unwrap()) - .unwrap() - .clone() - } else { - let state_sets = prev_events - .iter() - .filter_map(|k| state_at_event.get(k)) - .collect::>(); - - info!( - "{:#?}", - state_sets - .iter() - .map(|map| map - .iter() - .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) - .collect::>()) - .collect::>() - ); - - let auth_chain_sets: Vec<_> = state_sets - .iter() - .map(|map| { - store - .auth_event_ids(room_id(), map.values().cloned().collect()) - .unwrap() - }) - .collect(); - - let event_map = &event_map; - let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); - let exists = |id: ::Id| ready(event_map.get(&id).is_some()); - let resolved = super::resolve( - &RoomVersionId::V6, - state_sets, - &auth_chain_sets, - 
&fetch, - &exists, - 1, - ) - .await; - - match resolved { - | Ok(state) => state, - | Err(e) => panic!("resolution for {node} failed: {e}"), - } - }; - - let mut state_after = state_before.clone(); - - let ty = fake_event.event_type(); - let key = fake_event.state_key().unwrap(); - state_after.insert(ty.with_state_key(key), event_id.to_owned()); - - let auth_types = auth_types_for_event( - fake_event.event_type(), - fake_event.sender(), - fake_event.state_key(), - fake_event.content(), - ) - .unwrap(); - - let mut auth_events = vec![]; - for key in auth_types { - if state_before.contains_key(&key) { - auth_events.push(state_before[&key].clone()); - } - } - - // TODO The event is just remade, adding the auth_events and prev_events here - // the `to_pdu_event` was split into `init` and the fn below, could be better - let e = fake_event; - let ev_id = e.event_id(); - let event = to_pdu_event( - e.event_id().as_str(), - e.sender(), - e.event_type().clone(), - e.state_key(), - e.content().to_owned(), - &auth_events, - &prev_events.iter().cloned().collect::>(), - ); - - // We have to update our store, an actual user of this lib would - // be giving us state from a DB. 
- store.0.insert(ev_id.to_owned(), event.clone()); - - state_at_event.insert(node, state_after); - event_map.insert(event_id.to_owned(), store.0.get(ev_id).unwrap().clone()); - } - - let mut expected_state = StateMap::new(); - for node in expected_state_ids { - let ev = event_map.get(&node).unwrap_or_else(|| { - panic!( - "{node} not found in {:?}", - event_map - .keys() - .map(ToString::to_string) - .collect::>() - ) - }); - - let key = ev.event_type().with_state_key(ev.state_key().unwrap()); - - expected_state.insert(key, node); - } - - let start_state = state_at_event.get(event_id!("$START:foo")).unwrap(); - - let end_state = state_at_event - .get(event_id!("$END:foo")) - .unwrap() - .iter() - .filter(|(k, v)| { - expected_state.contains_key(k) - || start_state.get(k) != Some(*v) - // Filter out the dummy messages events. - // These act as points in time where there should be a known state to - // test against. - && **k != ("m.room.message".into(), "dummy".into()) - }) - .map(|(k, v)| (k.clone(), v.clone())) - .collect::>(); - - assert_eq!(expected_state, end_state); -} - -#[allow(clippy::exhaustive_structs)] -pub(crate) struct TestStore(pub(crate) HashMap); - -impl TestStore { - pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result { - self.0 - .get(event_id) - .cloned() - .ok_or_else(|| super::Error::NotFound(format!("{event_id} not found"))) - .map_err(Into::into) - } - - /// Returns a Vec of the related auth events to the given `event`. 
- pub(crate) fn auth_event_ids( - &self, - room_id: &RoomId, - event_ids: Vec, - ) -> Result> { - let mut result = HashSet::new(); - let mut stack = event_ids; - - // DFS for auth event chain - while let Some(ev_id) = stack.pop() { - if result.contains(&ev_id) { - continue; - } - - result.insert(ev_id.clone()); - - let event = self.get_event(room_id, ev_id.borrow())?; - - stack.extend(event.auth_events().map(ToOwned::to_owned)); - } - - Ok(result) - } -} - -// A StateStore implementation for testing -#[allow(clippy::type_complexity)] -impl TestStore { - pub(crate) fn set_up( - &mut self, - ) -> (StateMap, StateMap, StateMap) { - let create_event = to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ); - let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), create_event.clone()); - - let alice_mem = to_pdu_event( - "IMA", - alice(), - TimelineEventType::RoomMember, - Some(alice().as_str()), - member_content_join(), - &[cre.clone()], - &[cre.clone()], - ); - self.0 - .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); - - let join_rules = to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &[cre.clone(), alice_mem.event_id().to_owned()], - &[alice_mem.event_id().to_owned()], - ); - self.0 - .insert(join_rules.event_id().to_owned(), join_rules.clone()); - - // Bob and Charlie join at the same time, so there is a fork - // this will be represented in the state_sets when we resolve - let bob_mem = to_pdu_event( - "IMB", - bob(), - TimelineEventType::RoomMember, - Some(bob().as_str()), - member_content_join(), - &[cre.clone(), join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0 - .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); - - let charlie_mem = to_pdu_event( - 
"IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().as_str()), - member_content_join(), - &[cre, join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0 - .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); - - let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - (state_at_bob, state_at_charlie, expected) - } -} - -pub(crate) fn event_id(id: &str) -> OwnedEventId { - if id.contains('$') { - return id.try_into().unwrap(); - } - - format!("${id}:foo").try_into().unwrap() -} - -pub(crate) fn alice() -> &'static UserId { user_id!("@alice:foo") } - -pub(crate) fn bob() -> &'static UserId { user_id!("@bob:foo") } - -pub(crate) fn charlie() -> &'static UserId { user_id!("@charlie:foo") } - -pub(crate) fn ella() -> &'static UserId { user_id!("@ella:foo") } - -pub(crate) fn zara() -> &'static UserId { user_id!("@zara:foo") } - -pub(crate) fn room_id() -> &'static RoomId { room_id!("!test:foo") } - -pub(crate) fn member_content_ban() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() -} - -pub(crate) fn member_content_join() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() -} - -pub(crate) fn to_init_pdu_event( - id: &str, - sender: &UserId, - ev_type: TimelineEventType, - state_key: Option<&str>, - content: Box, -) -> PduEvent { - let ts = SERVER_TIMESTAMP.fetch_add(1, 
SeqCst); - let id = if id.contains('$') { - id.to_owned() - } else { - format!("${id}:foo") - }; - - let state_key = state_key.map(ToOwned::to_owned); - PduEvent { - event_id: id.try_into().unwrap(), - rest: Pdu::RoomV3Pdu(RoomV3Pdu { - room_id: room_id().to_owned(), - sender: sender.to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), - state_key, - kind: ev_type, - content, - redacts: None, - unsigned: BTreeMap::new(), - auth_events: vec![], - prev_events: vec![], - depth: uint!(0), - hashes: EventHash::new("".to_owned()), - signatures: ServerSignatures::default(), - }), - } -} - -pub(crate) fn to_pdu_event( - id: &str, - sender: &UserId, - ev_type: TimelineEventType, - state_key: Option<&str>, - content: Box, - auth_events: &[S], - prev_events: &[S], -) -> PduEvent -where - S: AsRef, -{ - let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); - let id = if id.contains('$') { - id.to_owned() - } else { - format!("${id}:foo") - }; - let auth_events = auth_events - .iter() - .map(AsRef::as_ref) - .map(event_id) - .collect::>(); - let prev_events = prev_events - .iter() - .map(AsRef::as_ref) - .map(event_id) - .collect::>(); - - let state_key = state_key.map(ToOwned::to_owned); - PduEvent { - event_id: id.try_into().unwrap(), - rest: Pdu::RoomV3Pdu(RoomV3Pdu { - room_id: room_id().to_owned(), - sender: sender.to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), - state_key, - kind: ev_type, - content, - redacts: None, - unsigned: BTreeMap::new(), - auth_events, - prev_events, - depth: uint!(0), - hashes: EventHash::new("".to_owned()), - signatures: ServerSignatures::default(), - }), - } -} - -// all graphs start with these input events -#[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS() -> HashMap { - vec![ - to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ), - to_pdu_event( - "IMA", 
- alice(), - TimelineEventType::RoomMember, - Some(alice().as_str()), - member_content_join(), - &["CREATE"], - &["CREATE"], - ), - to_pdu_event( - "IPOWER", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), - &["CREATE", "IMA"], - &["IMA"], - ), - to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["IPOWER"], - ), - to_pdu_event( - "IMB", - bob(), - TimelineEventType::RoomMember, - Some(bob().as_str()), - member_content_join(), - &["CREATE", "IJR", "IPOWER"], - &["IJR"], - ), - to_pdu_event( - "IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().as_str()), - member_content_join(), - &["CREATE", "IJR", "IPOWER"], - &["IMB"], - ), - to_pdu_event::<&EventId>( - "START", - charlie(), - TimelineEventType::RoomMessage, - Some("dummy"), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - to_pdu_event::<&EventId>( - "END", - charlie(), - TimelineEventType::RoomMessage, - Some("dummy"), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - ] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -// all graphs start with these input events -#[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap { - vec![to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - )] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -#[allow(non_snake_case)] -pub(crate) fn INITIAL_EDGES() -> Vec { - vec!["START", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] - .into_iter() - .map(event_id) - .collect::>() -} - -pub(crate) mod event { - use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, - events::{TimelineEventType, pdu::Pdu}, - 
}; - use serde::{Deserialize, Serialize}; - use serde_json::value::RawValue as RawJsonValue; - - use crate::Event; - - impl Event for PduEvent { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } - - fn room_id(&self) -> &RoomId { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.room_id, - | Pdu::RoomV3Pdu(ev) => &ev.room_id, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn sender(&self) -> &UserId { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.sender, - | Pdu::RoomV3Pdu(ev) => &ev.sender, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn event_type(&self) -> &TimelineEventType { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.kind, - | Pdu::RoomV3Pdu(ev) => &ev.kind, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn content(&self) -> &RawJsonValue { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.content, - | Pdu::RoomV3Pdu(ev) => &ev.content, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, - | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn state_key(&self) -> Option<&str> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), - | Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - #[allow(refining_impl_trait)] - fn prev_events(&self) -> Box + Send + '_> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - #[allow(refining_impl_trait)] - fn auth_events(&self) -> Box 
+ Send + '_> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn redacts(&self) -> Option<&Self::Id> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), - | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - } - - #[derive(Clone, Debug, Deserialize, Serialize)] - #[allow(clippy::exhaustive_structs)] - pub(crate) struct PduEvent { - pub(crate) event_id: OwnedEventId, - #[serde(flatten)] - pub(crate) rest: Pdu, - } -} diff --git a/src/core/metrics/mod.rs b/src/core/metrics/mod.rs deleted file mode 100644 index 8f7a5571..00000000 --- a/src/core/metrics/mod.rs +++ /dev/null @@ -1,78 +0,0 @@ -use std::sync::atomic::AtomicU32; - -use tokio::runtime; -use tokio_metrics::TaskMonitor; -#[cfg(tokio_unstable)] -use tokio_metrics::{RuntimeIntervals, RuntimeMonitor}; - -pub struct Metrics { - _runtime: Option, - - runtime_metrics: Option, - - task_monitor: Option, - - #[cfg(tokio_unstable)] - _runtime_monitor: Option, - - #[cfg(tokio_unstable)] - runtime_intervals: std::sync::Mutex>, - - // TODO: move stats - pub requests_handle_active: AtomicU32, - pub requests_handle_finished: AtomicU32, - pub requests_panic: AtomicU32, -} - -impl Metrics { - #[must_use] - pub fn new(runtime: Option) -> Self { - #[cfg(tokio_unstable)] - let runtime_monitor = runtime.as_ref().map(RuntimeMonitor::new); - - #[cfg(tokio_unstable)] - let runtime_intervals = runtime_monitor.as_ref().map(RuntimeMonitor::intervals); - - Self { - _runtime: runtime.clone(), - - runtime_metrics: runtime.as_ref().map(runtime::Handle::metrics), - - task_monitor: runtime.map(|_| TaskMonitor::new()), - - #[cfg(tokio_unstable)] - _runtime_monitor: runtime_monitor, - - #[cfg(tokio_unstable)] - runtime_intervals: 
std::sync::Mutex::new(runtime_intervals), - - requests_handle_active: AtomicU32::new(0), - requests_handle_finished: AtomicU32::new(0), - requests_panic: AtomicU32::new(0), - } - } - - #[cfg(tokio_unstable)] - pub fn runtime_interval(&self) -> Option { - self.runtime_intervals - .lock() - .expect("locked") - .as_mut() - .map(Iterator::next) - .expect("next interval") - } - - #[inline] - pub fn task_root(&self) -> Option<&TaskMonitor> { self.task_monitor.as_ref() } - - #[inline] - pub fn num_workers(&self) -> usize { - self.runtime_metrics() - .map_or(0, runtime::RuntimeMetrics::num_workers) - } - - #[inline] - pub fn runtime_metrics(&self) -> Option<&runtime::RuntimeMetrics> { - self.runtime_metrics.as_ref() - } -} diff --git a/src/core/mod.rs b/src/core/mod.rs deleted file mode 100644 index b91cdf0b..00000000 --- a/src/core/mod.rs +++ /dev/null @@ -1,43 +0,0 @@ -#![type_length_limit = "12288"] - -pub mod alloc; -pub mod config; -pub mod debug; -pub mod error; -pub mod info; -pub mod log; -pub mod matrix; -pub mod metrics; -pub mod mods; -pub mod server; -pub mod utils; - -pub use ::arrayvec; -pub use ::http; -pub use ::ruma; -pub use ::smallstr; -pub use ::smallvec; -pub use ::toml; -pub use ::tracing; -pub use config::Config; -pub use error::Error; -pub use info::{rustc_flags_capture, version, version::version}; -pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res}; -pub use server::Server; -pub use utils::{ctor, dtor, implement, result, result::Result}; - -pub use crate as conduwuit_core; - -rustc_flags_capture! {} - -#[cfg(any(not(conduwuit_mods), not(feature = "conduwuit_mods")))] -pub mod mods { - #[macro_export] - macro_rules! mod_ctor { - () => {}; - } - #[macro_export] - macro_rules! 
mod_dtor { - () => {}; - } -} diff --git a/src/core/mods/canary.rs b/src/core/mods/canary.rs deleted file mode 100644 index 6095608c..00000000 --- a/src/core/mods/canary.rs +++ /dev/null @@ -1,28 +0,0 @@ -use std::sync::atomic::{AtomicI32, Ordering}; - -const ORDERING: Ordering = Ordering::Relaxed; -static STATIC_DTORS: AtomicI32 = AtomicI32::new(0); - -/// Called by Module::unload() to indicate module is about to be unloaded and -/// static destruction is intended. This will allow verifying it actually took -/// place. -pub(crate) fn prepare() { - let count = STATIC_DTORS.fetch_sub(1, ORDERING); - debug_assert!(count <= 0, "STATIC_DTORS should not be greater than zero."); -} - -/// Called by static destructor of a module. This call should only be found -/// inside a mod_fini! macro. Do not call from anywhere else. -#[inline(always)] -pub fn report() { let _count = STATIC_DTORS.fetch_add(1, ORDERING); } - -/// Called by Module::unload() (see check()) with action in case a check() -/// failed. This can allow a stuck module to be noted while allowing for other -/// independent modules to be diagnosed. -pub(crate) fn check_and_reset() -> bool { STATIC_DTORS.swap(0, ORDERING) == 0 } - -/// Called by Module::unload() after unload to verify static destruction took -/// place. A call to prepare() must be made prior to Module::unload() and making -/// this call. -#[allow(dead_code)] -pub(crate) fn check() -> bool { STATIC_DTORS.load(ORDERING) == 0 } diff --git a/src/core/mods/macros.rs b/src/core/mods/macros.rs deleted file mode 100644 index 1f3b7f5f..00000000 --- a/src/core/mods/macros.rs +++ /dev/null @@ -1,44 +0,0 @@ -#[macro_export] -macro_rules! mod_ctor { - ( $($body:block)? ) => { - $crate::mod_init! {{ - $crate::debug_info!("Module loaded"); - $($body)? - }} - } -} - -#[macro_export] -macro_rules! mod_dtor { - ( $($body:block)? ) => { - $crate::mod_fini! {{ - $crate::debug_info!("Module unloading"); - $($body)? 
- $crate::mods::canary::report(); - }} - } -} - -#[macro_export] -macro_rules! mod_init { - ($body:block) => { - #[used] - #[cfg_attr(target_family = "unix", unsafe(link_section = ".init_array"))] - static MOD_INIT: unsafe extern "C" fn() = { _mod_init }; - - #[cfg_attr(target_family = "unix", unsafe(link_section = ".text.startup"))] - unsafe extern "C" fn _mod_init() -> () $body - }; -} - -#[macro_export] -macro_rules! mod_fini { - ($body:block) => { - #[used] - #[cfg_attr(target_family = "unix", unsafe(link_section = ".fini_array"))] - static MOD_FINI: unsafe extern "C" fn() = { _mod_fini }; - - #[cfg_attr(target_family = "unix", unsafe(link_section = ".text.startup"))] - unsafe extern "C" fn _mod_fini() -> () $body - }; -} diff --git a/src/core/mods/mod.rs b/src/core/mods/mod.rs deleted file mode 100644 index b8f06f29..00000000 --- a/src/core/mods/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -#![cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] - -pub(crate) use libloading::os::unix::{Library, Symbol}; - -pub mod canary; -pub mod macros; -pub mod module; -pub mod new; -pub mod path; - -pub use module::Module; diff --git a/src/core/mods/module.rs b/src/core/mods/module.rs deleted file mode 100644 index b65bbca2..00000000 --- a/src/core/mods/module.rs +++ /dev/null @@ -1,74 +0,0 @@ -use std::{ - ffi::{CString, OsString}, - time::SystemTime, -}; - -use super::{Library, Symbol, canary, new, path}; -use crate::{Result, error}; - -pub struct Module { - handle: Option, - loaded: SystemTime, - path: OsString, -} - -impl Module { - pub fn from_name(name: &str) -> Result { Self::from_path(path::from_name(name)?) 
} - - pub fn from_path(path: OsString) -> Result { - Ok(Self { - handle: Some(new::from_path(&path)?), - loaded: SystemTime::now(), - path, - }) - } - - pub fn unload(&mut self) { - canary::prepare(); - self.close(); - if !canary::check_and_reset() { - let name = self.name().expect("Module is named"); - error!("Module {name:?} is stuck and failed to unload."); - } - } - - pub(crate) fn close(&mut self) { - if let Some(handle) = self.handle.take() { - handle.close().expect("Module handle closed"); - } - } - - pub fn get(&self, name: &str) -> Result> { - let cname = CString::new(name.to_owned()).expect("terminated string from provided name"); - let handle = self - .handle - .as_ref() - .expect("backing library loaded by this instance"); - // SAFETY: Calls dlsym(3) on unix platforms. This might not have to be unsafe - // if wrapped in libloading with_dlerror(). - let sym = unsafe { handle.get::(cname.as_bytes()) }; - let sym = sym.expect("symbol found; binding successful"); - - Ok(sym) - } - - pub fn deleted(&self) -> Result { - let mtime = path::mtime(self.path())?; - let res = mtime > self.loaded; - - Ok(res) - } - - pub fn name(&self) -> Result { path::to_name(self.path()) } - - #[must_use] - pub fn path(&self) -> &OsString { &self.path } -} - -impl Drop for Module { - fn drop(&mut self) { - if self.handle.is_some() { - self.unload(); - } - } -} diff --git a/src/core/mods/new.rs b/src/core/mods/new.rs deleted file mode 100644 index 258fdedc..00000000 --- a/src/core/mods/new.rs +++ /dev/null @@ -1,23 +0,0 @@ -use std::ffi::OsStr; - -use super::{Library, path}; -use crate::{Err, Result}; - -const OPEN_FLAGS: i32 = libloading::os::unix::RTLD_LAZY | libloading::os::unix::RTLD_GLOBAL; - -pub fn from_name(name: &str) -> Result { - let path = path::from_name(name)?; - from_path(&path) -} - -pub fn from_path(path: &OsStr) -> Result { - //SAFETY: Calls dlopen(3) on unix platforms. This might not have to be unsafe - // if wrapped in with_dlerror. 
- let lib = unsafe { Library::open(Some(path), OPEN_FLAGS) }; - if let Err(e) = lib { - let name = path::to_name(path)?; - return Err!("Loading module {name:?} failed: {e}"); - } - - Ok(lib.expect("module loaded")) -} diff --git a/src/core/mods/path.rs b/src/core/mods/path.rs deleted file mode 100644 index cde251b3..00000000 --- a/src/core/mods/path.rs +++ /dev/null @@ -1,40 +0,0 @@ -use std::{ - env::current_exe, - ffi::{OsStr, OsString}, - path::{Path, PathBuf}, - time::SystemTime, -}; - -use libloading::library_filename; - -use crate::Result; - -pub fn from_name(name: &str) -> Result { - let root = PathBuf::new(); - let exe_path = current_exe()?; - let exe_dir = exe_path.parent().unwrap_or(&root); - let mut mod_path = exe_dir.to_path_buf(); - let mod_file = library_filename(name); - mod_path.push(mod_file); - - Ok(mod_path.into_os_string()) -} - -pub fn to_name(path: &OsStr) -> Result { - let path = Path::new(path); - let name = path - .file_stem() - .expect("path file stem") - .to_str() - .expect("name string"); - let name = name.strip_prefix("lib").unwrap_or(name).to_owned(); - - Ok(name) -} - -pub fn mtime(path: &OsStr) -> Result { - let meta = std::fs::metadata(path)?; - let mtime = meta.modified()?; - - Ok(mtime) -} diff --git a/src/core/server.rs b/src/core/server.rs deleted file mode 100644 index 4b673f32..00000000 --- a/src/core/server.rs +++ /dev/null @@ -1,152 +0,0 @@ -use std::{ - sync::{ - Arc, - atomic::{AtomicBool, Ordering}, - }, - time::SystemTime, -}; - -use ruma::OwnedServerName; -use tokio::{runtime, sync::broadcast}; - -use crate::{Err, Result, config, config::Config, log::Log, metrics::Metrics}; - -/// Server runtime state; public portion -pub struct Server { - /// Configured name of server. This is the same as the one in the config - /// but developers can (and should) reference this string instead. 
- pub name: OwnedServerName, - - /// Server-wide configuration instance - pub config: config::Manager, - - /// Timestamp server was started; used for uptime. - pub started: SystemTime, - - /// Reload/shutdown pending indicator; server is shutting down. This is an - /// observable used on shutdown and should not be modified. - pub stopping: AtomicBool, - - /// Reload/shutdown desired indicator; when false, shutdown is desired. This - /// is an observable used on shutdown and modifying is not recommended. - pub reloading: AtomicBool, - - /// Restart desired; when true, restart it desired after shutdown. - pub restarting: AtomicBool, - - /// Handle to the runtime - pub runtime: Option, - - /// Reload/shutdown signal - pub signal: broadcast::Sender<&'static str>, - - /// Logging subsystem state - pub log: Log, - - /// Metrics subsystem state - pub metrics: Metrics, -} - -impl Server { - #[must_use] - pub fn new(config: Config, runtime: Option, log: Log) -> Self { - Self { - name: config.server_name.clone(), - config: config::Manager::new(config), - started: SystemTime::now(), - stopping: AtomicBool::new(false), - reloading: AtomicBool::new(false), - restarting: AtomicBool::new(false), - runtime: runtime.clone(), - signal: broadcast::channel::<&'static str>(1).0, - log, - metrics: Metrics::new(runtime), - } - } - - pub fn reload(&self) -> Result<()> { - if cfg!(any(not(conduwuit_mods), not(feature = "conduwuit_mods"))) { - return Err!("Reloading not enabled"); - } - - if self.reloading.swap(true, Ordering::AcqRel) { - return Err!("Reloading already in progress"); - } - - if self.stopping.swap(true, Ordering::AcqRel) { - return Err!("Shutdown already in progress"); - } - - self.signal("SIGINT").inspect_err(|_| { - self.stopping.store(false, Ordering::Release); - self.reloading.store(false, Ordering::Release); - }) - } - - pub fn restart(&self) -> Result { - if self.restarting.swap(true, Ordering::AcqRel) { - return Err!("Restart already in progress"); - } - - 
self.shutdown().inspect_err(|_| { - self.restarting.store(false, Ordering::Release); - }) - } - - pub fn shutdown(&self) -> Result { - if self.stopping.swap(true, Ordering::AcqRel) { - return Err!("Shutdown already in progress"); - } - - self.signal("SIGTERM").inspect_err(|_| { - self.stopping.store(false, Ordering::Release); - }) - } - - pub fn signal(&self, sig: &'static str) -> Result<()> { - if let Err(e) = self.signal.send(sig) { - return Err!("Failed to send signal: {e}"); - } - - Ok(()) - } - - #[inline] - pub async fn until_shutdown(self: &Arc) { - while self.running() { - self.signal.subscribe().recv().await.ok(); - } - } - - #[inline] - pub fn runtime(&self) -> &runtime::Handle { - self.runtime - .as_ref() - .expect("runtime handle available in Server") - } - - #[inline] - pub fn check_running(&self) -> Result { - use std::{io, io::ErrorKind::Interrupted}; - - self.running() - .then_some(()) - .ok_or_else(|| io::Error::new(Interrupted, "Server shutting down")) - .map_err(Into::into) - } - - #[inline] - pub fn running(&self) -> bool { !self.is_stopping() } - - #[inline] - pub fn is_stopping(&self) -> bool { self.stopping.load(Ordering::Relaxed) } - - #[inline] - pub fn is_reloading(&self) -> bool { self.reloading.load(Ordering::Relaxed) } - - #[inline] - pub fn is_restarting(&self) -> bool { self.restarting.load(Ordering::Relaxed) } - - #[inline] - pub fn is_ours(&self, name: &str) -> bool { name == self.config.server_name } -} diff --git a/src/core/utils/arrayvec.rs b/src/core/utils/arrayvec.rs deleted file mode 100644 index 685aaf18..00000000 --- a/src/core/utils/arrayvec.rs +++ /dev/null @@ -1,15 +0,0 @@ -use ::arrayvec::ArrayVec; - -pub trait ArrayVecExt { - fn extend_from_slice(&mut self, other: &[T]) -> &mut Self; -} - -impl ArrayVecExt for ArrayVec { - #[inline] - fn extend_from_slice(&mut self, other: &[T]) -> &mut Self { - self.try_extend_from_slice(other) - .expect("Insufficient buffer capacity to extend from slice"); - - self - } -} diff --git 
a/src/core/utils/bool.rs b/src/core/utils/bool.rs deleted file mode 100644 index b16f63e9..00000000 --- a/src/core/utils/bool.rs +++ /dev/null @@ -1,104 +0,0 @@ -//! Trait BoolExt - -/// Boolean extensions and chain.starters -pub trait BoolExt { - fn and(self, t: Option) -> Option; - - fn and_then Option>(self, f: F) -> Option; - - #[must_use] - fn clone_or(self, err: T, t: &T) -> T; - - #[must_use] - fn copy_or(self, err: T, t: T) -> T; - - #[must_use] - fn expect(self, msg: &str) -> Self; - - #[must_use] - fn expect_false(self, msg: &str) -> Self; - - fn into_option(self) -> Option<()>; - - #[allow(clippy::result_unit_err)] - fn into_result(self) -> Result<(), ()>; - - fn map T>(self, f: F) -> T - where - Self: Sized; - - fn map_ok_or T>(self, err: E, f: F) -> Result; - - fn map_or T>(self, err: T, f: F) -> T; - - fn map_or_else T>(self, err: F, f: F) -> T; - - fn ok_or(self, err: E) -> Result<(), E>; - - fn ok_or_else E>(self, err: F) -> Result<(), E>; - - fn or T>(self, f: F) -> Option; - - fn or_some(self, t: T) -> Option; -} - -impl BoolExt for bool { - #[inline] - fn and(self, t: Option) -> Option { self.then_some(t).flatten() } - - #[inline] - fn and_then Option>(self, f: F) -> Option { self.then(f).flatten() } - - #[inline] - fn clone_or(self, err: T, t: &T) -> T { self.map_or(err, || t.clone()) } - - #[inline] - fn copy_or(self, err: T, t: T) -> T { self.map_or(err, || t) } - - #[inline] - fn expect(self, msg: &str) -> Self { self.then_some(true).expect(msg) } - - #[inline] - fn expect_false(self, msg: &str) -> Self { (!self).then_some(false).expect(msg) } - - #[inline] - fn into_option(self) -> Option<()> { self.then_some(()) } - - #[inline] - fn into_result(self) -> Result<(), ()> { self.ok_or(()) } - - #[inline] - fn map T>(self, f: F) -> T - where - Self: Sized, - { - f(self) - } - - #[inline] - fn map_ok_or T>(self, err: E, f: F) -> Result { - self.ok_or(err).map(|()| f()) - } - - #[inline] - fn map_or T>(self, err: T, f: F) -> T { 
self.then(f).unwrap_or(err) } - - #[inline] - fn map_or_else T>(self, err: F, f: F) -> T { - self.then(f).unwrap_or_else(err) - } - - #[inline] - fn ok_or(self, err: E) -> Result<(), E> { self.into_option().ok_or(err) } - - #[inline] - fn ok_or_else E>(self, err: F) -> Result<(), E> { - self.into_option().ok_or_else(err) - } - - #[inline] - fn or T>(self, f: F) -> Option { (!self).then(f) } - - #[inline] - fn or_some(self, t: T) -> Option { (!self).then_some(t) } -} diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs deleted file mode 100644 index 507b9b9a..00000000 --- a/src/core/utils/bytes.rs +++ /dev/null @@ -1,58 +0,0 @@ -use bytesize::ByteSize; - -use crate::{Result, err}; - -/// Parse a human-writable size string w/ si-unit suffix into integer -#[inline] -pub fn from_str(str: &str) -> Result { - let bytes: ByteSize = str - .parse() - .map_err(|e| err!(Arithmetic("Failed to parse byte size: {e}")))?; - - let bytes: usize = bytes - .as_u64() - .try_into() - .map_err(|e| err!(Arithmetic("Failed to convert u64 to usize: {e}")))?; - - Ok(bytes) -} - -/// Output a human-readable size string w/ iec-unit suffix -#[inline] -#[must_use] -pub fn pretty(bytes: usize) -> String { - let bytes: u64 = bytes.try_into().expect("failed to convert usize to u64"); - - ByteSize::b(bytes).display().iec().to_string() -} - -#[inline] -#[must_use] -pub fn increment(old: Option<&[u8]>) -> [u8; 8] { - old.map_or(0_u64, u64_from_bytes_or_zero) - .wrapping_add(1) - .to_be_bytes() -} - -/// Parses 8 big-endian bytes into an u64; panic on invalid argument -#[inline] -#[must_use] -pub fn u64_from_u8(bytes: &[u8]) -> u64 { - u64_from_bytes(bytes).expect("must slice at least 8 bytes") -} - -/// Parses the big-endian bytes into an u64. -#[inline] -#[must_use] -pub fn u64_from_bytes_or_zero(bytes: &[u8]) -> u64 { u64_from_bytes(bytes).unwrap_or(0) } - -/// Parses the big-endian bytes into an u64. 
-#[inline] -pub fn u64_from_bytes(bytes: &[u8]) -> Result { Ok(u64_from_u8x8(*u8x8_from_bytes(bytes)?)) } - -#[inline] -#[must_use] -pub fn u64_from_u8x8(bytes: [u8; 8]) -> u64 { u64::from_be_bytes(bytes) } - -#[inline] -pub fn u8x8_from_bytes(bytes: &[u8]) -> Result<&[u8; 8]> { Ok(bytes.try_into()?) } diff --git a/src/core/utils/content_disposition.rs b/src/core/utils/content_disposition.rs deleted file mode 100644 index 82f11732..00000000 --- a/src/core/utils/content_disposition.rs +++ /dev/null @@ -1,139 +0,0 @@ -use std::borrow::Cow; - -use ruma::http_headers::{ContentDisposition, ContentDispositionType}; - -use crate::debug_info; - -/// as defined by MSC2702 -const ALLOWED_INLINE_CONTENT_TYPES: [&str; 26] = [ - // keep sorted - "application/json", - "application/ld+json", - "audio/aac", - "audio/flac", - "audio/mp4", - "audio/mpeg", - "audio/ogg", - "audio/wav", - "audio/wave", - "audio/webm", - "audio/x-flac", - "audio/x-pn-wav", - "audio/x-wav", - "image/apng", - "image/avif", - "image/gif", - "image/jpeg", - "image/png", - "image/webp", - "text/css", - "text/csv", - "text/plain", - "video/mp4", - "video/ogg", - "video/quicktime", - "video/webm", -]; - -/// Returns a Content-Disposition of `attachment` or `inline`, depending on the -/// Content-Type against MSC2702 list of safe inline Content-Types -/// (`ALLOWED_INLINE_CONTENT_TYPES`) -#[must_use] -pub fn content_disposition_type(content_type: Option<&str>) -> ContentDispositionType { - let Some(content_type) = content_type else { - debug_info!("No Content-Type was given, assuming attachment for Content-Disposition"); - return ContentDispositionType::Attachment; - }; - - debug_assert!( - ALLOWED_INLINE_CONTENT_TYPES.is_sorted(), - "ALLOWED_INLINE_CONTENT_TYPES is not sorted" - ); - - let content_type: Cow<'_, str> = content_type - .split(';') - .next() - .unwrap_or(content_type) - .to_ascii_lowercase() - .into(); - - if ALLOWED_INLINE_CONTENT_TYPES - .binary_search(&content_type.as_ref()) - .is_ok() - { - 
ContentDispositionType::Inline - } else { - ContentDispositionType::Attachment - } -} - -/// sanitises the file name for the Content-Disposition using -/// `sanitize_filename` crate -#[tracing::instrument(level = "debug")] -pub fn sanitise_filename(filename: &str) -> String { - sanitize_filename::sanitize_with_options(filename, sanitize_filename::Options { - truncate: false, - ..Default::default() - }) -} - -/// creates the final Content-Disposition based on whether the filename exists -/// or not, or if a requested filename was specified (media download with -/// filename) -/// -/// if filename exists: -/// `Content-Disposition: attachment/inline; filename=filename.ext` -/// -/// else: `Content-Disposition: attachment/inline` -pub fn make_content_disposition( - content_disposition: Option<&ContentDisposition>, - content_type: Option<&str>, - filename: Option<&str>, -) -> ContentDisposition { - ContentDisposition::new(content_disposition_type(content_type)).with_filename( - filename - .or_else(|| { - content_disposition - .and_then(|content_disposition| content_disposition.filename.as_deref()) - }) - .map(sanitise_filename), - ) -} - -#[cfg(test)] -mod tests { - #[test] - fn string_sanitisation() { - const SAMPLE: &str = "🏳️‍⚧️this\\r\\n įs \r\\n ä \\r\nstrïng 🥴that\n\r \ - ../../../../../../../may be\r\n malicious🏳️‍⚧️"; - const SANITISED: &str = "🏳️‍⚧️thisrn įs n ä rstrïng 🥴that ..............may be malicious🏳️‍⚧️"; - - let options = sanitize_filename::Options { - windows: true, - truncate: true, - replacement: "", - }; - - // cargo test -- --nocapture - println!("{SAMPLE}"); - println!("{}", sanitize_filename::sanitize_with_options(SAMPLE, options.clone())); - println!("{SAMPLE:?}"); - println!("{:?}", sanitize_filename::sanitize_with_options(SAMPLE, options.clone())); - - assert_eq!(SANITISED, sanitize_filename::sanitize_with_options(SAMPLE, options.clone())); - } - - #[test] - fn empty_sanitisation() { - use crate::utils::string::EMPTY; - - let result = - 
sanitize_filename::sanitize_with_options(EMPTY, sanitize_filename::Options { - windows: true, - truncate: true, - replacement: "", - }); - - assert_eq!(EMPTY, result); - } -} diff --git a/src/core/utils/debug.rs b/src/core/utils/debug.rs deleted file mode 100644 index b16ae754..00000000 --- a/src/core/utils/debug.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::fmt; - -/// Debug-formats the given slice, but only up to the first `max_len` elements. -/// Any further elements are replaced by an ellipsis. -/// -/// See also [`slice_truncated()`], -pub struct TruncatedSlice<'a, T> { - inner: &'a [T], - max_len: usize, -} - -impl fmt::Debug for TruncatedSlice<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.inner.len() <= self.max_len { - write!(f, "{:?}", self.inner) - } else { - f.debug_list() - .entries(&self.inner[..self.max_len]) - .entry(&"...") - .finish() - } - } -} - -/// See [`TruncatedSlice`]. Useful for `#[instrument]`: -/// -/// ``` -/// use conduwuit_core::utils::debug::slice_truncated; -/// -/// #[tracing::instrument(fields(foos = slice_truncated(foos, 42)))] -/// fn bar(foos: &[&str]); -/// ``` -pub fn slice_truncated( - slice: &[T], - max_len: usize, -) -> tracing::field::DebugValue> { - tracing::field::debug(TruncatedSlice { inner: slice, max_len }) -} diff --git a/src/core/utils/defer.rs b/src/core/utils/defer.rs deleted file mode 100644 index 4887d164..00000000 --- a/src/core/utils/defer.rs +++ /dev/null @@ -1,26 +0,0 @@ -#[macro_export] -macro_rules! defer { - ($body:block) => { - struct _Defer_ { - closure: F, - } - - impl Drop for _Defer_ { - fn drop(&mut self) { (self.closure)(); } - } - - let _defer_ = _Defer_ { closure: || $body }; - }; - - ($body:expr_2021) => { - $crate::defer! {{ $body }} - }; -} - -#[macro_export] -macro_rules! scope_restore { - ($val:ident, $ours:expr_2021) => { - let theirs = $crate::utils::exchange($val, $ours); - $crate::defer! 
{{ *$val = theirs; }}; - }; -} diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs deleted file mode 100644 index 24f239ff..00000000 --- a/src/core/utils/future/bool_ext.rs +++ /dev/null @@ -1,82 +0,0 @@ -//! Extended external extensions to futures::FutureExt - -use std::marker::Unpin; - -use futures::{ - Future, FutureExt, - future::{select_ok, try_join, try_join_all, try_select}, -}; - -pub trait BoolExt -where - Self: Future + Send, -{ - fn and(self, b: B) -> impl Future + Send - where - B: Future + Send, - Self: Sized; - - fn or(self, b: B) -> impl Future + Send - where - B: Future + Send + Unpin, - Self: Sized + Unpin; -} - -impl BoolExt for Fut -where - Fut: Future + Send, -{ - #[inline] - fn and(self, b: B) -> impl Future + Send - where - B: Future + Send, - Self: Sized, - { - type Result = crate::Result<(), ()>; - - let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); - - let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); - - try_join(a, b).map(|result| result.is_ok()) - } - - #[inline] - fn or(self, b: B) -> impl Future + Send - where - B: Future + Send + Unpin, - Self: Sized + Unpin, - { - type Result = crate::Result<(), ()>; - - let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); - - let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); - - try_select(a, b).map(|result| result.is_ok()) - } -} - -pub async fn and(args: I) -> impl Future + Send -where - I: Iterator + Send, - F: Future + Send, -{ - type Result = crate::Result<(), ()>; - - let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); - - try_join_all(args).map(|result| result.is_ok()) -} - -pub async fn or(args: I) -> impl Future + Send -where - I: Iterator + Send, - F: Future + Send + Unpin, -{ - type Result = crate::Result<(), ()>; - - let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); - - select_ok(args).map(|result| result.is_ok()) -} diff --git a/src/core/utils/future/ext_ext.rs 
b/src/core/utils/future/ext_ext.rs deleted file mode 100644 index 219bb664..00000000 --- a/src/core/utils/future/ext_ext.rs +++ /dev/null @@ -1,34 +0,0 @@ -//! Extended external extensions to futures::FutureExt - -use std::marker::Unpin; - -use futures::{Future, future, future::Select}; - -/// This interface is not necessarily complete; feel free to add as-needed. -pub trait ExtExt -where - Self: Future + Send, -{ - fn until(self, f: F) -> Select - where - Self: Sized, - F: FnOnce() -> B, - A: Future + From + Send + Unpin, - B: Future + Send + Unpin; -} - -impl ExtExt for Fut -where - Fut: Future + Send, -{ - #[inline] - fn until(self, f: F) -> Select - where - Self: Sized, - F: FnOnce() -> B, - A: Future + From + Send + Unpin, - B: Future + Send + Unpin, - { - future::select(self.into(), f()) - } -} diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs deleted file mode 100644 index d896e66d..00000000 --- a/src/core/utils/future/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod bool_ext; -mod ext_ext; -mod option_ext; -mod option_stream; -mod ready_eq_ext; -mod try_ext_ext; - -pub use bool_ext::{BoolExt, and, or}; -pub use ext_ext::ExtExt; -pub use option_ext::OptionExt; -pub use option_stream::OptionStream; -pub use ready_eq_ext::ReadyEqExt; -pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/option_ext.rs b/src/core/utils/future/option_ext.rs deleted file mode 100644 index 920dd044..00000000 --- a/src/core/utils/future/option_ext.rs +++ /dev/null @@ -1,25 +0,0 @@ -#![allow(clippy::wrong_self_convention)] - -use futures::{Future, FutureExt, future::OptionFuture}; - -pub trait OptionExt { - fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send; - - fn is_some_and(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send; -} - -impl OptionExt for OptionFuture -where - Fut: Future + Send, - T: Send, -{ - #[inline] - fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { - self.map(|o| 
o.as_ref().is_none_or(f)) - } - - #[inline] - fn is_some_and(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { - self.map(|o| o.as_ref().is_some_and(f)) - } -} diff --git a/src/core/utils/future/option_stream.rs b/src/core/utils/future/option_stream.rs deleted file mode 100644 index 81130c87..00000000 --- a/src/core/utils/future/option_stream.rs +++ /dev/null @@ -1,25 +0,0 @@ -use futures::{Future, FutureExt, Stream, StreamExt, future::OptionFuture}; - -use super::super::IterStream; - -pub trait OptionStream { - fn stream(self) -> impl Stream + Send; -} - -impl OptionStream for OptionFuture -where - Fut: Future + Send, - S: Stream + Send, - O: IntoIterator + Send, - ::IntoIter: Send, - T: Send, -{ - #[inline] - fn stream(self) -> impl Stream + Send { - self.map(|opt| opt.map(|(curr, next)| curr.into_iter().stream().chain(next))) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten() - } -} diff --git a/src/core/utils/future/ready_eq_ext.rs b/src/core/utils/future/ready_eq_ext.rs deleted file mode 100644 index 1625adae..00000000 --- a/src/core/utils/future/ready_eq_ext.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! Future extension for Partial Equality against present value - -use futures::{Future, FutureExt}; - -pub trait ReadyEqExt -where - Self: Future + Send + Sized, - T: PartialEq + Send + Sync, -{ - fn eq(self, t: &T) -> impl Future + Send; - - fn ne(self, t: &T) -> impl Future + Send; -} - -impl ReadyEqExt for Fut -where - Fut: Future + Send + Sized, - T: PartialEq + Send + Sync, -{ - #[inline] - fn eq(self, t: &T) -> impl Future + Send { self.map(move |r| r.eq(t)) } - - #[inline] - fn ne(self, t: &T) -> impl Future + Send { self.map(move |r| r.ne(t)) } -} diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs deleted file mode 100644 index b2114e56..00000000 --- a/src/core/utils/future/try_ext_ext.rs +++ /dev/null @@ -1,152 +0,0 @@ -//! 
Extended external extensions to futures::TryFutureExt -#![allow(clippy::type_complexity)] -// is_ok() has to consume *self rather than borrow. This extension is for a -// caller only ever caring about result status while discarding all contents. -#![allow(clippy::wrong_self_convention)] - -use std::marker::Unpin; - -use futures::{ - TryFuture, TryFutureExt, future, - future::{MapOkOrElse, TrySelect, UnwrapOrElse}, -}; - -/// This interface is not necessarily complete; feel free to add as-needed. -pub trait TryExtExt -where - Self: TryFuture + Send, -{ - fn is_err( - self, - ) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> - where - Self: Sized; - - #[allow(clippy::wrong_self_convention)] - fn is_ok( - self, - ) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> - where - Self: Sized; - - fn map_ok_or( - self, - default: U, - f: F, - ) -> MapOkOrElse U, impl FnOnce(Self::Error) -> U> - where - F: FnOnce(Self::Ok) -> U, - Self: Send + Sized; - - fn ok( - self, - ) -> MapOkOrElse< - Self, - impl FnOnce(Self::Ok) -> Option, - impl FnOnce(Self::Error) -> Option, - > - where - Self: Sized; - - fn try_until(self, f: F) -> TrySelect - where - Self: Sized, - F: FnOnce() -> B, - A: TryFuture + From + Send + Unpin, - B: TryFuture + Send + Unpin; - - fn unwrap_or( - self, - default: Self::Ok, - ) -> UnwrapOrElse Self::Ok> - where - Self: Sized; - - fn unwrap_or_default(self) -> UnwrapOrElse Self::Ok> - where - Self: Sized, - Self::Ok: Default; -} - -impl TryExtExt for Fut -where - Fut: TryFuture + Send, -{ - #[inline] - fn is_err( - self, - ) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> - where - Self: Sized, - { - self.map_ok_or(true, |_| false) - } - - #[inline] - fn is_ok( - self, - ) -> MapOkOrElse bool, impl FnOnce(Self::Error) -> bool> - where - Self: Sized, - { - self.map_ok_or(false, |_| true) - } - - #[inline] - fn map_ok_or( - self, - default: U, - f: F, - ) -> MapOkOrElse U, impl FnOnce(Self::Error) -> U> - where - F: FnOnce(Self::Ok) -> U, - 
Self: Send + Sized, - { - self.map_ok_or_else(|_| default, f) - } - - #[inline] - fn ok( - self, - ) -> MapOkOrElse< - Self, - impl FnOnce(Self::Ok) -> Option, - impl FnOnce(Self::Error) -> Option, - > - where - Self: Sized, - { - self.map_ok_or(None, Some) - } - - #[inline] - fn try_until(self, f: F) -> TrySelect - where - Self: Sized, - F: FnOnce() -> B, - A: TryFuture + From + Send + Unpin, - B: TryFuture + Send + Unpin, - { - future::try_select(self.into(), f()) - } - - #[inline] - fn unwrap_or( - self, - default: Self::Ok, - ) -> UnwrapOrElse Self::Ok> - where - Self: Sized, - { - self.unwrap_or_else(move |_| default) - } - - #[inline] - fn unwrap_or_default(self) -> UnwrapOrElse Self::Ok> - where - Self: Sized, - Self::Ok: Default, - { - self.unwrap_or(Default::default()) - } -} diff --git a/src/core/utils/hash.rs b/src/core/utils/hash.rs deleted file mode 100644 index c12d4f66..00000000 --- a/src/core/utils/hash.rs +++ /dev/null @@ -1,10 +0,0 @@ -mod argon; -pub mod sha256; - -use crate::Result; - -pub fn verify_password(password: &str, password_hash: &str) -> Result { - argon::verify_password(password, password_hash) -} - -pub fn password(password: &str) -> Result { argon::password(password) } diff --git a/src/core/utils/hash/argon.rs b/src/core/utils/hash/argon.rs deleted file mode 100644 index 66dfab75..00000000 --- a/src/core/utils/hash/argon.rs +++ /dev/null @@ -1,68 +0,0 @@ -use std::sync::OnceLock; - -use argon2::{ - Algorithm, Argon2, Params, PasswordHash, PasswordHasher, PasswordVerifier, Version, - password_hash, password_hash::SaltString, -}; - -use crate::{Error, Result, err}; - -const M_COST: u32 = Params::DEFAULT_M_COST; // memory size in 1 KiB blocks -const T_COST: u32 = Params::DEFAULT_T_COST; // nr of iterations -const P_COST: u32 = Params::DEFAULT_P_COST; // parallelism - -static ARGON: OnceLock> = OnceLock::new(); - -fn init_argon() -> Argon2<'static> { - // 19456 Kib blocks, iterations = 2, parallelism = 1 - // * - debug_assert!(M_COST == 
19_456, "M_COST default changed"); - debug_assert!(T_COST == 2, "T_COST default changed"); - debug_assert!(P_COST == 1, "P_COST default changed"); - - let algorithm = Algorithm::Argon2id; - let version = Version::default(); - let out_len: Option = None; - let params = Params::new(M_COST, T_COST, P_COST, out_len).expect("valid parameters"); - Argon2::new(algorithm, version, params) -} - -pub(super) fn password(password: &str) -> Result { - let salt = SaltString::generate(rand::thread_rng()); - ARGON - .get_or_init(init_argon) - .hash_password(password.as_bytes(), &salt) - .map(|it| it.to_string()) - .map_err(map_err) -} - -pub(super) fn verify_password(password: &str, password_hash: &str) -> Result<()> { - let password_hash = PasswordHash::new(password_hash).map_err(map_err)?; - ARGON - .get_or_init(init_argon) - .verify_password(password.as_bytes(), &password_hash) - .map_err(map_err) -} - -fn map_err(e: password_hash::Error) -> Error { err!("{e}") } - -#[cfg(test)] -mod tests { - #[test] - fn password_hash_and_verify() { - use crate::utils::hash; - let preimage = "temp123"; - let digest = hash::password(preimage).expect("digest"); - hash::verify_password(preimage, &digest).expect("verified"); - } - - #[test] - #[should_panic(expected = "unverified")] - fn password_hash_and_verify_fail() { - use crate::utils::hash; - let preimage = "temp123"; - let fakeimage = "temp321"; - let digest = hash::password(preimage).expect("digest"); - hash::verify_password(fakeimage, &digest).expect("unverified"); - } -} diff --git a/src/core/utils/hash/sha256.rs b/src/core/utils/hash/sha256.rs deleted file mode 100644 index 06e210a7..00000000 --- a/src/core/utils/hash/sha256.rs +++ /dev/null @@ -1,62 +0,0 @@ -use ring::{ - digest, - digest::{Context, SHA256, SHA256_OUTPUT_LEN}, -}; - -pub type Digest = [u8; SHA256_OUTPUT_LEN]; - -/// Sha256 hash (input gather joined by 0xFF bytes) -#[must_use] -#[tracing::instrument(skip(inputs), level = "trace")] -pub fn delimited<'a, T, I>(mut 
inputs: I) -> Digest -where - I: Iterator + 'a, - T: AsRef<[u8]> + 'a, -{ - let mut ctx = Context::new(&SHA256); - if let Some(input) = inputs.next() { - ctx.update(input.as_ref()); - for input in inputs { - ctx.update(b"\xFF"); - ctx.update(input.as_ref()); - } - } - - ctx.finish() - .as_ref() - .try_into() - .expect("failed to return Digest buffer") -} - -/// Sha256 hash (input gather) -#[must_use] -#[tracing::instrument(skip(inputs), level = "trace")] -pub fn concat<'a, T, I>(inputs: I) -> Digest -where - I: Iterator + 'a, - T: AsRef<[u8]> + 'a, -{ - inputs - .fold(Context::new(&SHA256), |mut ctx, input| { - ctx.update(input.as_ref()); - ctx - }) - .finish() - .as_ref() - .try_into() - .expect("failed to return Digest buffer") -} - -/// Sha256 hash -#[inline] -#[must_use] -#[tracing::instrument(skip(input), level = "trace")] -pub fn hash(input: T) -> Digest -where - T: AsRef<[u8]>, -{ - digest::digest(&SHA256, input.as_ref()) - .as_ref() - .try_into() - .expect("failed to return Digest buffer") -} diff --git a/src/core/utils/html.rs b/src/core/utils/html.rs deleted file mode 100644 index f2b6d861..00000000 --- a/src/core/utils/html.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::fmt; - -/// Wrapper struct which will emit the HTML-escaped version of the contained -/// string when passed to a format string. 
-pub struct Escape<'a>(pub &'a str); - -/// Copied from librustdoc: -/// * -#[allow(clippy::string_slice)] -impl fmt::Display for Escape<'_> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - // Because the internet is always right, turns out there's not that many - // characters to escape: http://stackoverflow.com/questions/7381974 - let Escape(s) = *self; - let pile_o_bits = s; - let mut last = 0; - for (i, ch) in s.char_indices() { - let s = match ch { - | '>' => ">", - | '<' => "<", - | '&' => "&", - | '\'' => "'", - | '"' => """, - | _ => continue, - }; - fmt.write_str(&pile_o_bits[last..i])?; - fmt.write_str(s)?; - // NOTE: we only expect single byte characters here - which is fine as long as - // we only match single byte characters - last = i.saturating_add(1); - } - - if last < s.len() { - fmt.write_str(&pile_o_bits[last..])?; - } - Ok(()) - } -} diff --git a/src/core/utils/json.rs b/src/core/utils/json.rs deleted file mode 100644 index 3f2f225e..00000000 --- a/src/core/utils/json.rs +++ /dev/null @@ -1,47 +0,0 @@ -use std::{fmt, str::FromStr}; - -use ruma::{CanonicalJsonError, CanonicalJsonObject, canonical_json::try_from_json_map}; - -use crate::Result; - -/// Fallible conversion from any value that implements `Serialize` to a -/// `CanonicalJsonObject`. -/// -/// `value` must serialize to an `serde_json::Value::Object`. -pub fn to_canonical_object( - value: T, -) -> Result { - use serde::ser::Error; - - match serde_json::to_value(value).map_err(CanonicalJsonError::SerDe)? 
{ - | serde_json::Value::Object(map) => try_from_json_map(map), - | _ => - Err(CanonicalJsonError::SerDe(serde_json::Error::custom("Value must be an object"))), - } -} - -pub fn deserialize_from_str< - 'de, - D: serde::de::Deserializer<'de>, - T: FromStr, - E: fmt::Display, ->( - deserializer: D, -) -> Result { - struct Visitor, E>(std::marker::PhantomData); - impl, Err: fmt::Display> serde::de::Visitor<'_> for Visitor { - type Value = T; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(formatter, "a parsable string") - } - - fn visit_str(self, v: &str) -> Result - where - E: serde::de::Error, - { - v.parse().map_err(serde::de::Error::custom) - } - } - deserializer.deserialize_str(Visitor(std::marker::PhantomData)) -} diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs deleted file mode 100644 index 9316731c..00000000 --- a/src/core/utils/math.rs +++ /dev/null @@ -1,113 +0,0 @@ -mod expected; -mod tried; - -use std::{cmp, convert::TryFrom}; - -pub use checked_ops::checked_ops; - -pub use self::{expected::Expected, tried::Tried}; -use crate::{Err, Error, Result, debug::type_name, err}; - -/// Checked arithmetic expression. Returns a Result -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! checked { - ($($input:tt)+) => { - $crate::utils::math::checked_ops!($($input)+) - .ok_or_else(|| $crate::err!(Arithmetic("operation overflowed or result invalid"))) - }; -} - -/// Checked arithmetic expression which panics on failure. This is for -/// expressions which do not meet the threshold for validated! but the caller -/// has no realistic expectation for error and no interest in cluttering the -/// callsite with result handling from checked!. -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! 
expected { - ($msg:literal, $($input:tt)+) => { - $crate::checked!($($input)+).expect($msg) - }; - - ($($input:tt)+) => { - $crate::expected!("arithmetic expression expectation failure", $($input)+) - }; -} - -/// Unchecked arithmetic expression in release-mode. Use for performance when -/// the expression is obviously safe. The check remains in debug-mode for -/// regression analysis. -#[cfg(not(debug_assertions))] -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! validated { - ($($input:tt)+) => { - //#[allow(clippy::arithmetic_side_effects)] { - //Some($($input)*) - // .ok_or_else(|| $crate::err!(Arithmetic("this error should never been seen"))) - //} - - //NOTE: remove me when stmt_expr_attributes is stable - $crate::expected!("validated arithmetic expression failed", $($input)+) - }; -} - -/// Checked arithmetic expression in debug-mode. Use for performance when -/// the expression is obviously safe. The check is elided in release-mode. -#[cfg(debug_assertions)] -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! 
validated { - ($($input:tt)+) => { $crate::expected!($($input)+) } -} - -#[inline] -#[allow(clippy::as_conversions)] -pub fn usize_from_f64(val: f64) -> Result { - if val < 0.0 { - return Err!(Arithmetic("Converting negative float to unsigned integer")); - } - - //SAFETY: - Ok(unsafe { val.to_int_unchecked::() }) -} - -#[inline] -#[must_use] -pub fn usize_from_ruma(val: ruma::UInt) -> usize { - usize::try_from(val).expect("failed conversion from ruma::UInt to usize") -} - -#[inline] -#[must_use] -pub fn ruma_from_u64(val: u64) -> ruma::UInt { - ruma::UInt::try_from(val).expect("failed conversion from u64 to ruma::UInt") -} - -#[inline] -#[must_use] -pub fn ruma_from_usize(val: usize) -> ruma::UInt { - ruma::UInt::try_from(val).expect("failed conversion from usize to ruma::UInt") -} - -#[inline] -#[must_use] -#[allow(clippy::as_conversions, clippy::cast_possible_truncation)] -pub fn usize_from_u64_truncated(val: u64) -> usize { val as usize } - -#[inline] -pub fn try_into, Src>(src: Src) -> Result { - Dst::try_from(src).map_err(try_into_err::) -} - -fn try_into_err, Src>(e: >::Error) -> Error { - drop(e); - err!(Arithmetic( - "failed to convert from {} to {}", - type_name::(), - type_name::() - )) -} - -#[inline] -pub fn clamp(val: T, min: T, max: T) -> T { cmp::min(cmp::max(val, min), max) } diff --git a/src/core/utils/math/expected.rs b/src/core/utils/math/expected.rs deleted file mode 100644 index f0f71854..00000000 --- a/src/core/utils/math/expected.rs +++ /dev/null @@ -1,52 +0,0 @@ -use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub}; - -use crate::expected; - -pub trait Expected { - #[inline] - #[must_use] - fn expected_add(self, rhs: Self) -> Self - where - Self: CheckedAdd + Sized, - { - expected!(self + rhs) - } - - #[inline] - #[must_use] - fn expected_sub(self, rhs: Self) -> Self - where - Self: CheckedSub + Sized, - { - expected!(self - rhs) - } - - #[inline] - #[must_use] - fn expected_mul(self, rhs: Self) -> Self 
- where - Self: CheckedMul + Sized, - { - expected!(self * rhs) - } - - #[inline] - #[must_use] - fn expected_div(self, rhs: Self) -> Self - where - Self: CheckedDiv + Sized, - { - expected!(self / rhs) - } - - #[inline] - #[must_use] - fn expected_rem(self, rhs: Self) -> Self - where - Self: CheckedRem + Sized, - { - expected!(self % rhs) - } -} - -impl Expected for T {} diff --git a/src/core/utils/math/tried.rs b/src/core/utils/math/tried.rs deleted file mode 100644 index 09de731f..00000000 --- a/src/core/utils/math/tried.rs +++ /dev/null @@ -1,47 +0,0 @@ -use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub}; - -use crate::{Result, checked}; - -pub trait Tried { - #[inline] - fn try_add(self, rhs: Self) -> Result - where - Self: CheckedAdd + Sized, - { - checked!(self + rhs) - } - - #[inline] - fn try_sub(self, rhs: Self) -> Result - where - Self: CheckedSub + Sized, - { - checked!(self - rhs) - } - - #[inline] - fn try_mul(self, rhs: Self) -> Result - where - Self: CheckedMul + Sized, - { - checked!(self * rhs) - } - - #[inline] - fn try_div(self, rhs: Self) -> Result - where - Self: CheckedDiv + Sized, - { - checked!(self / rhs) - } - - #[inline] - fn try_rem(self, rhs: Self) -> Result - where - Self: CheckedRem + Sized, - { - checked!(self % rhs) - } -} - -impl Tried for T {} diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs deleted file mode 100644 index 54404e4c..00000000 --- a/src/core/utils/mod.rs +++ /dev/null @@ -1,204 +0,0 @@ -pub mod arrayvec; -pub mod bool; -pub mod bytes; -pub mod content_disposition; -pub mod debug; -pub mod defer; -pub mod future; -pub mod hash; -pub mod html; -pub mod json; -pub mod math; -pub mod mutex_map; -pub mod rand; -pub mod result; -pub mod set; -pub mod stream; -pub mod string; -pub mod sys; -#[cfg(test)] -mod tests; -pub mod time; - -pub use ::conduwuit_macros::implement; -pub use ::ctor::{ctor, dtor}; - -pub use self::{ - arrayvec::ArrayVecExt, - bool::BoolExt, - 
bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}, - debug::slice_truncated as debug_slice_truncated, - future::{BoolExt as FutureBoolExt, OptionStream, TryExtExt as TryFutureExtExt}, - hash::sha256::delimited as calculate_hash, - html::Escape as HtmlEscape, - json::{deserialize_from_str, to_canonical_object}, - math::clamp, - mutex_map::{Guard as MutexMapGuard, MutexMap}, - rand::{shuffle, string as random_string}, - stream::{IterStream, ReadyExt, Tools as StreamTools, TryReadyExt}, - string::{str_from_bytes, string_from_bytes}, - sys::compute::available_parallelism, - time::{ - exponential_backoff::{continue_exponential_backoff, continue_exponential_backoff_secs}, - now_millis as millis_since_unix_epoch, timepoint_ago, timepoint_from_now, - }, -}; - -#[inline] -pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, source) } - -#[macro_export] -macro_rules! extract_variant { - ( $e:expr_2021, $( $variant:path )|* ) => { - match $e { - $( $variant(value) => Some(value), )* - _ => None, - } - }; -} - -/// Functor for !is_empty() -#[macro_export] -macro_rules! is_not_empty { - () => { - |x| !x.is_empty() - }; -} - -#[macro_export] -macro_rules! apply { - (1, $($idx:tt)+) => { - |t| (($($idx)+)(t.0),) - }; - - (2, $($idx:tt)+) => { - |t| (($($idx)+)(t.0), ($($idx)+)(t.1),) - }; - - (3, $($idx:tt)+) => { - |t| (($($idx)+)(t.0), ($($idx)+)(t.1), ($($idx)+)(t.2),) - }; - - (4, $($idx:tt)+) => { - |t| (($($idx)+)(t.0), ($($idx)+)(t.1), ($($idx)+)(t.2), ($($idx)+4)(t.3)) - }; -} - -#[macro_export] -macro_rules! pair_of { - ($decl:ty) => { - ($decl, $decl) - }; - - ($init:expr_2021) => { - ($init, $init) - }; -} - -/// Functor for truthy -#[macro_export] -macro_rules! is_true { - () => { - |x| !!x - }; -} - -/// Functor for falsy -#[macro_export] -macro_rules! is_false { - () => { - |x| !x - }; -} - -/// Functor for equality to non-zero -#[macro_export] -macro_rules! 
is_nonzero { - () => { - |x| x != 0 - }; -} - -/// Functor for equality to zero -#[macro_export] -macro_rules! is_zero { - () => { - $crate::is_matching!(0) - }; -} - -/// Functor for equality i.e. .is_some_and(is_equal!(2)) -#[macro_export] -macro_rules! is_equal_to { - ($val:ident) => { - |x| x == $val - }; - - ($val:expr_2021) => { - |x| x == $val - }; -} - -/// Functor for less i.e. .is_some_and(is_less_than!(2)) -#[macro_export] -macro_rules! is_less_than { - ($val:ident) => { - |x| x < $val - }; - - ($val:expr_2021) => { - |x| x < $val - }; -} - -/// Functor for matches! i.e. .is_some_and(is_matching!('A'..='Z')) -#[macro_export] -macro_rules! is_matching { - ($val:ident) => { - |x| matches!(x, $val) - }; - - ($($val:tt)+) => { - |x| matches!(x, $($val)+) - }; -} - -/// Functor for equality i.e. (a, b).map(is_equal!()) -#[macro_export] -macro_rules! is_equal { - () => { - |a, b| a == b - }; -} - -/// Functor for |x| *x.$i -#[macro_export] -macro_rules! deref_at { - ($idx:tt) => { - |t| *t.$idx - }; -} - -/// Functor for |ref x| x.$i -#[macro_export] -macro_rules! ref_at { - ($idx:tt) => { - |ref t| &t.$idx - }; -} - -/// Functor for |&x| x.$i -#[macro_export] -macro_rules! val_at { - ($idx:tt) => { - |&t| t.$idx - }; -} - -/// Functor for |x| x.$i -#[macro_export] -macro_rules! 
at { - ($idx:tt) => { - |t| t.$idx - }; -} diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs deleted file mode 100644 index 01504ce6..00000000 --- a/src/core/utils/mutex_map.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::{ - fmt::Debug, - hash::Hash, - sync::{Arc, TryLockError::WouldBlock}, -}; - -use tokio::sync::OwnedMutexGuard as Omg; - -use crate::{Result, err}; - -/// Map of Mutexes -pub struct MutexMap { - map: Map, -} - -pub struct Guard { - map: Map, - val: Omg, -} - -type Map = Arc>; -type MapMutex = std::sync::Mutex>; -type HashMap = std::collections::HashMap>; -type Value = Arc>; - -impl MutexMap -where - Key: Clone + Eq + Hash + Send, - Val: Default + Send, -{ - #[must_use] - pub fn new() -> Self { - Self { - map: Map::new(MapMutex::new(HashMap::new())), - } - } - - #[tracing::instrument(level = "trace", skip(self))] - pub async fn lock<'a, K>(&'a self, k: &'a K) -> Guard - where - K: Debug + Send + ?Sized + Sync, - Key: TryFrom<&'a K>, - >::Error: Debug, - { - let val = self - .map - .lock() - .expect("locked") - .entry(k.try_into().expect("failed to construct key")) - .or_default() - .clone(); - - Guard:: { - map: Arc::clone(&self.map), - val: val.lock_owned().await, - } - } - - #[tracing::instrument(level = "trace", skip(self))] - pub fn try_lock<'a, K>(&self, k: &'a K) -> Result> - where - K: Debug + Send + ?Sized + Sync, - Key: TryFrom<&'a K>, - >::Error: Debug, - { - let val = self - .map - .lock() - .expect("locked") - .entry(k.try_into().expect("failed to construct key")) - .or_default() - .clone(); - - Ok(Guard:: { - map: Arc::clone(&self.map), - val: val.try_lock_owned().map_err(|_| err!("would yield"))?, - }) - } - - #[tracing::instrument(level = "trace", skip(self))] - pub fn try_try_lock<'a, K>(&self, k: &'a K) -> Result> - where - K: Debug + Send + ?Sized + Sync, - Key: TryFrom<&'a K>, - >::Error: Debug, - { - let val = self - .map - .try_lock() - .map_err(|e| match e { - | WouldBlock => err!("would block"), - | _ => 
panic!("{e:?}"), - })? - .entry(k.try_into().expect("failed to construct key")) - .or_default() - .clone(); - - Ok(Guard:: { - map: Arc::clone(&self.map), - val: val.try_lock_owned().map_err(|_| err!("would yield"))?, - }) - } - - #[must_use] - pub fn contains(&self, k: &Key) -> bool { self.map.lock().expect("locked").contains_key(k) } - - #[must_use] - pub fn is_empty(&self) -> bool { self.map.lock().expect("locked").is_empty() } - - #[must_use] - pub fn len(&self) -> usize { self.map.lock().expect("locked").len() } -} - -impl Default for MutexMap -where - Key: Clone + Eq + Hash + Send, - Val: Default + Send, -{ - fn default() -> Self { Self::new() } -} - -impl Drop for Guard { - #[tracing::instrument(name = "unlock", level = "trace", skip_all)] - fn drop(&mut self) { - if Arc::strong_count(Omg::mutex(&self.val)) <= 2 { - self.map.lock().expect("locked").retain(|_, val| { - !Arc::ptr_eq(val, Omg::mutex(&self.val)) || Arc::strong_count(val) > 2 - }); - } - } -} diff --git a/src/core/utils/rand.rs b/src/core/utils/rand.rs deleted file mode 100644 index 72487633..00000000 --- a/src/core/utils/rand.rs +++ /dev/null @@ -1,46 +0,0 @@ -use std::{ - ops::Range, - time::{Duration, SystemTime}, -}; - -use arrayvec::ArrayString; -use rand::{Rng, seq::SliceRandom, thread_rng}; - -pub fn shuffle(vec: &mut [T]) { - let mut rng = thread_rng(); - vec.shuffle(&mut rng); -} - -pub fn string(length: usize) -> String { - thread_rng() - .sample_iter(&rand::distributions::Alphanumeric) - .take(length) - .map(char::from) - .collect() -} - -#[inline] -pub fn string_array() -> ArrayString { - let mut ret = ArrayString::::new(); - thread_rng() - .sample_iter(&rand::distributions::Alphanumeric) - .take(LENGTH) - .map(char::from) - .for_each(|c| ret.push(c)); - - ret -} - -#[inline] -#[must_use] -pub fn time_from_now_secs(range: Range) -> SystemTime { - SystemTime::now() - .checked_add(secs(range)) - .expect("range does not overflow SystemTime") -} - -#[must_use] -pub fn secs(range: Range) 
-> Duration { - let mut rng = thread_rng(); - Duration::from_secs(rng.gen_range(range)) -} diff --git a/src/core/utils/result.rs b/src/core/utils/result.rs deleted file mode 100644 index 1ad5371b..00000000 --- a/src/core/utils/result.rs +++ /dev/null @@ -1,18 +0,0 @@ -mod debug_inspect; -mod filter; -mod flat_ok; -mod into_is_ok; -mod log_debug_err; -mod log_err; -mod map_expect; -mod not_found; -mod unwrap_infallible; -mod unwrap_or_err; - -pub use self::{ - debug_inspect::DebugInspect, filter::Filter, flat_ok::FlatOk, into_is_ok::IntoIsOk, - log_debug_err::LogDebugErr, log_err::LogErr, map_expect::MapExpect, not_found::NotFound, - unwrap_infallible::UnwrapInfallible, unwrap_or_err::UnwrapOrErr, -}; - -pub type Result = std::result::Result; diff --git a/src/core/utils/result/debug_inspect.rs b/src/core/utils/result/debug_inspect.rs deleted file mode 100644 index ef80979d..00000000 --- a/src/core/utils/result/debug_inspect.rs +++ /dev/null @@ -1,52 +0,0 @@ -use super::Result; - -/// Inspect Result values with release-mode elision. -pub trait DebugInspect { - /// Inspects an Err contained value in debug-mode. In release-mode closure F - /// is elided. - #[must_use] - fn debug_inspect_err(self, f: F) -> Self; - - /// Inspects an Ok contained value in debug-mode. In release-mode closure F - /// is elided. 
- #[must_use] - fn debug_inspect(self, f: F) -> Self; -} - -#[cfg(debug_assertions)] -impl DebugInspect for Result { - #[inline] - fn debug_inspect(self, f: F) -> Self - where - F: FnOnce(&T), - { - self.inspect(f) - } - - #[inline] - fn debug_inspect_err(self, f: F) -> Self - where - F: FnOnce(&E), - { - self.inspect_err(f) - } -} - -#[cfg(not(debug_assertions))] -impl DebugInspect for Result { - #[inline] - fn debug_inspect(self, _: F) -> Self - where - F: FnOnce(&T), - { - self - } - - #[inline] - fn debug_inspect_err(self, _: F) -> Self - where - F: FnOnce(&E), - { - self - } -} diff --git a/src/core/utils/result/filter.rs b/src/core/utils/result/filter.rs deleted file mode 100644 index f11d3632..00000000 --- a/src/core/utils/result/filter.rs +++ /dev/null @@ -1,21 +0,0 @@ -use super::Result; - -pub trait Filter { - /// Similar to Option::filter - #[must_use] - fn filter(self, predicate: P) -> Self - where - P: FnOnce(&T) -> Result<(), U>, - E: From; -} - -impl Filter for Result { - #[inline] - fn filter(self, predicate: P) -> Self - where - P: FnOnce(&T) -> Result<(), U>, - E: From, - { - self.and_then(move |t| predicate(&t).map(move |()| t).map_err(Into::into)) - } -} diff --git a/src/core/utils/result/flat_ok.rs b/src/core/utils/result/flat_ok.rs deleted file mode 100644 index 8e7bb968..00000000 --- a/src/core/utils/result/flat_ok.rs +++ /dev/null @@ -1,38 +0,0 @@ -use super::Result; - -pub trait FlatOk { - /// Equivalent to .transpose().ok().flatten() - fn flat_ok(self) -> Option; - - /// Equivalent to .transpose().ok().flatten().ok_or(...) - fn flat_ok_or(self, err: E) -> Result; - - /// Equivalent to .transpose().ok().flatten().ok_or_else(...) 
- fn flat_ok_or_else E>(self, err: F) -> Result; -} - -impl FlatOk for Option> { - #[inline] - fn flat_ok(self) -> Option { self.transpose().ok().flatten() } - - #[inline] - fn flat_ok_or(self, err: Ep) -> Result { self.flat_ok().ok_or(err) } - - #[inline] - fn flat_ok_or_else Ep>(self, err: F) -> Result { - self.flat_ok().ok_or_else(err) - } -} - -impl FlatOk for Result, E> { - #[inline] - fn flat_ok(self) -> Option { self.ok().flatten() } - - #[inline] - fn flat_ok_or(self, err: Ep) -> Result { self.flat_ok().ok_or(err) } - - #[inline] - fn flat_ok_or_else Ep>(self, err: F) -> Result { - self.flat_ok().ok_or_else(err) - } -} diff --git a/src/core/utils/result/inspect_log.rs b/src/core/utils/result/inspect_log.rs deleted file mode 100644 index e9f32663..00000000 --- a/src/core/utils/result/inspect_log.rs +++ /dev/null @@ -1,62 +0,0 @@ -use std::fmt; - -use tracing::Level; - -use super::Result; -use crate::error; - -pub trait ErrLog -where - E: fmt::Display, -{ - fn log_err(self, level: Level) -> Self; - - #[inline] - fn err_log(self) -> Self - where - Self: Sized, - { - self.log_err(Level::ERROR) - } -} - -pub trait ErrDebugLog -where - E: fmt::Debug, -{ - fn log_err_debug(self, level: Level) -> Self; - - #[inline] - fn err_debug_log(self) -> Self - where - Self: Sized, - { - self.log_err_debug(Level::ERROR) - } -} - -impl ErrLog for Result -where - E: fmt::Display, -{ - #[inline] - fn log_err(self, level: Level) -> Self - where - Self: Sized, - { - self.inspect_err(|error| error::inspect_log_level(&error, level)) - } -} - -impl ErrDebugLog for Result -where - E: fmt::Debug, -{ - #[inline] - fn log_err_debug(self, level: Level) -> Self - where - Self: Sized, - { - self.inspect_err(|error| error::inspect_debug_log_level(&error, level)) - } -} diff --git a/src/core/utils/result/into_is_ok.rs b/src/core/utils/result/into_is_ok.rs deleted file mode 100644 index 220ce010..00000000 --- a/src/core/utils/result/into_is_ok.rs +++ /dev/null @@ -1,10 +0,0 @@ -use 
super::Result; - -pub trait IntoIsOk { - fn into_is_ok(self) -> bool; -} - -impl IntoIsOk for Result { - #[inline] - fn into_is_ok(self) -> bool { self.is_ok() } -} diff --git a/src/core/utils/result/log_debug_err.rs b/src/core/utils/result/log_debug_err.rs deleted file mode 100644 index 8835afd1..00000000 --- a/src/core/utils/result/log_debug_err.rs +++ /dev/null @@ -1,26 +0,0 @@ -use std::fmt::Debug; - -use tracing::Level; - -use super::{DebugInspect, Result}; -use crate::error; - -pub trait LogDebugErr { - #[must_use] - fn err_debug_log(self, level: Level) -> Self; - - #[must_use] - fn log_debug_err(self) -> Self - where - Self: Sized, - { - self.err_debug_log(Level::ERROR) - } -} - -impl LogDebugErr for Result { - #[inline] - fn err_debug_log(self, level: Level) -> Self { - self.debug_inspect_err(|error| error::inspect_debug_log_level(&error, level)) - } -} diff --git a/src/core/utils/result/log_err.rs b/src/core/utils/result/log_err.rs deleted file mode 100644 index a1ce891f..00000000 --- a/src/core/utils/result/log_err.rs +++ /dev/null @@ -1,26 +0,0 @@ -use std::fmt::Display; - -use tracing::Level; - -use super::Result; -use crate::error; - -pub trait LogErr { - #[must_use] - fn err_log(self, level: Level) -> Self; - - #[must_use] - fn log_err(self) -> Self - where - Self: Sized, - { - self.err_log(Level::ERROR) - } -} - -impl LogErr for Result { - #[inline] - fn err_log(self, level: Level) -> Self { - self.inspect_err(|error| error::inspect_log_level(&error, level)) - } -} diff --git a/src/core/utils/result/map_expect.rs b/src/core/utils/result/map_expect.rs deleted file mode 100644 index b315ad3d..00000000 --- a/src/core/utils/result/map_expect.rs +++ /dev/null @@ -1,20 +0,0 @@ -use std::fmt::Debug; - -use super::Result; - -pub trait MapExpect<'a, T> { - /// Calls expect(msg) on the mapped Result value. This is similar to - /// map(Result::unwrap) but composes an expect call and message without - /// requiring a closure. 
- fn map_expect(self, msg: &'a str) -> T; -} - -impl<'a, T, E: Debug> MapExpect<'a, Option> for Option> { - #[inline] - fn map_expect(self, msg: &'a str) -> Option { self.map(|result| result.expect(msg)) } -} - -impl<'a, T, E: Debug> MapExpect<'a, Result> for Result, E> { - #[inline] - fn map_expect(self, msg: &'a str) -> Result { self.map(|result| result.expect(msg)) } -} diff --git a/src/core/utils/result/not_found.rs b/src/core/utils/result/not_found.rs deleted file mode 100644 index d61825af..00000000 --- a/src/core/utils/result/not_found.rs +++ /dev/null @@ -1,12 +0,0 @@ -use super::Result; -use crate::Error; - -pub trait NotFound { - #[must_use] - fn is_not_found(&self) -> bool; -} - -impl NotFound for Result { - #[inline] - fn is_not_found(&self) -> bool { self.as_ref().is_err_and(Error::is_not_found) } -} diff --git a/src/core/utils/result/unwrap_infallible.rs b/src/core/utils/result/unwrap_infallible.rs deleted file mode 100644 index 99309e02..00000000 --- a/src/core/utils/result/unwrap_infallible.rs +++ /dev/null @@ -1,17 +0,0 @@ -use std::convert::Infallible; - -use super::{DebugInspect, Result}; -use crate::error; - -pub trait UnwrapInfallible { - fn unwrap_infallible(self) -> T; -} - -impl UnwrapInfallible for Result { - #[inline] - fn unwrap_infallible(self) -> T { - // SAFETY: Branchless unwrap for errors that can never happen. In debug - // mode this is asserted. - unsafe { self.debug_inspect_err(error::infallible).unwrap_unchecked() } - } -} diff --git a/src/core/utils/result/unwrap_or_err.rs b/src/core/utils/result/unwrap_or_err.rs deleted file mode 100644 index 69901958..00000000 --- a/src/core/utils/result/unwrap_or_err.rs +++ /dev/null @@ -1,15 +0,0 @@ -use std::convert::identity; - -use super::Result; - -/// Returns the Ok value or the Err value. Available when the Ok and Err types -/// are the same. This is a way to default the result using the specific Err -/// value rather than unwrap_or_default() using Ok's default. 
-pub trait UnwrapOrErr { - fn unwrap_or_err(self) -> T; -} - -impl UnwrapOrErr for Result { - #[inline] - fn unwrap_or_err(self) -> T { self.unwrap_or_else(identity::) } -} diff --git a/src/core/utils/set.rs b/src/core/utils/set.rs deleted file mode 100644 index 032a9835..00000000 --- a/src/core/utils/set.rs +++ /dev/null @@ -1,79 +0,0 @@ -use std::{ - cmp::{Eq, Ord}, - pin::Pin, - sync::Arc, -}; - -use futures::{Stream, StreamExt}; - -use crate::{is_equal_to, is_less_than}; - -/// Intersection of sets -/// -/// Outputs the set of elements common to all input sets. Inputs do not have to -/// be sorted. If inputs are sorted a more optimized function is available in -/// this suite and should be used. -pub fn intersection(mut input: Iters) -> impl Iterator + Send -where - Iters: Iterator + Clone + Send, - Iter: Iterator + Send, - Item: Eq, -{ - input.next().into_iter().flat_map(move |first| { - let input = input.clone(); - first.filter(move |targ| { - input - .clone() - .all(|mut other| other.any(is_equal_to!(*targ))) - }) - }) -} - -/// Intersection of sets -/// -/// Outputs the set of elements common to all input sets. Inputs must be sorted. -pub fn intersection_sorted( - mut input: Iters, -) -> impl Iterator + Send -where - Iters: Iterator + Clone + Send, - Iter: Iterator + Send, - Item: Eq + Ord, -{ - input.next().into_iter().flat_map(move |first| { - let mut input = input.clone().collect::>(); - first.filter(move |targ| { - input.iter_mut().all(|it| { - it.by_ref() - .skip_while(is_less_than!(targ)) - .peekable() - .peek() - .is_some_and(is_equal_to!(targ)) - }) - }) - }) -} - -/// Intersection of sets -/// -/// Outputs the set of elements common to both streams. Streams must be sorted. 
-pub fn intersection_sorted_stream2(a: S, b: S) -> impl Stream + Send -where - S: Stream + Send + Unpin, - Item: Eq + PartialOrd + Send + Sync, -{ - use tokio::sync::Mutex; - - let b = Arc::new(Mutex::new(b.peekable())); - a.map(move |ai| (ai, b.clone())) - .filter_map(|(ai, b)| async move { - let mut lock = b.lock().await; - while let Some(bi) = Pin::new(&mut *lock).next_if(|bi| *bi <= ai).await.as_ref() { - if ai == *bi { - return Some(ai); - } - } - - None - }) -} diff --git a/src/core/utils/stream/band.rs b/src/core/utils/stream/band.rs deleted file mode 100644 index 45ad7d94..00000000 --- a/src/core/utils/stream/band.rs +++ /dev/null @@ -1,50 +0,0 @@ -use std::sync::atomic::{AtomicUsize, Ordering}; - -/// Stream concurrency factor; this is a live value. -static WIDTH: AtomicUsize = AtomicUsize::new(32); - -/// Stream throughput amplifier; this is a live value. -static AMPLIFICATION: AtomicUsize = AtomicUsize::new(1024); - -/// Practicable limits on the stream width. -pub const WIDTH_LIMIT: (usize, usize) = (1, 1024); - -/// Practicable limits on the stream amplifier. -pub const AMPLIFICATION_LIMIT: (usize, usize) = (32, 32768); - -/// Sets the live concurrency factor. The first return value is the previous -/// width which was replaced. The second return value is the value which was set -/// after any applied limits. -pub fn set_width(width: usize) -> (usize, usize) { - let width = width.clamp(WIDTH_LIMIT.0, WIDTH_LIMIT.1); - (WIDTH.swap(width, Ordering::Relaxed), width) -} - -/// Sets the live concurrency amplification. The first return value is the -/// previous width which was replaced. The second return value is the value -/// which was set after any applied limits. 
-pub fn set_amplification(width: usize) -> (usize, usize) { - let width = width.clamp(AMPLIFICATION_LIMIT.0, AMPLIFICATION_LIMIT.1); - (AMPLIFICATION.swap(width, Ordering::Relaxed), width) -} - -/// Used by stream operations where the concurrency factor hasn't been manually -/// supplied by the caller (most uses). Instead we provide a default value which -/// is adjusted at startup for the specific system and also dynamically. -#[inline] -pub fn automatic_width() -> usize { - let width = WIDTH.load(Ordering::Relaxed); - debug_assert!(width >= WIDTH_LIMIT.0, "WIDTH should not be zero"); - debug_assert!(width <= WIDTH_LIMIT.1, "WIDTH is probably too large"); - width -} - -/// Used by stream operations where the amplification hasn't been manually -/// supplied by the caller. Instead we provide a computed value. -#[inline] -pub fn automatic_amplification() -> usize { - let amplification = AMPLIFICATION.load(Ordering::Relaxed); - debug_assert!(amplification >= AMPLIFICATION_LIMIT.0, "amplification is too low"); - debug_assert!(amplification <= AMPLIFICATION_LIMIT.1, "amplification is too high"); - amplification -} diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs deleted file mode 100644 index 832f2638..00000000 --- a/src/core/utils/stream/broadband.rs +++ /dev/null @@ -1,164 +0,0 @@ -//! Broadband stream combinator extensions to futures::Stream - -use std::convert::identity; - -use futures::{ - Future, - stream::{Stream, StreamExt}, -}; - -use super::{ReadyExt, automatic_width}; - -/// Concurrency extensions to augment futures::StreamExt. 
broad_ combinators -/// produce out-of-order -pub trait BroadbandExt -where - Self: Stream + Send + Sized, -{ - fn broadn_all(self, n: N, f: F) -> impl Future + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future + Send; - - fn broadn_any(self, n: N, f: F) -> impl Future + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future + Send; - - /// Concurrent filter_map(); unordered results - fn broadn_filter_map(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future> + Send, - U: Send; - - fn broadn_flat_map(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Stream + Send + Unpin, - U: Send; - - fn broadn_then(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - U: Send; - - #[inline] - fn broad_all(self, f: F) -> impl Future + Send - where - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - { - self.broadn_all(None, f) - } - - #[inline] - fn broad_any(self, f: F) -> impl Future + Send - where - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - { - self.broadn_any(None, f) - } - - #[inline] - fn broad_filter_map(self, f: F) -> impl Stream + Send - where - F: Fn(Item) -> Fut + Send, - Fut: Future> + Send, - U: Send, - { - self.broadn_filter_map(None, f) - } - - #[inline] - fn broad_flat_map(self, f: F) -> impl Stream + Send - where - F: Fn(Item) -> Fut + Send, - Fut: Stream + Send + Unpin, - U: Send, - { - self.broadn_flat_map(None, f) - } - - #[inline] - fn broad_then(self, f: F) -> impl Stream + Send - where - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - U: Send, - { - self.broadn_then(None, f) - } -} - -impl BroadbandExt for S -where - S: Stream + Send + Sized, -{ - #[inline] - fn broadn_all(self, n: N, f: F) -> impl Future + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - { - self.map(f) - .buffer_unordered(n.into().unwrap_or_else(automatic_width)) - 
.ready_all(identity) - } - - #[inline] - fn broadn_any(self, n: N, f: F) -> impl Future + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - { - self.map(f) - .buffer_unordered(n.into().unwrap_or_else(automatic_width)) - .ready_any(identity) - } - - #[inline] - fn broadn_filter_map(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future> + Send, - U: Send, - { - self.map(f) - .buffer_unordered(n.into().unwrap_or_else(automatic_width)) - .ready_filter_map(identity) - } - - #[inline] - fn broadn_flat_map(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Stream + Send + Unpin, - U: Send, - { - self.flat_map_unordered(n.into().unwrap_or_else(automatic_width), f) - } - - #[inline] - fn broadn_then(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - U: Send, - { - self.map(f) - .buffer_unordered(n.into().unwrap_or_else(automatic_width)) - } -} diff --git a/src/core/utils/stream/cloned.rs b/src/core/utils/stream/cloned.rs deleted file mode 100644 index b89e4695..00000000 --- a/src/core/utils/stream/cloned.rs +++ /dev/null @@ -1,20 +0,0 @@ -use std::clone::Clone; - -use futures::{Stream, StreamExt, stream::Map}; - -pub trait Cloned<'a, T, S> -where - S: Stream, - T: Clone + 'a, -{ - fn cloned(self) -> Map T>; -} - -impl<'a, T, S> Cloned<'a, T, S> for S -where - S: Stream, - T: Clone + 'a, -{ - #[inline] - fn cloned(self) -> Map T> { self.map(Clone::clone) } -} diff --git a/src/core/utils/stream/expect.rs b/src/core/utils/stream/expect.rs deleted file mode 100644 index ec572714..00000000 --- a/src/core/utils/stream/expect.rs +++ /dev/null @@ -1,26 +0,0 @@ -use futures::{Stream, StreamExt, TryStream}; - -use crate::Result; - -pub trait TryExpect<'a, Item> { - fn expect_ok(self) -> impl Stream + Send + 'a; - - fn map_expect(self, msg: &'a str) -> impl Stream + Send + 'a; -} - -impl<'a, T, Item> 
TryExpect<'a, Item> for T -where - T: Stream> + Send + TryStream + 'a, - Item: 'a, -{ - #[inline] - fn expect_ok(self: T) -> impl Stream + Send + 'a { - self.map_expect("stream expectation failure") - } - - //TODO: move to impl MapExpect - #[inline] - fn map_expect(self, msg: &'a str) -> impl Stream + Send + 'a { - self.map(|res| res.expect(msg)) - } -} diff --git a/src/core/utils/stream/ignore.rs b/src/core/utils/stream/ignore.rs deleted file mode 100644 index 37c89d9a..00000000 --- a/src/core/utils/stream/ignore.rs +++ /dev/null @@ -1,34 +0,0 @@ -use futures::{Stream, StreamExt, TryStream, future::ready}; - -use crate::{Error, Result}; - -pub trait TryIgnore<'a, Item> { - fn ignore_err(self) -> impl Stream + Send + 'a; - - fn ignore_ok(self) -> impl Stream + Send + 'a; -} - -impl<'a, T, Item> TryIgnore<'a, Item> for T -where - T: Stream> + TryStream + Send + 'a, - Item: Send + 'a, -{ - #[cfg(debug_assertions)] - #[inline] - fn ignore_err(self: T) -> impl Stream + Send + 'a { - use super::TryExpect; - - self.expect_ok() - } - - #[cfg(not(debug_assertions))] - #[inline] - fn ignore_err(self: T) -> impl Stream + Send + 'a { - self.filter_map(|res| ready(res.ok())) - } - - #[inline] - fn ignore_ok(self: T) -> impl Stream + Send + 'a { - self.filter_map(|res| ready(res.err())) - } -} diff --git a/src/core/utils/stream/iter_stream.rs b/src/core/utils/stream/iter_stream.rs deleted file mode 100644 index e9a91b1c..00000000 --- a/src/core/utils/stream/iter_stream.rs +++ /dev/null @@ -1,40 +0,0 @@ -use futures::{ - StreamExt, stream, - stream::{Stream, TryStream}, -}; - -use crate::{Error, Result}; - -pub trait IterStream { - /// Convert an Iterator into a Stream - fn stream(self) -> impl Stream::Item> + Send; - - /// Convert an Iterator into a TryStream - fn try_stream( - self, - ) -> impl TryStream< - Ok = ::Item, - Error = Error, - Item = Result<::Item, Error>, - > + Send; -} - -impl IterStream for I -where - I: IntoIterator + Send, - ::IntoIter: Send, -{ - #[inline] - 
fn stream(self) -> impl Stream::Item> + Send { stream::iter(self) } - - #[inline] - fn try_stream( - self, - ) -> impl TryStream< - Ok = ::Item, - Error = Error, - Item = Result<::Item, Error>, - > + Send { - self.stream().map(Ok) - } -} diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs deleted file mode 100644 index a356f05f..00000000 --- a/src/core/utils/stream/mod.rs +++ /dev/null @@ -1,32 +0,0 @@ -mod band; -mod broadband; -mod cloned; -mod expect; -mod ignore; -mod iter_stream; -mod ready; -mod tools; -mod try_broadband; -mod try_parallel; -mod try_ready; -mod try_tools; -mod try_wideband; -mod wideband; - -pub use band::{ - AMPLIFICATION_LIMIT, WIDTH_LIMIT, automatic_amplification, automatic_width, - set_amplification, set_width, -}; -pub use broadband::BroadbandExt; -pub use cloned::Cloned; -pub use expect::TryExpect; -pub use ignore::TryIgnore; -pub use iter_stream::IterStream; -pub use ready::ReadyExt; -pub use tools::Tools; -pub use try_broadband::TryBroadbandExt; -pub use try_parallel::TryParallelExt; -pub use try_ready::TryReadyExt; -pub use try_tools::TryTools; -pub use try_wideband::TryWidebandExt; -pub use wideband::WidebandExt; diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs deleted file mode 100644 index be4d1b25..00000000 --- a/src/core/utils/stream/ready.rs +++ /dev/null @@ -1,239 +0,0 @@ -//! Synchronous combinator extensions to futures::Stream -#![allow(clippy::type_complexity)] - -use futures::{ - future::{FutureExt, Ready, ready}, - stream::{ - All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile, - }, -}; - -/// Synchronous combinators to augment futures::StreamExt. Most Stream -/// combinators take asynchronous arguments, but often only simple predicates -/// are required to steer a Stream like an Iterator. This suite provides a -/// convenience to reduce boilerplate by de-cluttering non-async predicates. 
-/// -/// This interface is not necessarily complete; feel free to add as-needed. -pub trait ReadyExt -where - Self: Stream + Sized, -{ - fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> - where - F: Fn(Item) -> bool; - - fn ready_any(self, f: F) -> Any, impl FnMut(Item) -> Ready> - where - F: Fn(Item) -> bool; - - fn ready_find<'a, F>(self, f: F) -> impl Future> + Send - where - Self: Send + Unpin + 'a, - F: Fn(&Item) -> bool + Send + 'a, - Item: Send; - - fn ready_filter<'a, F>( - self, - f: F, - ) -> Filter, impl FnMut(&Item) -> Ready + 'a> - where - F: Fn(&Item) -> bool + 'a; - - fn ready_filter_map( - self, - f: F, - ) -> FilterMap>, impl FnMut(Item) -> Ready>> - where - F: Fn(Item) -> Option; - - fn ready_fold( - self, - init: T, - f: F, - ) -> Fold, T, impl FnMut(T, Item) -> Ready> - where - F: Fn(T, Item) -> T; - - fn ready_fold_default( - self, - f: F, - ) -> Fold, T, impl FnMut(T, Item) -> Ready> - where - F: Fn(T, Item) -> T, - T: Default; - - fn ready_for_each(self, f: F) -> ForEach, impl FnMut(Item) -> Ready<()>> - where - F: FnMut(Item); - - fn ready_take_while<'a, F>( - self, - f: F, - ) -> TakeWhile, impl FnMut(&Item) -> Ready + 'a> - where - F: Fn(&Item) -> bool + 'a; - - fn ready_scan( - self, - init: T, - f: F, - ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> - where - F: Fn(&mut T, Item) -> Option; - - fn ready_scan_each( - self, - init: T, - f: F, - ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> - where - F: Fn(&mut T, &Item); - - fn ready_skip_while<'a, F>( - self, - f: F, - ) -> SkipWhile, impl FnMut(&Item) -> Ready + 'a> - where - F: Fn(&Item) -> bool + 'a; -} - -impl ReadyExt for S -where - S: Stream + Sized, -{ - #[inline] - fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> - where - F: Fn(Item) -> bool, - { - self.all(move |t| ready(f(t))) - } - - #[inline] - fn ready_any(self, f: F) -> Any, impl FnMut(Item) -> Ready> - where - F: Fn(Item) -> bool, - { - self.any(move |t| ready(f(t))) - } - - #[inline] - fn 
ready_find<'a, F>(self, f: F) -> impl Future> + Send - where - Self: Send + Unpin + 'a, - F: Fn(&Item) -> bool + Send + 'a, - Item: Send, - { - self.ready_filter(f) - .take(1) - .into_future() - .map(|(curr, _next)| curr) - } - - #[inline] - fn ready_filter<'a, F>( - self, - f: F, - ) -> Filter, impl FnMut(&Item) -> Ready + 'a> - where - F: Fn(&Item) -> bool + 'a, - { - self.filter(move |t| ready(f(t))) - } - - #[inline] - fn ready_filter_map( - self, - f: F, - ) -> FilterMap>, impl FnMut(Item) -> Ready>> - where - F: Fn(Item) -> Option, - { - self.filter_map(move |t| ready(f(t))) - } - - #[inline] - fn ready_fold( - self, - init: T, - f: F, - ) -> Fold, T, impl FnMut(T, Item) -> Ready> - where - F: Fn(T, Item) -> T, - { - self.fold(init, move |a, t| ready(f(a, t))) - } - - #[inline] - fn ready_fold_default( - self, - f: F, - ) -> Fold, T, impl FnMut(T, Item) -> Ready> - where - F: Fn(T, Item) -> T, - T: Default, - { - self.ready_fold(T::default(), f) - } - - #[inline] - #[allow(clippy::unit_arg)] - fn ready_for_each( - self, - mut f: F, - ) -> ForEach, impl FnMut(Item) -> Ready<()>> - where - F: FnMut(Item), - { - self.for_each(move |t| ready(f(t))) - } - - #[inline] - fn ready_take_while<'a, F>( - self, - f: F, - ) -> TakeWhile, impl FnMut(&Item) -> Ready + 'a> - where - F: Fn(&Item) -> bool + 'a, - { - self.take_while(move |t| ready(f(t))) - } - - #[inline] - fn ready_scan( - self, - init: T, - f: F, - ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> - where - F: Fn(&mut T, Item) -> Option, - { - self.scan(init, move |s, t| ready(f(s, t))) - } - - #[inline] - fn ready_scan_each( - self, - init: T, - f: F, - ) -> Scan>, impl FnMut(&mut T, Item) -> Ready>> - where - F: Fn(&mut T, &Item), - { - self.ready_scan(init, move |s, t| { - f(s, &t); - Some(t) - }) - } - - #[inline] - fn ready_skip_while<'a, F>( - self, - f: F, - ) -> SkipWhile, impl FnMut(&Item) -> Ready + 'a> - where - F: Fn(&Item) -> bool + 'a, - { - self.skip_while(move |t| ready(f(t))) - } -} diff --git 
a/src/core/utils/stream/tools.rs b/src/core/utils/stream/tools.rs deleted file mode 100644 index 7b24642e..00000000 --- a/src/core/utils/stream/tools.rs +++ /dev/null @@ -1,106 +0,0 @@ -//! StreamTools for futures::Stream - -use std::{collections::HashMap, hash::Hash}; - -use futures::{Future, Stream, StreamExt}; - -use super::ReadyExt; -use crate::expected; - -/// StreamTools -/// -/// This interface is not necessarily complete; feel free to add as-needed. -pub trait Tools -where - Self: Stream + Send + Sized, - ::Item: Send, -{ - fn counts(self) -> impl Future> + Send - where - ::Item: Eq + Hash; - - fn counts_by(self, f: F) -> impl Future> + Send - where - F: Fn(Item) -> K + Send, - K: Eq + Hash + Send; - - fn counts_by_with_cap( - self, - f: F, - ) -> impl Future> + Send - where - F: Fn(Item) -> K + Send, - K: Eq + Hash + Send; - - fn counts_with_cap( - self, - ) -> impl Future> + Send - where - ::Item: Eq + Hash; - - fn fold_default(self, f: F) -> impl Future + Send - where - F: Fn(T, Item) -> Fut + Send, - Fut: Future + Send, - T: Default + Send; -} - -impl Tools for S -where - S: Stream + Send + Sized, - ::Item: Send, -{ - #[inline] - fn counts(self) -> impl Future> + Send - where - ::Item: Eq + Hash, - { - self.counts_with_cap::<0>() - } - - #[inline] - fn counts_by(self, f: F) -> impl Future> + Send - where - F: Fn(Item) -> K + Send, - K: Eq + Hash + Send, - { - self.counts_by_with_cap::<0, K, F>(f) - } - - #[inline] - fn counts_by_with_cap( - self, - f: F, - ) -> impl Future> + Send - where - F: Fn(Item) -> K + Send, - K: Eq + Hash + Send, - { - self.map(f).counts_with_cap::() - } - - #[inline] - fn counts_with_cap( - self, - ) -> impl Future> + Send - where - ::Item: Eq + Hash, - { - self.ready_fold(HashMap::with_capacity(CAP), |mut counts, item| { - let entry = counts.entry(item).or_default(); - let value = *entry; - *entry = expected!(value + 1); - counts - }) - } - - #[inline] - fn fold_default(self, f: F) -> impl Future + Send - where - F: Fn(T, 
Item) -> Fut + Send, - Fut: Future + Send, - T: Default + Send, - { - self.fold(T::default(), f) - } -} diff --git a/src/core/utils/stream/try_broadband.rs b/src/core/utils/stream/try_broadband.rs deleted file mode 100644 index 361b4a92..00000000 --- a/src/core/utils/stream/try_broadband.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Synchronous combinator extensions to futures::TryStream - -use futures::{TryFuture, TryStream, TryStreamExt}; - -use super::automatic_width; -use crate::Result; - -/// Concurrency extensions to augment futures::TryStreamExt. broad_ combinators -/// produce out-of-order -pub trait TryBroadbandExt -where - Self: TryStream> + Send + Sized, -{ - fn broadn_and_then( - self, - n: N, - f: F, - ) -> impl TryStream> + Send - where - N: Into>, - F: Fn(Self::Ok) -> Fut + Send, - Fut: TryFuture> + Send; - - fn broad_and_then( - self, - f: F, - ) -> impl TryStream> + Send - where - F: Fn(Self::Ok) -> Fut + Send, - Fut: TryFuture> + Send, - { - self.broadn_and_then(None, f) - } -} - -impl TryBroadbandExt for S -where - S: TryStream> + Send + Sized, -{ - fn broadn_and_then( - self, - n: N, - f: F, - ) -> impl TryStream> + Send - where - N: Into>, - F: Fn(Self::Ok) -> Fut + Send, - Fut: TryFuture> + Send, - { - self.map_ok(f) - .try_buffer_unordered(n.into().unwrap_or_else(automatic_width)) - } -} diff --git a/src/core/utils/stream/try_parallel.rs b/src/core/utils/stream/try_parallel.rs deleted file mode 100644 index 60fef0ae..00000000 --- a/src/core/utils/stream/try_parallel.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! Parallelism stream combinator extensions to futures::Stream - -use futures::{TryFutureExt, stream::TryStream}; -use tokio::{runtime, task::JoinError}; - -use super::TryBroadbandExt; -use crate::{Error, Result, utils::sys::available_parallelism}; - -/// Parallelism extensions to augment futures::StreamExt. 
These combinators are -/// for computation-oriented workloads, unlike -band combinators for I/O -/// workloads; these default to the available compute parallelism for the -/// system. Threads are currently drawn from the tokio-spawn pool. Results are -/// unordered. -pub trait TryParallelExt -where - Self: TryStream> + Send + Sized, - E: From + From + Send + 'static, - T: Send + 'static, -{ - fn paralleln_and_then( - self, - h: H, - n: N, - f: F, - ) -> impl TryStream> + Send - where - N: Into>, - H: Into>, - F: Fn(Self::Ok) -> Result + Clone + Send + 'static, - U: Send + 'static; - - fn parallel_and_then( - self, - h: H, - f: F, - ) -> impl TryStream> + Send - where - H: Into>, - F: Fn(Self::Ok) -> Result + Clone + Send + 'static, - U: Send + 'static, - { - self.paralleln_and_then(h, None, f) - } -} - -impl TryParallelExt for S -where - S: TryStream> + Send + Sized, - E: From + From + Send + 'static, - T: Send + 'static, -{ - fn paralleln_and_then( - self, - h: H, - n: N, - f: F, - ) -> impl TryStream> + Send - where - N: Into>, - H: Into>, - F: Fn(Self::Ok) -> Result + Clone + Send + 'static, - U: Send + 'static, - { - let n = n.into().unwrap_or_else(available_parallelism); - let h = h.into().unwrap_or_else(runtime::Handle::current); - self.broadn_and_then(n, move |val| { - let (h, f) = (h.clone(), f.clone()); - async move { h.spawn_blocking(move || f(val)).map_err(E::from).await? } - }) - } -} diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs deleted file mode 100644 index 287fa1e1..00000000 --- a/src/core/utils/stream/try_ready.rs +++ /dev/null @@ -1,143 +0,0 @@ -//! Synchronous combinator extensions to futures::TryStream -#![allow(clippy::type_complexity)] - -use futures::{ - future::{Ready, ready}, - stream::{AndThen, TryFilterMap, TryFold, TryForEach, TryStream, TryStreamExt, TryTakeWhile}, -}; - -use crate::Result; - -/// Synchronous combinators to augment futures::TryStreamExt. 
-/// -/// This interface is not necessarily complete; feel free to add as-needed. -pub trait TryReadyExt -where - S: TryStream> + ?Sized, - Self: TryStream + Sized, -{ - fn ready_and_then( - self, - f: F, - ) -> AndThen>, impl FnMut(S::Ok) -> Ready>> - where - F: Fn(S::Ok) -> Result; - - fn ready_try_filter_map( - self, - f: F, - ) -> TryFilterMap< - Self, - Ready, E>>, - impl FnMut(S::Ok) -> Ready, E>>, - > - where - F: Fn(S::Ok) -> Result, E>; - - fn ready_try_fold( - self, - init: U, - f: F, - ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> - where - F: Fn(U, S::Ok) -> Result; - - fn ready_try_fold_default( - self, - f: F, - ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> - where - F: Fn(U, S::Ok) -> Result, - U: Default; - - fn ready_try_for_each( - self, - f: F, - ) -> TryForEach>, impl FnMut(S::Ok) -> Ready>> - where - F: FnMut(S::Ok) -> Result<(), E>; - - fn ready_try_take_while( - self, - f: F, - ) -> TryTakeWhile>, impl FnMut(&S::Ok) -> Ready>> - where - F: Fn(&S::Ok) -> Result; -} - -impl TryReadyExt for S -where - S: TryStream> + ?Sized, - Self: TryStream + Sized, -{ - #[inline] - fn ready_and_then( - self, - f: F, - ) -> AndThen>, impl FnMut(S::Ok) -> Ready>> - where - F: Fn(S::Ok) -> Result, - { - self.and_then(move |t| ready(f(t))) - } - - fn ready_try_filter_map( - self, - f: F, - ) -> TryFilterMap< - Self, - Ready, E>>, - impl FnMut(S::Ok) -> Ready, E>>, - > - where - F: Fn(S::Ok) -> Result, E>, - { - self.try_filter_map(move |t| ready(f(t))) - } - - #[inline] - fn ready_try_fold( - self, - init: U, - f: F, - ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> - where - F: Fn(U, S::Ok) -> Result, - { - self.try_fold(init, move |a, t| ready(f(a, t))) - } - - #[inline] - fn ready_try_fold_default( - self, - f: F, - ) -> TryFold>, U, impl FnMut(U, S::Ok) -> Ready>> - where - F: Fn(U, S::Ok) -> Result, - U: Default, - { - self.ready_try_fold(U::default(), f) - } - - #[inline] - fn ready_try_for_each( - self, - mut f: F, - ) -> TryForEach>, impl 
FnMut(S::Ok) -> Ready>> - where - F: FnMut(S::Ok) -> Result<(), E>, - { - self.try_for_each(move |t| ready(f(t))) - } - - #[inline] - fn ready_try_take_while( - self, - f: F, - ) -> TryTakeWhile>, impl FnMut(&S::Ok) -> Ready>> - where - F: Fn(&S::Ok) -> Result, - { - self.try_take_while(move |t| ready(f(t))) - } -} diff --git a/src/core/utils/stream/try_tools.rs b/src/core/utils/stream/try_tools.rs deleted file mode 100644 index 417806fc..00000000 --- a/src/core/utils/stream/try_tools.rs +++ /dev/null @@ -1,44 +0,0 @@ -//! TryStreamTools for futures::TryStream -#![allow(clippy::type_complexity)] - -use futures::{TryStream, TryStreamExt, future, future::Ready, stream::TryTakeWhile}; - -use crate::Result; - -/// TryStreamTools -pub trait TryTools -where - S: TryStream> + ?Sized, - Self: TryStream + Sized, -{ - fn try_take( - self, - n: usize, - ) -> TryTakeWhile< - Self, - Ready>, - impl FnMut(&S::Ok) -> Ready>, - >; -} - -impl TryTools for S -where - S: TryStream> + ?Sized, - Self: TryStream + Sized, -{ - #[inline] - fn try_take( - self, - mut n: usize, - ) -> TryTakeWhile< - Self, - Ready>, - impl FnMut(&S::Ok) -> Ready>, - > { - self.try_take_while(move |_| { - let res = future::ok(n > 0); - n = n.saturating_sub(1); - res - }) - } -} diff --git a/src/core/utils/stream/try_wideband.rs b/src/core/utils/stream/try_wideband.rs deleted file mode 100644 index 0af3c8ec..00000000 --- a/src/core/utils/stream/try_wideband.rs +++ /dev/null @@ -1,57 +0,0 @@ -//! Synchronous combinator extensions to futures::TryStream - -use futures::{TryFuture, TryStream, TryStreamExt}; - -use super::automatic_width; -use crate::Result; - -/// Concurrency extensions to augment futures::TryStreamExt. 
wide_ combinators -/// produce in-order results -pub trait TryWidebandExt -where - Self: TryStream> + Send + Sized, -{ - fn widen_and_then( - self, - n: N, - f: F, - ) -> impl TryStream> + Send - where - N: Into>, - F: Fn(Self::Ok) -> Fut + Send, - Fut: TryFuture> + Send, - U: Send; - - fn wide_and_then( - self, - f: F, - ) -> impl TryStream> + Send - where - F: Fn(Self::Ok) -> Fut + Send, - Fut: TryFuture> + Send, - U: Send, - { - self.widen_and_then(None, f) - } -} - -impl TryWidebandExt for S -where - S: TryStream> + Send + Sized, - E: Send, -{ - fn widen_and_then( - self, - n: N, - f: F, - ) -> impl TryStream> + Send - where - N: Into>, - F: Fn(Self::Ok) -> Fut + Send, - Fut: TryFuture> + Send, - U: Send, - { - self.map_ok(f) - .try_buffered(n.into().unwrap_or_else(automatic_width)) - } -} diff --git a/src/core/utils/stream/wideband.rs b/src/core/utils/stream/wideband.rs deleted file mode 100644 index cbebf610..00000000 --- a/src/core/utils/stream/wideband.rs +++ /dev/null @@ -1,82 +0,0 @@ -//! Wideband stream combinator extensions to futures::Stream - -use std::convert::identity; - -use futures::{ - Future, - stream::{Stream, StreamExt}, -}; - -use super::{ReadyExt, automatic_width}; - -/// Concurrency extensions to augment futures::StreamExt. wideband_ combinators -/// produce in-order. 
-pub trait WidebandExt -where - Self: Stream + Send + Sized, -{ - /// Concurrent filter_map(); ordered results - fn widen_filter_map(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future> + Send, - U: Send; - - fn widen_then(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - U: Send; - - #[inline] - fn wide_filter_map(self, f: F) -> impl Stream + Send - where - F: Fn(Item) -> Fut + Send, - Fut: Future> + Send, - U: Send, - { - self.widen_filter_map(None, f) - } - - #[inline] - fn wide_then(self, f: F) -> impl Stream + Send - where - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - U: Send, - { - self.widen_then(None, f) - } -} - -impl WidebandExt for S -where - S: Stream + Send + Sized, -{ - #[inline] - fn widen_filter_map(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future> + Send, - U: Send, - { - self.map(f) - .buffered(n.into().unwrap_or_else(automatic_width)) - .ready_filter_map(identity) - } - - #[inline] - fn widen_then(self, n: N, f: F) -> impl Stream + Send - where - N: Into>, - F: Fn(Item) -> Fut + Send, - Fut: Future + Send, - U: Send, - { - self.map(f) - .buffered(n.into().unwrap_or_else(automatic_width)) - } -} diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs deleted file mode 100644 index 7d81903d..00000000 --- a/src/core/utils/string.rs +++ /dev/null @@ -1,116 +0,0 @@ -mod between; -mod split; -mod tests; -mod unquote; -mod unquoted; - -pub use self::{between::Between, split::SplitInfallible, unquote::Unquote, unquoted::Unquoted}; -use crate::{Result, utils::exchange}; - -pub const EMPTY: &str = ""; - -/// Constant expression to bypass format! if the argument is a string literal -/// but not a format string. If the literal is a format string then String is -/// returned otherwise the input (i.e. &'static str) is returned. 
If multiple -/// arguments are provided the first is assumed to be a format string. -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! format_maybe { - ($s:literal $(,)?) => { - if $crate::is_format!($s) { std::format!($s).into() } else { $s.into() } - }; - - ($s:literal, $($args:tt)+) => { - std::format!($s, $($args)+).into() - }; -} - -/// Constant expression to decide if a literal is a format string. Note: could -/// use some improvement. -#[macro_export] -#[collapse_debuginfo(yes)] -macro_rules! is_format { - ($s:literal) => { - ::const_str::contains!($s, "{") && ::const_str::contains!($s, "}") - }; - - ($($s:tt)+) => { - false - }; -} - -#[inline] -pub fn collect_stream(func: F) -> Result -where - F: FnOnce(&mut dyn std::fmt::Write) -> Result<()>, -{ - let mut out = String::new(); - func(&mut out)?; - Ok(out) -} - -#[inline] -#[must_use] -pub fn camel_to_snake_string(s: &str) -> String { - let est_len = s - .chars() - .fold(s.len(), |est, c| est.saturating_add(usize::from(c.is_ascii_uppercase()))); - - let mut ret = String::with_capacity(est_len); - camel_to_snake_case(&mut ret, s.as_bytes()).expect("string-to-string stream error"); - ret -} - -#[inline] -#[allow(clippy::unbuffered_bytes)] // these are allocated string utilities, not file I/O utils -pub fn camel_to_snake_case(output: &mut O, input: I) -> Result<()> -where - I: std::io::Read, - O: std::fmt::Write, -{ - let mut state = false; - input - .bytes() - .take_while(Result::is_ok) - .map(Result::unwrap) - .map(char::from) - .try_for_each(|ch| { - let m = ch.is_ascii_uppercase(); - let s = exchange(&mut state, !m); - if m && s { - output.write_char('_')?; - } - output.write_char(ch.to_ascii_lowercase())?; - Result::<()>::Ok(()) - }) -} - -/// Find the common prefix from a collection of strings and return a slice -/// ``` -/// use conduwuit_core::utils::string::common_prefix; -/// let input = ["conduwuit", "conduit", "construct"]; -/// common_prefix(&input) == "con"; -/// ``` -#[must_use] 
-#[allow(clippy::string_slice)] -pub fn common_prefix<'a>(choice: &'a [&str]) -> &'a str { - choice.first().map_or(EMPTY, move |best| { - choice.iter().skip(1).fold(*best, |best, choice| { - &best[0..choice - .char_indices() - .zip(best.char_indices()) - .take_while(|&(a, b)| a == b) - .count()] - }) - }) -} - -/// Parses the bytes into a string. -pub fn string_from_bytes(bytes: &[u8]) -> Result { - let str: &str = str_from_bytes(bytes)?; - Ok(str.to_owned()) -} - -/// Parses the bytes into a string. -#[inline] -pub fn str_from_bytes(bytes: &[u8]) -> Result<&str> { Ok(std::str::from_utf8(bytes)?) } diff --git a/src/core/utils/string/between.rs b/src/core/utils/string/between.rs deleted file mode 100644 index 05c137b4..00000000 --- a/src/core/utils/string/between.rs +++ /dev/null @@ -1,28 +0,0 @@ -type Delim<'a> = (&'a str, &'a str); - -/// Slice a string between a pair of delimeters. -pub trait Between<'a> { - /// Extract a string between the delimeters. If the delimeters were not - /// found None is returned, otherwise the first extraction is returned. - fn between(&self, delim: Delim<'_>) -> Option<&'a str>; - - /// Extract a string between the delimeters. If the delimeters were not - /// found the original string is returned; take note of this behavior, - /// if an empty slice is desired for this case use the fallible version and - /// unwrap to EMPTY. 
- fn between_infallible(&self, delim: Delim<'_>) -> &'a str; -} - -impl<'a> Between<'a> for &'a str { - #[inline] - fn between_infallible(&self, delim: Delim<'_>) -> &'a str { - self.between(delim).unwrap_or(self) - } - - #[inline] - fn between(&self, delim: Delim<'_>) -> Option<&'a str> { - self.split_once(delim.0) - .and_then(|(_, b)| b.rsplit_once(delim.1)) - .map(|(a, _)| a) - } -} diff --git a/src/core/utils/string/split.rs b/src/core/utils/string/split.rs deleted file mode 100644 index e643f51b..00000000 --- a/src/core/utils/string/split.rs +++ /dev/null @@ -1,26 +0,0 @@ -use super::EMPTY; - -type Pair<'a> = (&'a str, &'a str); - -/// Split a string with default behaviors on non-match. -pub trait SplitInfallible<'a> { - /// Split a string at the first occurrence of delim. If not found, the - /// entire string is returned in \[0\], while \[1\] is empty. - fn split_once_infallible(&self, delim: &str) -> Pair<'a>; - - /// Split a string from the last occurrence of delim. If not found, the - /// entire string is returned in \[0\], while \[1\] is empty. 
- fn rsplit_once_infallible(&self, delim: &str) -> Pair<'a>; -} - -impl<'a> SplitInfallible<'a> for &'a str { - #[inline] - fn rsplit_once_infallible(&self, delim: &str) -> Pair<'a> { - self.rsplit_once(delim).unwrap_or((self, EMPTY)) - } - - #[inline] - fn split_once_infallible(&self, delim: &str) -> Pair<'a> { - self.split_once(delim).unwrap_or((self, EMPTY)) - } -} diff --git a/src/core/utils/string/tests.rs b/src/core/utils/string/tests.rs deleted file mode 100644 index e8c17de6..00000000 --- a/src/core/utils/string/tests.rs +++ /dev/null @@ -1,70 +0,0 @@ -#![cfg(test)] - -#[test] -fn common_prefix() { - let input = ["conduwuit", "conduit", "construct"]; - let output = super::common_prefix(&input); - assert_eq!(output, "con"); -} - -#[test] -fn common_prefix_empty() { - let input = ["abcdefg", "hijklmn", "opqrstu"]; - let output = super::common_prefix(&input); - assert_eq!(output, ""); -} - -#[test] -fn common_prefix_none() { - let input = []; - let output = super::common_prefix(&input); - assert_eq!(output, ""); -} - -#[test] -fn camel_to_snake_case_0() { - let res = super::camel_to_snake_string("CamelToSnakeCase"); - assert_eq!(res, "camel_to_snake_case"); -} - -#[test] -fn camel_to_snake_case_1() { - let res = super::camel_to_snake_string("CAmelTOSnakeCase"); - assert_eq!(res, "camel_tosnake_case"); -} - -#[test] -fn unquote() { - use super::Unquote; - - assert_eq!("\"foo\"".unquote(), Some("foo")); - assert_eq!("\"foo".unquote(), None); - assert_eq!("foo".unquote(), None); -} - -#[test] -fn unquote_infallible() { - use super::Unquote; - - assert_eq!("\"foo\"".unquote_infallible(), "foo"); - assert_eq!("\"foo".unquote_infallible(), "\"foo"); - assert_eq!("foo".unquote_infallible(), "foo"); -} - -#[test] -fn between() { - use super::Between; - - assert_eq!("\"foo\"".between(("\"", "\"")), Some("foo")); - assert_eq!("\"foo".between(("\"", "\"")), None); - assert_eq!("foo".between(("\"", "\"")), None); -} - -#[test] -fn between_infallible() { - use 
super::Between; - - assert_eq!("\"foo\"".between_infallible(("\"", "\"")), "foo"); - assert_eq!("\"foo".between_infallible(("\"", "\"")), "\"foo"); - assert_eq!("foo".between_infallible(("\"", "\"")), "foo"); -} diff --git a/src/core/utils/string/unquote.rs b/src/core/utils/string/unquote.rs deleted file mode 100644 index ea7ddbf9..00000000 --- a/src/core/utils/string/unquote.rs +++ /dev/null @@ -1,35 +0,0 @@ -const QUOTE: char = '"'; - -/// Slice a string between quotes -pub trait Unquote<'a> { - /// Whether the input is quoted. If this is false the fallible methods of - /// this interface will fail. - fn is_quoted(&self) -> bool; - - /// Unquotes a string. If the input is not quoted it is simply returned - /// as-is. If the input is partially quoted on either end that quote is not - /// removed. - fn unquote(&self) -> Option<&'a str>; - - /// Unquotes a string. The input must be quoted on each side for Some to be - /// returned - fn unquote_infallible(&self) -> &'a str; -} - -impl<'a> Unquote<'a> for &'a str { - #[inline] - fn unquote_infallible(&self) -> &'a str { - self.strip_prefix(QUOTE) - .unwrap_or(self) - .strip_suffix(QUOTE) - .unwrap_or(self) - } - - #[inline] - fn unquote(&self) -> Option<&'a str> { - self.strip_prefix(QUOTE).and_then(|s| s.strip_suffix(QUOTE)) - } - - #[inline] - fn is_quoted(&self) -> bool { self.starts_with(QUOTE) && self.ends_with(QUOTE) } -} diff --git a/src/core/utils/string/unquoted.rs b/src/core/utils/string/unquoted.rs deleted file mode 100644 index 88fa011f..00000000 --- a/src/core/utils/string/unquoted.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::ops::Deref; - -use serde::{Deserialize, Deserializer, de}; - -use super::Unquote; -use crate::{Result, err}; - -/// Unquoted string which deserialized from a quoted string. Construction from a -/// &str is infallible such that the input can already be unquoted. Construction -/// from serde deserialization is fallible and the input must be quoted. 
-#[repr(transparent)] -pub struct Unquoted(str); - -impl<'a> Unquoted { - #[inline] - #[must_use] - pub fn as_str(&'a self) -> &'a str { &self.0 } -} - -impl<'a, 'de: 'a> Deserialize<'de> for &'a Unquoted { - fn deserialize>(deserializer: D) -> Result { - let s = <&'a str>::deserialize(deserializer)?; - s.is_quoted() - .then_some(s) - .ok_or(err!(SerdeDe("expected quoted string"))) - .map_err(de::Error::custom) - .map(Into::into) - } -} - -impl<'a> From<&'a str> for &'a Unquoted { - fn from(s: &'a str) -> &'a Unquoted { - let s: &'a str = s.unquote_infallible(); - - //SAFETY: This is a pattern I lifted from ruma-identifiers for strong-type strs - // by wrapping in a tuple-struct. - #[allow(clippy::transmute_ptr_to_ptr)] - unsafe { - std::mem::transmute(s) - } - } -} - -impl Deref for Unquoted { - type Target = str; - - fn deref(&self) -> &Self::Target { &self.0 } -} - -impl<'a> AsRef for &'a Unquoted { - fn as_ref(&self) -> &'a str { &self.0 } -} diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs deleted file mode 100644 index f795ccb8..00000000 --- a/src/core/utils/sys.rs +++ /dev/null @@ -1,56 +0,0 @@ -pub mod compute; -pub mod storage; - -use std::path::PathBuf; - -pub use compute::available_parallelism; - -use crate::{Result, debug}; - -/// This is needed for opening lots of file descriptors, which tends to -/// happen more often when using RocksDB and making lots of federation -/// connections at startup. The soft limit is usually 1024, and the hard -/// limit is usually 512000; I've personally seen it hit >2000. 
-/// -/// * -/// * -#[cfg(unix)] -pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { - use nix::sys::resource::{Resource::RLIMIT_NOFILE as NOFILE, getrlimit, setrlimit}; - - let (soft_limit, hard_limit) = getrlimit(NOFILE)?; - if soft_limit < hard_limit { - setrlimit(NOFILE, hard_limit, hard_limit)?; - assert_eq!((hard_limit, hard_limit), getrlimit(NOFILE)?, "getrlimit != setrlimit"); - debug!(to = hard_limit, from = soft_limit, "Raised RLIMIT_NOFILE",); - } - - Ok(()) -} - -/// Return a possibly corrected std::env::current_exe() even if the path is -/// marked deleted. -/// -/// # Safety -/// This function is declared unsafe because the original result was altered for -/// security purposes, and altering it back ignores those urposes and should be -/// understood by the user. -pub unsafe fn current_exe() -> Result { - let exe = std::env::current_exe()?; - match exe.to_str() { - | None => Ok(exe), - | Some(str) => Ok(str - .strip_suffix(" (deleted)") - .map(PathBuf::from) - .unwrap_or(exe)), - } -} - -/// Determine if the server's executable was removed or replaced. This is a -/// specific check; useful for successful restarts. May not be available or -/// accurate on all platforms; defaults to false. -#[must_use] -pub fn current_exe_deleted() -> bool { - std::env::current_exe() - .is_ok_and(|exe| exe.to_str().is_some_and(|exe| exe.ends_with(" (deleted)"))) -} diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs deleted file mode 100644 index 5274cd66..00000000 --- a/src/core/utils/sys/compute.rs +++ /dev/null @@ -1,174 +0,0 @@ -//! System utilities related to compute/processing - -use std::{cell::Cell, fmt::Debug, path::PathBuf, sync::LazyLock}; - -use crate::{Result, is_equal_to}; - -type Id = usize; - -type Mask = u128; -type Masks = [Mask; MASK_BITS]; - -const MASK_BITS: usize = 128; - -/// The mask of logical cores available to the process (at startup). 
-static CORES_AVAILABLE: LazyLock = LazyLock::new(|| into_mask(query_cores_available())); - -/// Stores the mask of logical-cores with thread/HT/SMT association. Each group -/// here makes up a physical-core. -static SMT_TOPOLOGY: LazyLock = LazyLock::new(init_smt_topology); - -/// Stores the mask of logical-core associations on a node/socket. Bits are set -/// for all logical cores within all physical cores of the node. -static NODE_TOPOLOGY: LazyLock = LazyLock::new(init_node_topology); - -thread_local! { - /// Tracks the affinity for this thread. This is updated when affinities - /// are set via our set_affinity() interface. - static CORE_AFFINITY: Cell = Cell::default(); -} - -/// Set the core affinity for this thread. The ID should be listed in -/// CORES_AVAILABLE. Empty input is a no-op; prior affinity unchanged. -#[tracing::instrument( - level = "debug", - skip_all, - fields( - id = ?std::thread::current().id(), - name = %std::thread::current().name().unwrap_or("None"), - set = ?ids.clone().collect::>(), - CURRENT = %format!("[b{:b}]", CORE_AFFINITY.get()), - AVAILABLE = %format!("[b{:b}]", *CORES_AVAILABLE), - ), -)] -pub fn set_affinity(mut ids: I) -where - I: Iterator + Clone + Debug, -{ - use core_affinity::{CoreId, set_each_for_current, set_for_current}; - - let n = ids.clone().count(); - let mask: Mask = ids.clone().fold(0, |mask, id| { - debug_assert!(is_core_available(id), "setting affinity to unavailable core"); - mask | (1 << id) - }); - - if n > 1 { - set_each_for_current(ids.map(|id| CoreId { id })); - } else if n > 0 { - set_for_current(CoreId { id: ids.next().expect("n > 0") }); - } - - if mask.count_ones() > 0 { - CORE_AFFINITY.replace(mask); - } -} - -/// Get the core affinity for this thread. 
-pub fn get_affinity() -> impl Iterator { from_mask(CORE_AFFINITY.get()) } - -/// List the cores sharing SMT-tier resources -pub fn smt_siblings() -> impl Iterator { - from_mask(get_affinity().fold(0_u128, |mask, id| { - mask | SMT_TOPOLOGY.get(id).expect("ID must not exceed max cpus") - })) -} - -/// List the cores sharing Node-tier resources relative to this threads current -/// affinity. -pub fn node_siblings() -> impl Iterator { - from_mask(get_affinity().fold(0_u128, |mask, id| { - mask | NODE_TOPOLOGY.get(id).expect("Id must not exceed max cpus") - })) -} - -/// Get the cores sharing SMT resources relative to id. -#[inline] -pub fn smt_affinity(id: Id) -> impl Iterator { - from_mask(*SMT_TOPOLOGY.get(id).expect("ID must not exceed max cpus")) -} - -/// Get the cores sharing Node resources relative to id. -#[inline] -pub fn node_affinity(id: Id) -> impl Iterator { - from_mask(*NODE_TOPOLOGY.get(id).expect("ID must not exceed max cpus")) -} - -/// Get the number of threads which could execute in parallel based on hardware -/// constraints of this system. -#[inline] -#[must_use] -pub fn available_parallelism() -> usize { cores_available().count() } - -/// Gets the ID of the nth core available. This bijects our sequence of cores to -/// actual ID's which may have gaps for cores which are not available. -#[inline] -#[must_use] -pub fn nth_core_available(i: usize) -> Option { cores_available().nth(i) } - -/// Determine if core (by id) is available to the process. -#[inline] -#[must_use] -pub fn is_core_available(id: Id) -> bool { cores_available().any(is_equal_to!(id)) } - -/// Get the list of cores available. The values were recorded at program start. 
-#[inline] -pub fn cores_available() -> impl Iterator { from_mask(*CORES_AVAILABLE) } - -#[cfg(target_os = "linux")] -#[inline] -pub fn getcpu() -> Result { - use crate::{Error, utils::math}; - - // SAFETY: This is part of an interface with many low-level calls taking many - // raw params, but it's unclear why this specific call is unsafe. Nevertheless - // the value obtained here is semantically unsafe because it can change on the - // instruction boundary trailing its own acquisition and also any other time. - let ret: i32 = unsafe { libc::sched_getcpu() }; - - #[cfg(target_os = "linux")] - // SAFETY: On modern linux systems with a vdso if we can optimize away the branch checking - // for error (see getcpu(2)) then this system call becomes a memory access. - unsafe { - std::hint::assert_unchecked(ret >= 0); - }; - - if ret == -1 { - return Err(Error::from_errno()); - } - - math::try_into(ret) -} - -#[cfg(not(target_os = "linux"))] -#[inline] -pub fn getcpu() -> Result { Err(crate::Error::Io(std::io::ErrorKind::Unsupported.into())) } - -fn query_cores_available() -> impl Iterator { - core_affinity::get_core_ids() - .unwrap_or_default() - .into_iter() - .map(|core_id| core_id.id) -} - -fn init_smt_topology() -> [Mask; MASK_BITS] { [Mask::default(); MASK_BITS] } - -fn init_node_topology() -> [Mask; MASK_BITS] { [Mask::default(); MASK_BITS] } - -fn into_mask(ids: I) -> Mask -where - I: Iterator, -{ - ids.inspect(|&id| { - debug_assert!(id < MASK_BITS, "Core ID must be < Mask::BITS at least for now"); - }) - .fold(Mask::default(), |mask, id| mask | (1 << id)) -} - -fn from_mask(v: Mask) -> impl Iterator { - (0..MASK_BITS).filter(move |&i| (v & (1 << i)) != 0) -} - -fn _sys_path(id: usize, suffix: &str) -> PathBuf { - format!("/sys/devices/system/cpu/cpu{id}/{suffix}").into() -} diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs deleted file mode 100644 index b71c3437..00000000 --- a/src/core/utils/sys/storage.rs +++ /dev/null @@ -1,133 +0,0 @@ 
-//! System utilities related to devices/peripherals - -use std::{ - ffi::OsStr, - fs, - fs::{FileType, read_to_string}, - iter::IntoIterator, - path::{Path, PathBuf}, -}; - -use libc::dev_t; - -use crate::{ - Result, - result::FlatOk, - utils::{result::LogDebugErr, string::SplitInfallible}, -}; - -/// Device characteristics useful for random access throughput -#[derive(Clone, Debug, Default)] -pub struct Parallelism { - /// Number of requests for the device. - pub nr_requests: Option, - - /// Individual queue characteristics. - pub mq: Vec, -} - -/// Device queue characteristics -#[derive(Clone, Debug, Default)] -pub struct Queue { - /// Queue's indice. - pub id: usize, - - /// Number of requests for the queue. - pub nr_tags: Option, - - /// CPU affinities for the queue. - pub cpu_list: Vec, -} - -/// Get device characteristics useful for random access throughput by name. -#[must_use] -pub fn parallelism(path: &Path) -> Parallelism { - let dev_id = dev_from_path(path).log_debug_err().unwrap_or_default(); - - let mq_path = block_path(dev_id).join("mq/"); - - let nr_requests_path = block_path(dev_id).join("queue/nr_requests"); - - Parallelism { - nr_requests: read_to_string(&nr_requests_path) - .ok() - .as_deref() - .map(str::trim) - .map(str::parse) - .flat_ok(), - - mq: fs::read_dir(&mq_path) - .into_iter() - .flat_map(IntoIterator::into_iter) - .filter_map(Result::ok) - .filter(|entry| entry.file_type().as_ref().is_ok_and(FileType::is_dir)) - .map(|dir| queue_parallelism(&dir.path())) - .collect(), - } -} - -/// Get device queue characteristics by mq path on sysfs(5) -fn queue_parallelism(dir: &Path) -> Queue { - let queue_id = dir.file_name(); - - let nr_tags_path = dir.join("nr_tags"); - - let cpu_list_path = dir.join("cpu_list"); - - Queue { - id: queue_id - .and_then(OsStr::to_str) - .map(str::parse) - .flat_ok() - .expect("queue has some numerical identifier"), - - nr_tags: read_to_string(&nr_tags_path) - .ok() - .as_deref() - .map(str::trim) - 
.map(str::parse) - .flat_ok(), - - cpu_list: read_to_string(&cpu_list_path) - .iter() - .flat_map(|list| list.trim().split(',')) - .map(str::trim) - .map(str::parse) - .filter_map(Result::ok) - .collect(), - } -} - -/// Get the name of the block device on which Path is mounted. -pub fn name_from_path(path: &Path) -> Result { - use std::io::{Error, ErrorKind::NotFound}; - - let (major, minor) = dev_from_path(path)?; - let path = block_path((major, minor)).join("uevent"); - read_to_string(path) - .iter() - .map(String::as_str) - .flat_map(str::lines) - .map(|line| line.split_once_infallible("=")) - .find_map(|(key, val)| (key == "DEVNAME").then_some(val)) - .ok_or_else(|| Error::new(NotFound, "DEVNAME not found.")) - .map_err(Into::into) - .map(Into::into) -} - -/// Get the (major, minor) of the block device on which Path is mounted. -#[allow(clippy::useless_conversion, clippy::unnecessary_fallible_conversions)] -fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { - #[cfg(target_family = "unix")] - use std::os::unix::fs::MetadataExt; - - let stat = fs::metadata(path)?; - let dev_id = stat.dev().try_into()?; - let (major, minor) = (libc::major(dev_id), libc::minor(dev_id)); - - Ok((major.try_into()?, minor.try_into()?)) -} - -fn block_path((major, minor): (dev_t, dev_t)) -> PathBuf { - format!("/sys/dev/block/{major}:{minor}/").into() -} diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs deleted file mode 100644 index 05a0655b..00000000 --- a/src/core/utils/tests.rs +++ /dev/null @@ -1,278 +0,0 @@ -#![allow(clippy::disallowed_methods)] - -use crate::utils; - -#[test] -fn increment_none() { - let bytes: [u8; 8] = utils::increment(None); - let res = u64::from_be_bytes(bytes); - assert_eq!(res, 1); -} - -#[test] -fn increment_fault() { - let start: u8 = 127; - let bytes: [u8; 1] = start.to_be_bytes(); - let bytes: [u8; 8] = utils::increment(Some(&bytes)); - let res = u64::from_be_bytes(bytes); - assert_eq!(res, 1); -} - -#[test] -fn increment_norm() { - 
let start: u64 = 1_234_567; - let bytes: [u8; 8] = start.to_be_bytes(); - let bytes: [u8; 8] = utils::increment(Some(&bytes)); - let res = u64::from_be_bytes(bytes); - assert_eq!(res, 1_234_568); -} - -#[test] -fn increment_wrap() { - let start = u64::MAX; - let bytes: [u8; 8] = start.to_be_bytes(); - let bytes: [u8; 8] = utils::increment(Some(&bytes)); - let res = u64::from_be_bytes(bytes); - assert_eq!(res, 0); -} - -#[test] -fn checked_add() { - use crate::checked; - - let a = 1234; - let res = checked!(a + 1).unwrap(); - assert_eq!(res, 1235); -} - -#[test] -#[should_panic(expected = "overflow")] -fn checked_add_overflow() { - use crate::checked; - - let a = u64::MAX; - let res = checked!(a + 1).expect("overflow"); - assert_eq!(res, 0); -} - -#[tokio::test] -async fn mutex_map_cleanup() { - use crate::utils::MutexMap; - - let map = MutexMap::::new(); - - let lock = map.lock("foo").await; - assert!(!map.is_empty(), "map must not be empty"); - - drop(lock); - assert!(map.is_empty(), "map must be empty"); -} - -#[tokio::test] -async fn mutex_map_contend() { - use std::sync::Arc; - - use tokio::sync::Barrier; - - use crate::utils::MutexMap; - - let map = Arc::new(MutexMap::::new()); - let seq = Arc::new([Barrier::new(2), Barrier::new(2)]); - let str = "foo".to_owned(); - - let seq_ = seq.clone(); - let map_ = map.clone(); - let str_ = str.clone(); - let join_a = tokio::spawn(async move { - let _lock = map_.lock(&str_).await; - assert!(!map_.is_empty(), "A0 must not be empty"); - seq_[0].wait().await; - assert!(map_.contains(&str_), "A1 must contain key"); - }); - - let seq_ = seq.clone(); - let map_ = map.clone(); - let str_ = str.clone(); - let join_b = tokio::spawn(async move { - let _lock = map_.lock(&str_).await; - assert!(!map_.is_empty(), "B0 must not be empty"); - seq_[1].wait().await; - assert!(map_.contains(&str_), "B1 must contain key"); - }); - - seq[0].wait().await; - assert!(map.contains(&str), "Must contain key"); - seq[1].wait().await; - - 
tokio::try_join!(join_b, join_a).expect("joined"); - assert!(map.is_empty(), "Must be empty"); -} - -#[test] -#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] -fn set_intersection_none() { - use utils::set::intersection; - - let a: [&str; 0] = []; - let b: [&str; 0] = []; - let i = [a.iter(), b.iter()]; - let r = intersection(i.into_iter()); - assert_eq!(r.count(), 0); - - let a: [&str; 0] = []; - let b = ["abc", "def"]; - let i = [a.iter(), b.iter()]; - let r = intersection(i.into_iter()); - assert_eq!(r.count(), 0); - let i = [b.iter(), a.iter()]; - let r = intersection(i.into_iter()); - assert_eq!(r.count(), 0); - let i = [a.iter()]; - let r = intersection(i.into_iter()); - assert_eq!(r.count(), 0); - - let a = ["foo", "bar", "baz"]; - let b = ["def", "hij", "klm", "nop"]; - let i = [a.iter(), b.iter()]; - let r = intersection(i.into_iter()); - assert_eq!(r.count(), 0); -} - -#[test] -#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] -fn set_intersection_all() { - use utils::set::intersection; - - let a = ["foo"]; - let b = ["foo"]; - let i = [a.iter(), b.iter()]; - let r = intersection(i.into_iter()); - assert!(r.eq(["foo"].iter())); - - let a = ["foo", "bar"]; - let b = ["bar", "foo"]; - let i = [a.iter(), b.iter()]; - let r = intersection(i.into_iter()); - assert!(r.eq(["foo", "bar"].iter())); - let i = [b.iter()]; - let r = intersection(i.into_iter()); - assert!(r.eq(["bar", "foo"].iter())); - - let a = ["foo", "bar", "baz"]; - let b = ["baz", "foo", "bar"]; - let c = ["bar", "baz", "foo"]; - let i = [a.iter(), b.iter(), c.iter()]; - let r = intersection(i.into_iter()); - assert!(r.eq(["foo", "bar", "baz"].iter())); -} - -#[test] -#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] -fn set_intersection_some() { - use utils::set::intersection; - - let a = ["foo"]; - let b = ["bar", "foo"]; - let i = [a.iter(), b.iter()]; - let r = intersection(i.into_iter()); - assert!(r.eq(["foo"].iter())); - let 
i = [b.iter(), a.iter()]; - let r = intersection(i.into_iter()); - assert!(r.eq(["foo"].iter())); - - let a = ["abcdef", "foo", "hijkl", "abc"]; - let b = ["hij", "bar", "baz", "abc", "foo"]; - let c = ["abc", "xyz", "foo", "ghi"]; - let i = [a.iter(), b.iter(), c.iter()]; - let r = intersection(i.into_iter()); - assert!(r.eq(["foo", "abc"].iter())); -} - -#[test] -#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] -fn set_intersection_sorted_some() { - use utils::set::intersection_sorted; - - let a = ["bar"]; - let b = ["bar", "foo"]; - let i = [a.iter(), b.iter()]; - let r = intersection_sorted(i.into_iter()); - assert!(r.eq(["bar"].iter())); - let i = [b.iter(), a.iter()]; - let r = intersection_sorted(i.into_iter()); - assert!(r.eq(["bar"].iter())); - - let a = ["aaa", "ccc", "eee", "ggg"]; - let b = ["aaa", "bbb", "ccc", "ddd", "eee"]; - let c = ["bbb", "ccc", "eee", "fff"]; - let i = [a.iter(), b.iter(), c.iter()]; - let r = intersection_sorted(i.into_iter()); - assert!(r.eq(["ccc", "eee"].iter())); -} - -#[test] -#[allow(clippy::iter_on_single_items, clippy::many_single_char_names)] -fn set_intersection_sorted_all() { - use utils::set::intersection_sorted; - - let a = ["foo"]; - let b = ["foo"]; - let i = [a.iter(), b.iter()]; - let r = intersection_sorted(i.into_iter()); - assert!(r.eq(["foo"].iter())); - - let a = ["bar", "foo"]; - let b = ["bar", "foo"]; - let i = [a.iter(), b.iter()]; - let r = intersection_sorted(i.into_iter()); - assert!(r.eq(["bar", "foo"].iter())); - let i = [b.iter()]; - let r = intersection_sorted(i.into_iter()); - assert!(r.eq(["bar", "foo"].iter())); - - let a = ["bar", "baz", "foo"]; - let b = ["bar", "baz", "foo"]; - let c = ["bar", "baz", "foo"]; - let i = [a.iter(), b.iter(), c.iter()]; - let r = intersection_sorted(i.into_iter()); - assert!(r.eq(["bar", "baz", "foo"].iter())); -} - -#[tokio::test] -async fn set_intersection_sorted_stream2() { - use futures::StreamExt; - use utils::{IterStream, 
set::intersection_sorted_stream2}; - - let a = ["bar"]; - let b = ["bar", "foo"]; - let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream()) - .collect::>() - .await; - assert!(r.eq(&["bar"])); - - let r = intersection_sorted_stream2(b.iter().stream(), a.iter().stream()) - .collect::>() - .await; - assert!(r.eq(&["bar"])); - - let a = ["aaa", "ccc", "xxx", "yyy"]; - let b = ["hhh", "iii", "jjj", "zzz"]; - let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream()) - .collect::>() - .await; - assert!(r.is_empty()); - - let a = ["aaa", "ccc", "eee", "ggg"]; - let b = ["aaa", "bbb", "ccc", "ddd", "eee"]; - let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream()) - .collect::>() - .await; - assert!(r.eq(&["aaa", "ccc", "eee"])); - - let a = ["aaa", "ccc", "eee", "ggg", "hhh", "iii"]; - let b = ["bbb", "ccc", "ddd", "fff", "ggg", "iii"]; - let r = intersection_sorted_stream2(a.iter().stream(), b.iter().stream()) - .collect::>() - .await; - assert!(r.eq(&["ccc", "ggg", "iii"])); -} diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs deleted file mode 100644 index 73f73971..00000000 --- a/src/core/utils/time.rs +++ /dev/null @@ -1,131 +0,0 @@ -pub mod exponential_backoff; - -use std::time::{Duration, SystemTime, UNIX_EPOCH}; - -use crate::{Result, err}; - -#[inline] -#[must_use] -#[allow(clippy::as_conversions, clippy::cast_possible_truncation)] -pub fn now_millis() -> u64 { - UNIX_EPOCH - .elapsed() - .expect("positive duration after epoch") - .as_millis() as u64 -} - -#[inline] -pub fn parse_timepoint_ago(ago: &str) -> Result { - timepoint_ago(parse_duration(ago)?) 
-} - -#[inline] -pub fn timepoint_ago(duration: Duration) -> Result { - SystemTime::now() - .checked_sub(duration) - .ok_or_else(|| err!(Arithmetic("Duration {duration:?} is too large"))) -} - -#[inline] -pub fn timepoint_from_now(duration: Duration) -> Result { - SystemTime::now() - .checked_add(duration) - .ok_or_else(|| err!(Arithmetic("Duration {duration:?} is too large"))) -} - -#[inline] -pub fn parse_duration(duration: &str) -> Result { - cyborgtime::parse_duration(duration) - .map_err(|error| err!("'{duration:?}' is not a valid duration string: {error:?}")) -} - -#[must_use] -pub fn rfc2822_from_seconds(epoch: i64) -> String { - use chrono::{DateTime, Utc}; - - DateTime::::from_timestamp(epoch, 0) - .unwrap_or_default() - .to_rfc2822() -} - -#[must_use] -pub fn format(ts: SystemTime, str: &str) -> String { - use chrono::{DateTime, Utc}; - - let dt: DateTime = ts.into(); - dt.format(str).to_string() -} - -#[must_use] -#[allow(clippy::as_conversions, clippy::cast_possible_truncation, clippy::cast_sign_loss)] -pub fn pretty(d: Duration) -> String { - use Unit::*; - - let fmt = |w, f, u| format!("{w}.{f} {u}"); - let gen64 = |w, f, u| fmt(w, (f * 100.0) as u32, u); - let gen128 = |w, f, u| gen64(u64::try_from(w).expect("u128 to u64"), f, u); - match whole_and_frac(d) { - | (Days(whole), frac) => gen64(whole, frac, "days"), - | (Hours(whole), frac) => gen64(whole, frac, "hours"), - | (Mins(whole), frac) => gen64(whole, frac, "minutes"), - | (Secs(whole), frac) => gen64(whole, frac, "seconds"), - | (Millis(whole), frac) => gen128(whole, frac, "milliseconds"), - | (Micros(whole), frac) => gen128(whole, frac, "microseconds"), - | (Nanos(whole), frac) => gen128(whole, frac, "nanoseconds"), - } -} - -/// Return a pair of (whole part, frac part) from a duration where. The whole -/// part is the largest Unit containing a non-zero value, the frac part is a -/// rational remainder left over. 
-#[must_use] -#[allow(clippy::as_conversions, clippy::cast_precision_loss)] -pub fn whole_and_frac(d: Duration) -> (Unit, f64) { - use Unit::*; - - let whole = whole_unit(d); - (whole, match whole { - | Days(_) => (d.as_secs() % 86_400) as f64 / 86_400.0, - | Hours(_) => (d.as_secs() % 3_600) as f64 / 3_600.0, - | Mins(_) => (d.as_secs() % 60) as f64 / 60.0, - | Secs(_) => f64::from(d.subsec_millis()) / 1000.0, - | Millis(_) => f64::from(d.subsec_micros()) / 1000.0, - | Micros(_) => f64::from(d.subsec_nanos()) / 1000.0, - | Nanos(_) => 0.0, - }) -} - -/// Return the largest Unit which represents the duration. The value is -/// rounded-down, but never zero. -#[must_use] -pub fn whole_unit(d: Duration) -> Unit { - use Unit::*; - - match d.as_secs() { - | 86_400.. => Days(d.as_secs() / 86_400), - | 3_600..=86_399 => Hours(d.as_secs() / 3_600), - | 60..=3_599 => Mins(d.as_secs() / 60), - - | _ => match d.as_micros() { - | 1_000_000.. => Secs(d.as_secs()), - | 1_000..=999_999 => Millis(d.subsec_millis().into()), - - | _ => match d.as_nanos() { - | 1_000.. 
=> Micros(d.subsec_micros().into()), - - | _ => Nanos(d.subsec_nanos().into()), - }, - }, - } -} - -#[derive(Eq, PartialEq, Clone, Copy, Debug)] -pub enum Unit { - Days(u64), - Hours(u64), - Mins(u64), - Secs(u64), - Millis(u128), - Micros(u128), - Nanos(u128), -} diff --git a/src/core/utils/time/exponential_backoff.rs b/src/core/utils/time/exponential_backoff.rs deleted file mode 100644 index 682c2592..00000000 --- a/src/core/utils/time/exponential_backoff.rs +++ /dev/null @@ -1,29 +0,0 @@ -use std::{cmp, time::Duration}; - -/// Returns false if the exponential backoff has expired based on the inputs -#[inline] -#[must_use] -pub fn continue_exponential_backoff_secs( - min: u64, - max: u64, - elapsed: Duration, - tries: u32, -) -> bool { - let min = Duration::from_secs(min); - let max = Duration::from_secs(max); - continue_exponential_backoff(min, max, elapsed, tries) -} - -/// Returns false if the exponential backoff has expired based on the inputs -#[inline] -#[must_use] -pub fn continue_exponential_backoff( - min: Duration, - max: Duration, - elapsed: Duration, - tries: u32, -) -> bool { - let min = min.saturating_mul(tries).saturating_mul(tries); - let min = cmp::min(min, max); - elapsed < min -} diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml deleted file mode 100644 index 55d4793f..00000000 --- a/src/database/Cargo.toml +++ /dev/null @@ -1,63 +0,0 @@ -[package] -name = "conduwuit_database" -categories.workspace = true -description.workspace = true -edition.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version.workspace = true - -[lib] -path = "mod.rs" -crate-type = [ - "rlib", -# "dylib", -] - -[features] -io_uring = [ - "rust-rocksdb/io-uring", -] -jemalloc = [ - "conduwuit-core/jemalloc", - "rust-rocksdb/jemalloc", -] -jemalloc_conf = [ - "conduwuit-core/jemalloc_conf", -] -jemalloc_prof = [ - "conduwuit-core/jemalloc_prof", -] -jemalloc_stats = [ - 
"conduwuit-core/jemalloc_stats", -] -release_max_log_level = [ - "conduwuit-core/release_max_log_level", - "log/max_level_trace", - "log/release_max_level_info", - "tracing/max_level_trace", - "tracing/release_max_level_info", -] -zstd_compression = [ - "conduwuit-core/zstd_compression", - "rust-rocksdb/zstd", -] - -[dependencies] -async-channel.workspace = true -conduwuit-core.workspace = true -const-str.workspace = true -futures.workspace = true -log.workspace = true -minicbor.workspace = true -minicbor-serde.workspace = true -rust-rocksdb.workspace = true -serde.workspace = true -serde_json.workspace = true -tokio.workspace = true -tracing.workspace = true - -[lints] -workspace = true diff --git a/src/database/benches.rs b/src/database/benches.rs deleted file mode 100644 index 56d1411c..00000000 --- a/src/database/benches.rs +++ /dev/null @@ -1,17 +0,0 @@ -#[cfg(conduwuit_bench)] -extern crate test; - -#[cfg(conduwuit_bench)] -#[cfg_attr(conduwuit_bench, bench)] -fn ser_str(b: &mut test::Bencher) { - use conduwuit::ruma::{RoomId, UserId}; - - use crate::ser::serialize_to_vec; - - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - b.iter(|| { - let key = (user_id, room_id); - let _s = serialize_to_vec(key).expect("failed to serialize user_id"); - }); -} diff --git a/src/database/cork.rs b/src/database/cork.rs index 11b6efd7..27b59d17 100644 --- a/src/database/cork.rs +++ b/src/database/cork.rs @@ -1,38 +1,27 @@ use std::sync::Arc; -use crate::{Database, Engine}; +use super::KeyValueDatabaseEngine; pub struct Cork { - db: Arc, + db: Arc, flush: bool, sync: bool, } -impl Database { - #[inline] - #[must_use] - pub fn cork(&self) -> Cork { Cork::new(&self.db, false, false) } - - #[inline] - #[must_use] - pub fn cork_and_flush(&self) -> Cork { Cork::new(&self.db, true, false) } - - #[inline] - #[must_use] - pub fn cork_and_sync(&self) -> Cork { Cork::new(&self.db, true, true) } -} - impl 
Cork { - #[inline] - pub(super) fn new(db: &Arc, flush: bool, sync: bool) -> Self { - db.cork(); - Self { db: db.clone(), flush, sync } + pub(crate) fn new(db: &Arc, flush: bool, sync: bool) -> Self { + db.cork().unwrap(); + Cork { + db: db.clone(), + flush, + sync, + } } } impl Drop for Cork { fn drop(&mut self) { - self.db.uncork(); + self.db.uncork().ok(); if self.flush { self.db.flush().ok(); } diff --git a/src/database/de.rs b/src/database/de.rs deleted file mode 100644 index 849b3b2e..00000000 --- a/src/database/de.rs +++ /dev/null @@ -1,488 +0,0 @@ -use conduwuit::{ - Error, Result, arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, -}; -use serde::{ - Deserialize, de, - de::{DeserializeSeed, Visitor}, -}; - -use crate::util::unhandled; - -/// Deserialize into T from buffer. -#[cfg_attr( - unabridged, - tracing::instrument( - name = "deserialize", - level = "trace", - skip_all, - fields(len = %buf.len()), - ) -)] -pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result -where - T: Deserialize<'a>, -{ - let mut deserializer = Deserializer { buf, pos: 0, rec: 0, seq: false }; - - T::deserialize(&mut deserializer).debug_inspect(|_| { - deserializer - .finished() - .expect("deserialization failed to consume trailing bytes"); - }) -} - -/// Deserialization state. -pub(crate) struct Deserializer<'de> { - buf: &'de [u8], - pos: usize, - rec: usize, - seq: bool, -} - -/// Directive to ignore a record. This type can be used to skip deserialization -/// until the next separator is found. -#[derive(Debug, Deserialize)] -pub struct Ignore; - -/// Directive to ignore all remaining records. This can be used in a sequence to -/// ignore the rest of the sequence. -#[derive(Debug, Deserialize)] -pub struct IgnoreAll; - -impl<'de> Deserializer<'de> { - const SEP: u8 = crate::ser::SEP; - - /// Determine if the input was fully consumed and error if bytes remaining. - /// This is intended for debug assertions; not optimized for parsing logic. 
- fn finished(&self) -> Result<()> { - let pos = self.pos; - let len = self.buf.len(); - let parsed = &self.buf[0..pos]; - let unparsed = &self.buf[pos..]; - let remain = self.remaining()?; - let trailing_sep = remain == 1 && unparsed[0] == Self::SEP; - (remain == 0 || trailing_sep) - .then_some(()) - .ok_or(err!(SerdeDe( - "{remain} trailing of {len} bytes not deserialized.\n{parsed:?}\n{unparsed:?}", - ))) - } - - /// Called at the start of arrays and tuples - #[inline] - fn sequence_start(&mut self) { - debug_assert!(!self.seq, "Nested sequences are not handled at this time"); - self.seq = true; - } - - /// Consume the current record to ignore it. Inside a sequence the next - /// record is skipped but at the top-level all records are skipped such that - /// deserialization completes with self.finished() == Ok. - #[inline] - fn record_ignore(&mut self) { - if self.seq { - self.record_next(); - } else { - self.record_ignore_all(); - } - } - - /// Consume the current and all remaining records to ignore them. Similar to - /// Ignore at the top-level, but it can be provided in a sequence to Ignore - /// all remaining elements. - #[inline] - fn record_ignore_all(&mut self) { self.record_trail(); } - - /// Consume the current record. The position pointer is moved to the start - /// of the next record. Slice of the current record is returned. - #[inline] - fn record_next(&mut self) -> &'de [u8] { - self.buf[self.pos..] - .split(|b| *b == Deserializer::SEP) - .inspect(|record| self.inc_pos(record.len())) - .next() - .expect("remainder of buf even if SEP was not found") - } - - /// Peek at the first byte of the current record. If all records were - /// consumed None is returned instead. 
- #[inline] - fn record_peek_byte(&self) -> Option { - let started = self.pos != 0 || self.rec > 0; - let buf = &self.buf[self.pos..]; - debug_assert!( - !started || buf[0] == Self::SEP, - "Missing expected record separator at current position" - ); - - buf.get::(started.into()).copied() - } - - /// Consume the record separator such that the position cleanly points to - /// the start of the next record. (Case for some sequences) - #[inline] - fn record_start(&mut self) { - let started = self.pos != 0 || self.rec > 0; - debug_assert!( - !started || self.buf[self.pos] == Self::SEP, - "Missing expected record separator at current position" - ); - - self.inc_pos(started.into()); - self.inc_rec(1); - } - - /// Consume all remaining bytes, which may include record separators, - /// returning a raw slice. - #[inline] - fn record_trail(&mut self) -> &'de [u8] { - let record = &self.buf[self.pos..]; - self.inc_pos(record.len()); - record - } - - /// Increment the position pointer. - #[inline] - #[cfg_attr( - unabridged, - tracing::instrument( - level = "trace", - skip(self), - fields( - len = self.buf.len(), - rem = self.remaining().unwrap_or_default().saturating_sub(n), - ), - ) - )] - fn inc_pos(&mut self, n: usize) { - self.pos = self.pos.saturating_add(n); - debug_assert!(self.pos <= self.buf.len(), "pos out of range"); - } - - #[inline] - fn inc_rec(&mut self, n: usize) { self.rec = self.rec.saturating_add(n); } - - /// Unconsumed input bytes. 
- #[inline] - fn remaining(&self) -> Result { - let pos = self.pos; - let len = self.buf.len(); - checked!(len - pos) - } -} - -impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { - type Error = Error; - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_seq(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.sequence_start(); - visitor.visit_seq(self) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] - fn deserialize_tuple(self, _len: usize, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.sequence_start(); - visitor.visit_seq(self) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] - fn deserialize_tuple_struct( - self, - _name: &'static str, - _len: usize, - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - self.sequence_start(); - visitor.visit_seq(self) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_map(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - let input = self.record_next(); - let mut d = serde_json::Deserializer::from_slice(input); - d.deserialize_map(visitor).map_err(Into::into) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] - fn deserialize_struct( - self, - name: &'static str, - fields: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - let input = self.record_next(); - let mut d = serde_json::Deserializer::from_slice(input); - d.deserialize_struct(name, fields, visitor) - .map_err(Into::into) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] - fn deserialize_unit_struct(self, name: &'static str, visitor: V) -> Result - where - V: Visitor<'de>, - { - match name { - | "Ignore" => self.record_ignore(), - | "IgnoreAll" => self.record_ignore_all(), - | _ => unhandled!("Unrecognized deserialization 
Directive {name:?}"), - } - - visitor.visit_unit() - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, visitor)))] - fn deserialize_newtype_struct(self, name: &'static str, visitor: V) -> Result - where - V: Visitor<'de>, - { - match name { - | "$serde_json::private::RawValue" => visitor.visit_map(self), - | "Cbor" => visitor - .visit_newtype_struct(&mut minicbor_serde::Deserializer::new(self.record_trail())) - .map_err(|e| Self::Error::SerdeDe(e.to_string().into())), - - | _ => visitor.visit_newtype_struct(self), - } - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, _visitor)))] - fn deserialize_enum( - self, - _name: &'static str, - _variants: &'static [&'static str], - _visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - unhandled!("deserialize Enum not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_option>(self, visitor: V) -> Result { - if self - .buf - .get(self.pos) - .is_none_or(|b| *b == Deserializer::SEP) - { - visitor.visit_none() - } else { - visitor.visit_some(self) - } - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_bool>(self, _visitor: V) -> Result { - unhandled!("deserialize bool not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_i8>(self, _visitor: V) -> Result { - unhandled!("deserialize i8 not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_i16>(self, _visitor: V) -> Result { - unhandled!("deserialize i16 not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_i32>(self, _visitor: V) -> Result { - unhandled!("deserialize i32 not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_i64>(self, visitor: V) -> Result { - const BYTES: 
usize = size_of::(); - - let end = self.pos.saturating_add(BYTES).min(self.buf.len()); - let bytes: ArrayVec = self.buf[self.pos..end].try_into()?; - let bytes = bytes - .into_inner() - .map_err(|_| Self::Error::SerdeDe("i64 buffer underflow".into()))?; - - self.inc_pos(BYTES); - visitor.visit_i64(i64::from_be_bytes(bytes)) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_u8>(self, _visitor: V) -> Result { - unhandled!( - "deserialize u8 not implemented; try dereferencing the Handle for [u8] access \ - instead" - ) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_u16>(self, _visitor: V) -> Result { - unhandled!("deserialize u16 not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_u32>(self, _visitor: V) -> Result { - unhandled!("deserialize u32 not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_u64>(self, visitor: V) -> Result { - const BYTES: usize = size_of::(); - - let end = self.pos.saturating_add(BYTES).min(self.buf.len()); - let bytes: ArrayVec = self.buf[self.pos..end].try_into()?; - let bytes = bytes - .into_inner() - .map_err(|_| Self::Error::SerdeDe("u64 buffer underflow".into()))?; - - self.inc_pos(BYTES); - visitor.visit_u64(u64::from_be_bytes(bytes)) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_f32>(self, _visitor: V) -> Result { - unhandled!("deserialize f32 not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_f64>(self, _visitor: V) -> Result { - unhandled!("deserialize f64 not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_char>(self, _visitor: V) -> Result { - unhandled!("deserialize char not implemented") - } - - #[cfg_attr(unabridged, 
tracing::instrument(level = "trace", skip_all))] - fn deserialize_str>(self, visitor: V) -> Result { - let input = self.record_next(); - let out = deserialize_str(input)?; - visitor.visit_borrowed_str(out) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_string>(self, visitor: V) -> Result { - let input = self.record_next(); - let out = string::string_from_bytes(input)?; - visitor.visit_string(out) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_bytes>(self, visitor: V) -> Result { - let input = self.record_trail(); - visitor.visit_borrowed_bytes(input) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_byte_buf>(self, _visitor: V) -> Result { - unhandled!("deserialize Byte Buf not implemented") - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_unit>(self, _visitor: V) -> Result { - unhandled!("deserialize Unit not implemented") - } - - // this only used for $serde_json::private::RawValue at this time; see MapAccess - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_identifier>(self, visitor: V) -> Result { - let input = "$serde_json::private::RawValue"; - visitor.visit_borrowed_str(input) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_ignored_any>(self, _visitor: V) -> Result { - unhandled!("deserialize Ignored Any not implemented") - } - - #[cfg_attr( - unabridged, - tracing::instrument(level = "trace", skip_all, fields(?self.buf)) - )] - fn deserialize_any>(self, visitor: V) -> Result { - debug_assert_eq!( - conduwuit::debug::type_name::(), - "serde_json::value::de::::deserialize::ValueVisitor", - "deserialize_any: type not expected" - ); - - match self.record_peek_byte() { - | Some(b'{') => self.deserialize_map(visitor), - | Some(b'[') => 
serde_json::Deserializer::from_slice(self.record_next()) - .deserialize_seq(visitor) - .map_err(Into::into), - - | _ => self.deserialize_str(visitor), - } - } -} - -impl<'a, 'de: 'a> de::SeqAccess<'de> for &'a mut Deserializer<'de> { - type Error = Error; - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, seed)))] - fn next_element_seed(&mut self, seed: T) -> Result> - where - T: DeserializeSeed<'de>, - { - if self.pos >= self.buf.len() { - return Ok(None); - } - - self.record_start(); - seed.deserialize(&mut **self).map(Some) - } -} - -// this only used for $serde_json::private::RawValue at this time. our db -// schema doesn't have its own map format; we use json for that anyway -impl<'a, 'de: 'a> de::MapAccess<'de> for &'a mut Deserializer<'de> { - type Error = Error; - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, seed)))] - fn next_key_seed(&mut self, seed: K) -> Result> - where - K: DeserializeSeed<'de>, - { - seed.deserialize(&mut **self).map(Some) - } - - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip(self, seed)))] - fn next_value_seed(&mut self, seed: V) -> Result - where - V: DeserializeSeed<'de>, - { - seed.deserialize(&mut **self) - } -} - -// activate when stable; too soon now -//#[cfg(debug_assertions)] -#[inline] -fn deserialize_str(input: &[u8]) -> Result<&str> { string::str_from_bytes(input) } - -//#[cfg(not(debug_assertions))] -#[cfg(disable)] -#[inline] -fn deserialize_str(input: &[u8]) -> Result<&str> { - // SAFETY: Strings were written by the serializer to the database. Assuming no - // database corruption, the string will be valid. Database corruption is - // detected via rocksdb checksums. 
- unsafe { std::str::from_utf8_unchecked(input) } -} diff --git a/src/database/deserialized.rs b/src/database/deserialized.rs deleted file mode 100644 index 66541b2a..00000000 --- a/src/database/deserialized.rs +++ /dev/null @@ -1,20 +0,0 @@ -use std::convert::identity; - -use conduwuit::Result; -use serde::Deserialize; - -pub trait Deserialized { - fn map_de(self, f: F) -> Result - where - F: FnOnce(T) -> U, - T: for<'de> Deserialize<'de>; - - #[inline] - fn deserialized(self) -> Result - where - T: for<'de> Deserialize<'de>, - Self: Sized, - { - self.map_de(identity::) - } -} diff --git a/src/database/engine.rs b/src/database/engine.rs deleted file mode 100644 index 38dd7512..00000000 --- a/src/database/engine.rs +++ /dev/null @@ -1,155 +0,0 @@ -mod backup; -mod cf_opts; -pub(crate) mod context; -mod db_opts; -pub(crate) mod descriptor; -mod files; -mod logger; -mod memory_usage; -mod open; -mod repair; - -use std::{ - ffi::CStr, - sync::{ - Arc, - atomic::{AtomicU32, Ordering}, - }, -}; - -use conduwuit::{Err, Result, debug, info, warn}; -use rocksdb::{ - AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded, - WaitForCompactOptions, -}; - -use crate::{ - Context, - pool::Pool, - util::{map_err, result}, -}; - -pub struct Engine { - pub(crate) db: Db, - pub(crate) pool: Arc, - pub(crate) ctx: Arc, - pub(super) read_only: bool, - pub(super) secondary: bool, - pub(crate) checksums: bool, - corks: AtomicU32, -} - -pub(crate) type Db = DBWithThreadMode; - -impl Engine { - #[tracing::instrument( - level = "info", - skip_all, - fields( - sequence = ?self.current_sequence(), - ), - )] - pub fn wait_compactions_blocking(&self) -> Result { - let mut opts = WaitForCompactOptions::default(); - opts.set_abort_on_pause(true); - opts.set_flush(false); - opts.set_timeout(0); - - self.db.wait_for_compact(&opts).map_err(map_err) - } - - #[tracing::instrument( - level = "info", - skip_all, - fields( - sequence = ?self.current_sequence(), - ), - )] - pub 
fn sort(&self) -> Result { - let flushoptions = rocksdb::FlushOptions::default(); - result(DBCommon::flush_opt(&self.db, &flushoptions)) - } - - #[tracing::instrument( - level = "debug", - skip_all, - fields( - sequence = ?self.current_sequence(), - ), - )] - pub fn update(&self) -> Result { self.db.try_catch_up_with_primary().map_err(map_err) } - - #[tracing::instrument(level = "info", skip_all)] - pub fn sync(&self) -> Result { result(DBCommon::flush_wal(&self.db, true)) } - - #[tracing::instrument(level = "debug", skip_all)] - pub fn flush(&self) -> Result { result(DBCommon::flush_wal(&self.db, false)) } - - #[inline] - pub(crate) fn cork(&self) { self.corks.fetch_add(1, Ordering::Relaxed); } - - #[inline] - pub(crate) fn uncork(&self) { self.corks.fetch_sub(1, Ordering::Relaxed); } - - #[inline] - pub fn corked(&self) -> bool { self.corks.load(Ordering::Relaxed) > 0 } - - /// Query for database property by null-terminated name which is expected to - /// have a result with an integer representation. This is intended for - /// low-overhead programmatic use. - pub(crate) fn property_integer( - &self, - cf: &impl AsColumnFamilyRef, - name: &CStr, - ) -> Result { - result(self.db.property_int_value_cf(cf, name)) - .and_then(|val| val.map_or_else(|| Err!("Property {name:?} not found."), Ok)) - } - - /// Query for database property by name receiving the result in a string. 
- pub(crate) fn property(&self, cf: &impl AsColumnFamilyRef, name: &str) -> Result { - result(self.db.property_value_cf(cf, name)) - .and_then(|val| val.map_or_else(|| Err!("Property {name:?} not found."), Ok)) - } - - pub(crate) fn cf(&self, name: &str) -> Arc> { - self.db - .cf_handle(name) - .expect("column must be described prior to database open") - } - - #[inline] - #[must_use] - #[tracing::instrument(name = "sequence", level = "debug", skip_all, fields(sequence))] - pub fn current_sequence(&self) -> u64 { - let sequence = self.db.latest_sequence_number(); - - #[cfg(debug_assertions)] - tracing::Span::current().record("sequence", sequence); - - sequence - } - - #[inline] - #[must_use] - pub fn is_read_only(&self) -> bool { self.secondary || self.read_only } - - #[inline] - #[must_use] - pub fn is_secondary(&self) -> bool { self.secondary } -} - -impl Drop for Engine { - #[cold] - fn drop(&mut self) { - const BLOCKING: bool = true; - - debug!("Waiting for background tasks to finish..."); - self.db.cancel_all_background_work(BLOCKING); - - info!( - sequence = %self.current_sequence(), - "Closing database..." 
- ); - } -} diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs deleted file mode 100644 index ac72e6d4..00000000 --- a/src/database/engine/backup.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::{ffi::OsString, path::PathBuf}; - -use conduwuit::{Err, Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; -use rocksdb::backup::{BackupEngine, BackupEngineOptions}; - -use super::Engine; -use crate::util::map_err; - -#[implement(Engine)] -#[tracing::instrument(skip(self))] -pub fn backup(&self) -> Result { - let mut engine = self.backup_engine()?; - let config = &self.ctx.server.config; - if config.database_backups_to_keep > 0 { - let flush = !self.is_read_only(); - engine - .create_new_backup_flush(&self.db, flush) - .map_err(map_err)?; - - let engine_info = engine.get_backup_info(); - let info = &engine_info.last().expect("backup engine info is not empty"); - info!( - "Created database backup #{} using {} bytes in {} files", - info.backup_id, info.size, info.num_files, - ); - } - - if config.database_backups_to_keep >= 0 { - let keep = u32::try_from(config.database_backups_to_keep)?; - if let Err(e) = engine.purge_old_backups(keep.try_into()?) 
{ - error!("Failed to purge old backup: {e:?}"); - } - } - - if config.database_backups_to_keep == 0 { - warn!("Configuration item `database_backups_to_keep` is set to 0."); - } - - Ok(()) -} - -#[implement(Engine)] -pub fn backup_list(&self) -> Result + Send> { - let info = self.backup_engine()?.get_backup_info(); - - if info.is_empty() { - return Err!("No backups found."); - } - - let list = info.into_iter().map(|info| { - format!( - "#{} {}: {} bytes, {} files", - info.backup_id, - rfc2822_from_seconds(info.timestamp), - info.size, - info.num_files, - ) - }); - - Ok(list) -} - -#[implement(Engine)] -pub fn backup_count(&self) -> Result { - let info = self.backup_engine()?.get_backup_info(); - - Ok(info.len()) -} - -#[implement(Engine)] -fn backup_engine(&self) -> Result { - let path = self.backup_path()?; - let options = BackupEngineOptions::new(path).map_err(map_err)?; - BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err) -} - -#[implement(Engine)] -fn backup_path(&self) -> Result { - let path = self - .ctx - .server - .config - .database_backup_path - .clone() - .map(PathBuf::into_os_string) - .unwrap_or_default(); - - if path.is_empty() { - return Err!(Config("database_backup_path", "Configure path to enable backups")); - } - - Ok(path) -} diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs deleted file mode 100644 index 7ceec722..00000000 --- a/src/database/engine/cf_opts.rs +++ /dev/null @@ -1,278 +0,0 @@ -use conduwuit::{Config, Result, err, utils::math::Expected}; -use rocksdb::{ - BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, Cache, - DBCompressionType as CompressionType, DataBlockIndexType, FifoCompactOptions, - LruCacheOptions, Options, UniversalCompactOptions, UniversalCompactionStopStyle, -}; - -use super::descriptor::{CacheDisp, Descriptor}; -use crate::{Context, util::map_err}; - -pub(super) const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; - -/// Adjust options for the specific column by 
name. Provide the result of -/// db_options() as the argument to this function and use the return value in -/// the arguments to open the specific column. -pub(crate) fn cf_options(ctx: &Context, opts: Options, desc: &Descriptor) -> Result { - let cache = get_cache(ctx, desc); - let config = &ctx.server.config; - descriptor_cf_options(opts, *desc, config, cache.as_ref()) -} - -fn descriptor_cf_options( - mut opts: Options, - mut desc: Descriptor, - config: &Config, - cache: Option<&Cache>, -) -> Result { - set_compression(&mut desc, config); - set_table_options(&mut opts, &desc, cache)?; - - opts.set_min_write_buffer_number(1); - opts.set_max_write_buffer_number(2); - opts.set_write_buffer_size(desc.write_size); - - opts.set_target_file_size_base(desc.file_size); - opts.set_target_file_size_multiplier(desc.file_shape); - - opts.set_level_zero_file_num_compaction_trigger(desc.level0_width); - opts.set_level_compaction_dynamic_level_bytes(false); - opts.set_ttl(desc.ttl); - - opts.set_max_bytes_for_level_base(desc.level_size); - opts.set_max_bytes_for_level_multiplier(1.0); - opts.set_max_bytes_for_level_multiplier_additional(&desc.level_shape); - - opts.set_compaction_style(desc.compaction); - opts.set_compaction_pri(desc.compaction_pri); - opts.set_universal_compaction_options(&uc_options(&desc)); - opts.set_fifo_compaction_options(&fifo_options(&desc)); - - let compression_shape: Vec<_> = desc - .compression_shape - .into_iter() - .map(|val| (val > 0).then_some(desc.compression)) - .map(|val| val.unwrap_or(CompressionType::None)) - .collect(); - - opts.set_compression_type(desc.compression); - opts.set_compression_per_level(compression_shape.as_slice()); - opts.set_compression_options(-14, desc.compression_level, 0, 0); // -14 w_bits used by zlib. 
- if let Some(&bottommost_level) = desc.bottommost_level.as_ref() { - opts.set_bottommost_compression_type(desc.compression); - opts.set_bottommost_zstd_max_train_bytes(0, true); - opts.set_bottommost_compression_options( - -14, // -14 w_bits is only read by zlib. - bottommost_level, - 0, - 0, - true, - ); - } - - opts.set_options_from_string("{{arena_block_size=2097152;}}") - .map_err(map_err)?; - - #[cfg(debug_assertions)] - opts.set_options_from_string( - "{{paranoid_checks=true;paranoid_file_checks=true;force_consistency_checks=true;\ - verify_sst_unique_id_in_manifest=true;}}", - ) - .map_err(map_err)?; - - Ok(opts) -} - -fn set_table_options(opts: &mut Options, desc: &Descriptor, cache: Option<&Cache>) -> Result { - let mut table = table_options(desc, cache.is_some()); - - if let Some(cache) = cache { - table.set_block_cache(cache); - } else { - table.disable_cache(); - } - - let prepopulate = if desc.write_to_cache { "kFlushOnly" } else { "kDisable" }; - - let string = format!( - "{{block_based_table_factory={{num_file_reads_for_auto_readahead={0};\ - max_auto_readahead_size={1};initial_auto_readahead_size={2};\ - enable_index_compression={3};prepopulate_block_cache={4}}}}}", - desc.auto_readahead_thresh, - desc.auto_readahead_max, - desc.auto_readahead_init, - desc.compressed_index, - prepopulate, - ); - - opts.set_options_from_string(&string).map_err(map_err)?; - - opts.set_block_based_table_factory(&table); - - Ok(()) -} - -fn set_compression(desc: &mut Descriptor, config: &Config) { - desc.compression = match config.rocksdb_compression_algo.as_ref() { - | "snappy" => CompressionType::Snappy, - | "zlib" => CompressionType::Zlib, - | "bz2" => CompressionType::Bz2, - | "lz4" => CompressionType::Lz4, - | "lz4hc" => CompressionType::Lz4hc, - | "none" => CompressionType::None, - | _ => CompressionType::Zstd, - }; - - let can_override_level = config.rocksdb_compression_level == SENTINEL_COMPRESSION_LEVEL - && desc.compression == CompressionType::Zstd; - - if 
!can_override_level { - desc.compression_level = config.rocksdb_compression_level; - } - - let can_override_bottom = config.rocksdb_bottommost_compression_level - == SENTINEL_COMPRESSION_LEVEL - && desc.compression == CompressionType::Zstd; - - if !can_override_bottom { - desc.bottommost_level = Some(config.rocksdb_bottommost_compression_level); - } - - if !config.rocksdb_bottommost_compression { - desc.bottommost_level = None; - } -} - -fn fifo_options(desc: &Descriptor) -> FifoCompactOptions { - let mut opts = FifoCompactOptions::default(); - opts.set_max_table_files_size(desc.limit_size); - - opts -} - -fn uc_options(desc: &Descriptor) -> UniversalCompactOptions { - let mut opts = UniversalCompactOptions::default(); - opts.set_stop_style(UniversalCompactionStopStyle::Total); - opts.set_min_merge_width(desc.merge_width.0); - opts.set_max_merge_width(desc.merge_width.1); - opts.set_max_size_amplification_percent(10000); - opts.set_compression_size_percent(-1); - opts.set_size_ratio(1); - - opts -} - -fn table_options(desc: &Descriptor, has_cache: bool) -> BlockBasedOptions { - let mut opts = BlockBasedOptions::default(); - - opts.set_block_size(desc.block_size); - opts.set_metadata_block_size(desc.index_size); - - opts.set_cache_index_and_filter_blocks(has_cache); - opts.set_pin_top_level_index_and_filter(false); - opts.set_pin_l0_filter_and_index_blocks_in_cache(false); - opts.set_partition_pinning_tier(BlockBasedPinningTier::None); - opts.set_unpartitioned_pinning_tier(BlockBasedPinningTier::None); - opts.set_top_level_index_pinning_tier(BlockBasedPinningTier::None); - - opts.set_partition_filters(true); - opts.set_use_delta_encoding(false); - opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch); - - opts.set_data_block_index_type(match desc.block_index_hashing { - | None if desc.index_size > 512 => DataBlockIndexType::BinaryAndHash, - | Some(enable) if enable => DataBlockIndexType::BinaryAndHash, - | Some(_) | None => DataBlockIndexType::BinarySearch, 
- }); - - opts -} - -fn get_cache(ctx: &Context, desc: &Descriptor) -> Option { - if desc.dropped { - return None; - } - - // Some cache capacities are overriden by server config in a strange but - // legacy-compat way - let config = &ctx.server.config; - let cap = match desc.name { - | "eventid_pduid" => Some(config.eventid_pdu_cache_capacity), - | "eventid_shorteventid" => Some(config.eventidshort_cache_capacity), - | "shorteventid_eventid" => Some(config.shorteventid_cache_capacity), - | "shorteventid_authchain" => Some(config.auth_chain_cache_capacity), - | "shortstatekey_statekey" => Some(config.shortstatekey_cache_capacity), - | "statekey_shortstatekey" => Some(config.statekeyshort_cache_capacity), - | "servernameevent_data" => Some(config.servernameevent_data_cache_capacity), - | "pduid_pdu" | "eventid_outlierpdu" => Some(config.pdu_cache_capacity), - | _ => None, - } - .map(TryInto::try_into) - .transpose() - .expect("u32 to usize"); - - let ent_size: usize = desc - .key_size_hint - .unwrap_or_default() - .expected_add(desc.val_size_hint.unwrap_or_default()); - - let size = match cap { - | Some(cap) => cache_size(config, cap, ent_size), - | _ => desc.cache_size, - }; - - let shard_bits: i32 = desc - .cache_shards - .ilog2() - .try_into() - .expect("u32 to i32 conversion"); - - debug_assert!(shard_bits <= 10, "cache shards probably too large"); - let mut cache_opts = LruCacheOptions::default(); - cache_opts.set_num_shard_bits(shard_bits); - cache_opts.set_capacity(size); - - let mut caches = ctx.col_cache.lock().expect("locked"); - match desc.cache_disp { - | CacheDisp::Unique if desc.cache_size == 0 => None, - | CacheDisp::Unique => { - let cache = Cache::new_lru_cache_opts(&cache_opts); - caches.insert(desc.name.into(), cache.clone()); - Some(cache) - }, - - | CacheDisp::SharedWith(other) if !caches.contains_key(other) => { - let cache = Cache::new_lru_cache_opts(&cache_opts); - caches.insert(desc.name.into(), cache.clone()); - Some(cache) - }, - - | 
CacheDisp::SharedWith(other) => Some( - caches - .get(other) - .cloned() - .expect("caches.contains_key(other) must be true"), - ), - - | CacheDisp::Shared => Some( - caches - .get("Shared") - .cloned() - .expect("shared cache must already exist"), - ), - } -} - -pub(crate) fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> usize { - cache_size_f64(config, f64::from(base_size), entity_size) -} - -#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] -pub(crate) fn cache_size_f64(config: &Config, base_size: f64, entity_size: usize) -> usize { - let ents = base_size * config.cache_capacity_modifier; - - (ents as usize) - .checked_mul(entity_size) - .ok_or_else(|| err!(Config("cache_capacity_modifier", "Cache size is too large."))) - .expect("invalid cache size") -} diff --git a/src/database/engine/context.rs b/src/database/engine/context.rs deleted file mode 100644 index 380e37af..00000000 --- a/src/database/engine/context.rs +++ /dev/null @@ -1,82 +0,0 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, Mutex}, -}; - -use conduwuit::{Result, Server, debug, utils::math::usize_from_f64}; -use rocksdb::{Cache, Env, LruCacheOptions}; - -use crate::{or_else, pool::Pool}; - -/// Some components are constructed prior to opening the database and must -/// outlive the database. These can also be shared between database instances -/// though at the time of this comment we only open one database per process. -/// These assets are housed in the shared Context. 
-pub(crate) struct Context { - pub(crate) pool: Arc, - pub(crate) col_cache: Mutex>, - pub(crate) row_cache: Mutex, - pub(crate) env: Mutex, - pub(crate) server: Arc, -} - -impl Context { - pub(crate) fn new(server: &Arc) -> Result> { - let config = &server.config; - let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0; - - let col_shard_bits = 7; - let col_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; - - let row_shard_bits = 7; - let row_cache_capacity_bytes = usize_from_f64(cache_capacity_bytes * 0.50)?; - - let mut row_cache_opts = LruCacheOptions::default(); - row_cache_opts.set_num_shard_bits(row_shard_bits); - row_cache_opts.set_capacity(row_cache_capacity_bytes); - let row_cache = Cache::new_lru_cache_opts(&row_cache_opts); - - let mut col_cache_opts = LruCacheOptions::default(); - col_cache_opts.set_num_shard_bits(col_shard_bits); - col_cache_opts.set_capacity(col_cache_capacity_bytes); - let col_cache = Cache::new_lru_cache_opts(&col_cache_opts); - let col_cache: BTreeMap<_, _> = [("Shared".to_owned(), col_cache)].into(); - - let mut env = Env::new().or_else(or_else)?; - - if config.rocksdb_compaction_prio_idle { - env.lower_thread_pool_cpu_priority(); - } - - if config.rocksdb_compaction_ioprio_idle { - env.lower_thread_pool_io_priority(); - } - - Ok(Arc::new(Self { - pool: Pool::new(server)?, - col_cache: col_cache.into(), - row_cache: row_cache.into(), - env: env.into(), - server: server.clone(), - })) - } -} - -impl Drop for Context { - #[cold] - fn drop(&mut self) { - debug!("Closing frontend pool"); - self.pool.close(); - - let mut env = self.env.lock().expect("locked"); - - debug!("Shutting down background threads"); - env.set_high_priority_background_threads(0); - env.set_low_priority_background_threads(0); - env.set_bottom_priority_background_threads(0); - env.set_background_threads(0); - - debug!("Joining background threads..."); - env.join_all_threads(); - } -} diff --git 
a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs deleted file mode 100644 index 18cec742..00000000 --- a/src/database/engine/db_opts.rs +++ /dev/null @@ -1,140 +0,0 @@ -use std::{cmp, convert::TryFrom}; - -use conduwuit::{Config, Result, utils}; -use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel}; - -use super::{cf_opts::cache_size_f64, logger::handle as handle_log}; - -/// Create database-wide options suitable for opening the database. This also -/// sets our default column options in case of opening a column with the same -/// resulting value. Note that we require special per-column options on some -/// columns, therefor columns should only be opened after passing this result -/// through cf_options(). -pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Result { - const DEFAULT_STATS_LEVEL: StatsLevel = if cfg!(debug_assertions) { - StatsLevel::ExceptDetailedTimers - } else { - StatsLevel::DisableAll - }; - - let mut opts = Options::default(); - - // Logging - set_logging_defaults(&mut opts, config); - - // Processing - opts.set_max_background_jobs(num_threads::(config)?); - opts.set_max_subcompactions(num_threads::(config)?); - opts.set_avoid_unnecessary_blocking_io(true); - opts.set_max_file_opening_threads(0); - - // IO - opts.set_manual_wal_flush(true); - opts.set_atomic_flush(config.rocksdb_atomic_flush); - opts.set_enable_pipelined_write(!config.rocksdb_atomic_flush); - if config.rocksdb_direct_io { - opts.set_use_direct_reads(true); - opts.set_use_direct_io_for_flush_and_compaction(true); - } - if config.rocksdb_optimize_for_spinning_disks { - // speeds up opening DB on hard drives - opts.set_skip_checking_sst_file_sizes_on_db_open(true); - opts.set_skip_stats_update_on_db_open(true); - //opts.set_max_file_opening_threads(threads.try_into().unwrap()); - } else { - opts.set_compaction_readahead_size(1024 * 512); - } - - // Blocks - opts.set_row_cache(row_cache); - 
opts.set_db_write_buffer_size(cache_size_f64( - config, - config.db_write_buffer_capacity_mb, - 1_048_576, - )); - - // Files - opts.set_table_cache_num_shard_bits(7); - opts.set_wal_size_limit_mb(1024); - opts.set_max_total_wal_size(1024 * 1024 * 512); - opts.set_writable_file_max_buffer_size(1024 * 1024 * 2); - - // Misc - opts.set_disable_auto_compactions(!config.rocksdb_compaction); - opts.create_missing_column_families(true); - opts.create_if_missing(true); - - opts.set_statistics_level(match config.rocksdb_stats_level { - | 0 => StatsLevel::DisableAll, - | 1 => DEFAULT_STATS_LEVEL, - | 2 => StatsLevel::ExceptHistogramOrTimers, - | 3 => StatsLevel::ExceptTimers, - | 4 => StatsLevel::ExceptDetailedTimers, - | 5 => StatsLevel::ExceptTimeForMutex, - | 6_u8..=u8::MAX => StatsLevel::All, - }); - - opts.set_report_bg_io_stats(match config.rocksdb_stats_level { - | 0..=1 => false, - | 2_u8..=u8::MAX => true, - }); - - // Default: https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords - // - // Unclean shutdowns of a Matrix homeserver are likely to be fine when - // recovered in this manner as it's likely any lost information will be - // restored via federation. - opts.set_wal_recovery_mode(match config.rocksdb_recovery_mode { - | 0 => DBRecoveryMode::AbsoluteConsistency, - | 1 => DBRecoveryMode::TolerateCorruptedTailRecords, - | 2 => DBRecoveryMode::PointInTime, - | 3 => DBRecoveryMode::SkipAnyCorruptedRecord, - | 4_u8..=u8::MAX => unimplemented!(), - }); - - // - // "We recommend to set track_and_verify_wals_in_manifest to true for - // production, it has been enabled in production for the entire database cluster - // serving the social graph for all Meta apps." 
- opts.set_track_and_verify_wals_in_manifest(true); - - opts.set_paranoid_checks(config.rocksdb_paranoid_file_checks); - - opts.set_env(env); - - Ok(opts) -} - -fn set_logging_defaults(opts: &mut Options, config: &Config) { - let rocksdb_log_level = match config.rocksdb_log_level.as_ref() { - | "debug" => LogLevel::Debug, - | "info" => LogLevel::Info, - | "warn" => LogLevel::Warn, - | "fatal" => LogLevel::Fatal, - | _ => LogLevel::Error, - }; - - opts.set_log_level(rocksdb_log_level); - opts.set_max_log_file_size(config.rocksdb_max_log_file_size); - opts.set_log_file_time_to_roll(config.rocksdb_log_time_to_roll); - opts.set_keep_log_file_num(config.rocksdb_max_log_files); - opts.set_stats_dump_period_sec(0); - - if config.rocksdb_log_stderr { - opts.set_stderr_logger(rocksdb_log_level, "rocksdb"); - } else { - opts.set_callback_logger(rocksdb_log_level, &handle_log); - } -} - -fn num_threads>(config: &Config) -> Result { - const MIN_PARALLELISM: usize = 2; - - let requested = if config.rocksdb_parallelism_threads != 0 { - config.rocksdb_parallelism_threads - } else { - utils::available_parallelism() - }; - - utils::math::try_into::(cmp::max(MIN_PARALLELISM, requested)) -} diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs deleted file mode 100644 index 2274da9c..00000000 --- a/src/database/engine/descriptor.rs +++ /dev/null @@ -1,157 +0,0 @@ -use conduwuit::utils::string::EMPTY; -use rocksdb::{ - DBCompactionPri as CompactionPri, DBCompactionStyle as CompactionStyle, - DBCompressionType as CompressionType, -}; - -use super::cf_opts::SENTINEL_COMPRESSION_LEVEL; - -/// Column Descriptor -#[derive(Debug, Clone, Copy)] -pub(crate) struct Descriptor { - pub(crate) name: &'static str, - pub(crate) dropped: bool, - pub(crate) cache_disp: CacheDisp, - pub(crate) key_size_hint: Option, - pub(crate) val_size_hint: Option, - pub(crate) block_size: usize, - pub(crate) index_size: usize, - pub(crate) write_size: usize, - pub(crate) cache_size: 
usize, - pub(crate) level_size: u64, - pub(crate) level_shape: [i32; 7], - pub(crate) file_size: u64, - pub(crate) file_shape: i32, - pub(crate) level0_width: i32, - pub(crate) merge_width: (i32, i32), - pub(crate) limit_size: u64, - pub(crate) ttl: u64, - pub(crate) compaction: CompactionStyle, - pub(crate) compaction_pri: CompactionPri, - pub(crate) compression: CompressionType, - pub(crate) compressed_index: bool, - pub(crate) compression_shape: [i32; 7], - pub(crate) compression_level: i32, - pub(crate) bottommost_level: Option, - pub(crate) block_index_hashing: Option, - pub(crate) cache_shards: u32, - pub(crate) write_to_cache: bool, - pub(crate) auto_readahead_thresh: u32, - pub(crate) auto_readahead_init: usize, - pub(crate) auto_readahead_max: usize, -} - -/// Cache Disposition -#[derive(Debug, Clone, Copy)] -pub(crate) enum CacheDisp { - Unique, - Shared, - SharedWith(&'static str), -} - -/// Base descriptor supplying common defaults to all derived descriptors. -static BASE: Descriptor = Descriptor { - name: EMPTY, - dropped: false, - cache_disp: CacheDisp::Shared, - key_size_hint: None, - val_size_hint: None, - block_size: 1024 * 4, - index_size: 1024 * 4, - write_size: 1024 * 1024 * 2, - cache_size: 1024 * 1024 * 4, - level_size: 1024 * 1024 * 8, - level_shape: [1, 1, 1, 3, 7, 15, 31], - file_size: 1024 * 1024, - file_shape: 2, - level0_width: 2, - merge_width: (2, 16), - limit_size: 0, - ttl: 60 * 60 * 24 * 21, - compaction: CompactionStyle::Level, - compaction_pri: CompactionPri::MinOverlappingRatio, - compression: CompressionType::Zstd, - compressed_index: true, - compression_shape: [0, 0, 0, 1, 1, 1, 1], - compression_level: SENTINEL_COMPRESSION_LEVEL, - bottommost_level: Some(SENTINEL_COMPRESSION_LEVEL), - block_index_hashing: None, - cache_shards: 64, - write_to_cache: false, - auto_readahead_thresh: 0, - auto_readahead_init: 1024 * 16, - auto_readahead_max: 1024 * 1024 * 2, -}; - -/// Tombstone descriptor for columns which have been or will be 
deleted. -pub(crate) static DROPPED: Descriptor = Descriptor { dropped: true, ..BASE }; - -/// Descriptor for large datasets with random updates across the keyspace. -pub(crate) static RANDOM: Descriptor = Descriptor { - compaction_pri: CompactionPri::OldestSmallestSeqFirst, - write_size: 1024 * 1024 * 32, - cache_shards: 128, - compression_level: -3, - bottommost_level: Some(2), - compressed_index: true, - ..BASE -}; - -/// Descriptor for large datasets with updates to the end of the keyspace. -pub(crate) static SEQUENTIAL: Descriptor = Descriptor { - compaction_pri: CompactionPri::OldestLargestSeqFirst, - write_size: 1024 * 1024 * 64, - level_size: 1024 * 1024 * 32, - file_size: 1024 * 1024 * 2, - cache_shards: 128, - compression_level: -2, - bottommost_level: Some(2), - compression_shape: [0, 0, 1, 1, 1, 1, 1], - compressed_index: false, - ..BASE -}; - -/// Descriptor for small datasets with random updates across the keyspace. -pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { - compaction: CompactionStyle::Universal, - write_size: 1024 * 1024 * 16, - level_size: 1024 * 512, - file_size: 1024 * 128, - file_shape: 3, - index_size: 512, - block_size: 512, - cache_shards: 64, - compression_level: -4, - bottommost_level: Some(-1), - compression_shape: [0, 0, 0, 0, 0, 1, 1], - compressed_index: false, - ..RANDOM -}; - -/// Descriptor for small datasets with updates to the end of the keyspace. -pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { - compaction: CompactionStyle::Universal, - write_size: 1024 * 1024 * 16, - level_size: 1024 * 1024, - file_size: 1024 * 512, - file_shape: 3, - block_size: 512, - cache_shards: 64, - block_index_hashing: Some(false), - compression_level: -4, - bottommost_level: Some(-2), - compression_shape: [0, 0, 0, 0, 1, 1, 1], - compressed_index: false, - ..SEQUENTIAL -}; - -/// Descriptor for small persistent caches with random updates. Oldest entries -/// are deleted after limit_size reached. 
-pub(crate) static RANDOM_SMALL_CACHE: Descriptor = Descriptor { - compaction: CompactionStyle::Fifo, - cache_disp: CacheDisp::Unique, - limit_size: 1024 * 1024 * 64, - ttl: 60 * 60 * 24 * 14, - file_shape: 2, - ..RANDOM_SMALL -}; diff --git a/src/database/engine/files.rs b/src/database/engine/files.rs deleted file mode 100644 index 1f38a63c..00000000 --- a/src/database/engine/files.rs +++ /dev/null @@ -1,15 +0,0 @@ -use conduwuit::{Result, implement}; -use rocksdb::LiveFile as SstFile; - -use super::Engine; -use crate::util::map_err; - -#[implement(Engine)] -pub fn file_list(&self) -> impl Iterator> + Send + use<> { - self.db - .live_files() - .map_err(map_err) - .into_iter() - .flat_map(Vec::into_iter) - .map(Ok) -} diff --git a/src/database/engine/logger.rs b/src/database/engine/logger.rs deleted file mode 100644 index 23e23fc7..00000000 --- a/src/database/engine/logger.rs +++ /dev/null @@ -1,22 +0,0 @@ -use conduwuit::{debug, error, warn}; -use rocksdb::LogLevel; - -#[tracing::instrument( - parent = None, - name = "rocksdb", - level = "trace" - skip(msg), -)] -pub(crate) fn handle(level: LogLevel, msg: &str) { - let msg = msg.trim(); - if msg.starts_with("Options") { - return; - } - - match level { - | LogLevel::Header | LogLevel::Debug => debug!("{msg}"), - | LogLevel::Error | LogLevel::Fatal => error!("{msg}"), - | LogLevel::Info => debug!("{msg}"), - | LogLevel::Warn => warn!("{msg}"), - } -} diff --git a/src/database/engine/memory_usage.rs b/src/database/engine/memory_usage.rs deleted file mode 100644 index 9bb5c535..00000000 --- a/src/database/engine/memory_usage.rs +++ /dev/null @@ -1,30 +0,0 @@ -use std::fmt::Write; - -use conduwuit::{Result, implement}; -use rocksdb::perf::get_memory_usage_stats; - -use super::Engine; -use crate::or_else; - -#[implement(Engine)] -pub fn memory_usage(&self) -> Result { - let mut res = String::new(); - let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&*self.ctx.row_cache.lock()?])) - .or_else(or_else)?; - let 
mibs = |input| f64::from(u32::try_from(input / 1024).unwrap_or(0)) / 1024.0; - writeln!( - res, - "Memory buffers: {:.2} MiB\nPending write: {:.2} MiB\nTable readers: {:.2} MiB\nRow \ - cache: {:.2} MiB", - mibs(stats.mem_table_total), - mibs(stats.mem_table_unflushed), - mibs(stats.mem_table_readers_total), - mibs(u64::try_from(self.ctx.row_cache.lock()?.get_usage())?), - )?; - - for (name, cache) in &*self.ctx.col_cache.lock()? { - writeln!(res, "{name} cache: {:.2} MiB", mibs(u64::try_from(cache.get_usage())?))?; - } - - Ok(res) -} diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs deleted file mode 100644 index 84e59a6a..00000000 --- a/src/database/engine/open.rs +++ /dev/null @@ -1,132 +0,0 @@ -use std::{ - collections::BTreeSet, - path::Path, - sync::{Arc, atomic::AtomicU32}, -}; - -use conduwuit::{Result, debug, implement, info, warn}; -use rocksdb::{ColumnFamilyDescriptor, Options}; - -use super::{ - Db, Engine, - cf_opts::cf_options, - db_opts::db_options, - descriptor::{self, Descriptor}, - repair::repair, -}; -use crate::{Context, or_else}; - -#[implement(Engine)] -#[tracing::instrument(skip_all)] -pub(crate) async fn open(ctx: Arc, desc: &[Descriptor]) -> Result> { - let server = &ctx.server; - let config = &server.config; - let path = &config.database_path; - - let db_opts = db_options( - config, - &ctx.env.lock().expect("environment locked"), - &ctx.row_cache.lock().expect("row cache locked"), - )?; - - let cfds = Self::configure_cfds(&ctx, &db_opts, desc)?; - let num_cfds = cfds.len(); - debug!("Configured {num_cfds} column descriptors..."); - - let load_time = std::time::Instant::now(); - if config.rocksdb_repair { - repair(&db_opts, &config.database_path)?; - } - - debug!("Opening database..."); - let db = if config.rocksdb_read_only { - Db::open_cf_descriptors_read_only(&db_opts, path, cfds, false) - } else if config.rocksdb_secondary { - Db::open_cf_descriptors_as_secondary(&db_opts, path, path, cfds) - } else { - 
Db::open_cf_descriptors(&db_opts, path, cfds) - } - .or_else(or_else)?; - - info!( - columns = num_cfds, - sequence = %db.latest_sequence_number(), - time = ?load_time.elapsed(), - "Opened database." - ); - - Ok(Arc::new(Self { - db, - pool: ctx.pool.clone(), - ctx: ctx.clone(), - read_only: config.rocksdb_read_only, - secondary: config.rocksdb_secondary, - checksums: config.rocksdb_checksums, - corks: AtomicU32::new(0), - })) -} - -#[implement(Engine)] -#[tracing::instrument(name = "configure", skip_all)] -fn configure_cfds( - ctx: &Arc, - db_opts: &Options, - desc: &[Descriptor], -) -> Result> { - let server = &ctx.server; - let config = &server.config; - let path = &config.database_path; - let existing = Self::discover_cfs(path, db_opts); - - let creating = desc.iter().filter(|desc| !existing.contains(desc.name)); - - let missing = existing - .iter() - .filter(|&name| name != "default") - .filter(|&name| !desc.iter().any(|desc| desc.name == name)); - - debug!( - existing = existing.len(), - described = desc.len(), - missing = missing.clone().count(), - creating = creating.clone().count(), - "Discovered database columns" - ); - - missing.clone().for_each(|name| { - debug!("Found unrecognized column {name:?} in existing database."); - }); - - creating.map(|desc| desc.name).for_each(|name| { - debug!("Creating new column {name:?} not previously found in existing database."); - }); - - let missing_descriptors = missing.clone().map(|_| descriptor::DROPPED); - - let cfopts: Vec<_> = desc - .iter() - .copied() - .chain(missing_descriptors) - .map(|ref desc| cf_options(ctx, db_opts.clone(), desc)) - .collect::>()?; - - let cfds: Vec<_> = desc - .iter() - .map(|desc| desc.name) - .map(ToOwned::to_owned) - .chain(missing.cloned()) - .zip(cfopts.into_iter()) - .map(|(name, opts)| ColumnFamilyDescriptor::new(name, opts)) - .collect(); - - Ok(cfds) -} - -#[implement(Engine)] -#[tracing::instrument(name = "discover", skip_all)] -fn discover_cfs(path: &Path, opts: &Options) -> 
BTreeSet { - Db::list_cf(opts, path) - .unwrap_or_default() - .into_iter() - .collect::>() -} diff --git a/src/database/engine/repair.rs b/src/database/engine/repair.rs deleted file mode 100644 index aeec0caf..00000000 --- a/src/database/engine/repair.rs +++ /dev/null @@ -1,16 +0,0 @@ -use std::path::PathBuf; - -use conduwuit::{Err, Result, info, warn}; -use rocksdb::Options; - -use super::Db; - -pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> Result { - warn!("Starting database repair. This may take a long time..."); - match Db::repair(db_opts, path) { - | Ok(()) => info!("Database repair successful."), - | Err(e) => return Err!("Repair failed: {e:?}"), - } - - Ok(()) -} diff --git a/src/database/handle.rs b/src/database/handle.rs deleted file mode 100644 index 484e5618..00000000 --- a/src/database/handle.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::{fmt, fmt::Debug, ops::Deref}; - -use conduwuit::Result; -use rocksdb::DBPinnableSlice; -use serde::{Deserialize, Serialize, Serializer}; - -use crate::{Deserialized, Slice, keyval::deserialize_val}; - -pub struct Handle<'a> { - val: DBPinnableSlice<'a>, -} - -impl<'a> From> for Handle<'a> { - fn from(val: DBPinnableSlice<'a>) -> Self { Self { val } } -} - -impl Debug for Handle<'_> { - fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result { - let val: &Slice = self; - let ptr = val.as_ptr(); - let len = val.len(); - write!(out, "Handle {{val: {{ptr: {ptr:?}, len: {len}}}}}") - } -} - -impl Serialize for Handle<'_> { - #[inline] - fn serialize(&self, serializer: S) -> Result { - let bytes: &Slice = self; - serializer.serialize_bytes(bytes) - } -} - -impl Deserialized for Result> { - #[inline] - fn map_de(self, f: F) -> Result - where - F: FnOnce(T) -> U, - T: for<'de> Deserialize<'de>, - { - self?.map_de(f) - } -} - -impl<'a> Deserialized for Result<&'a Handle<'a>> { - #[inline] - fn map_de(self, f: F) -> Result - where - F: FnOnce(T) -> U, - T: for<'de> Deserialize<'de>, - { - self.and_then(|handle| 
handle.map_de(f)) - } -} - -impl<'a> Deserialized for &'a Handle<'a> { - #[inline] - fn map_de(self, f: F) -> Result - where - F: FnOnce(T) -> U, - T: for<'de> Deserialize<'de>, - { - deserialize_val(self.as_ref()).map(f) - } -} - -impl From> for Vec { - fn from(handle: Handle<'_>) -> Self { handle.deref().to_vec() } -} - -impl Deref for Handle<'_> { - type Target = Slice; - - #[inline] - fn deref(&self) -> &Self::Target { &self.val } -} - -impl AsRef for Handle<'_> { - #[inline] - fn as_ref(&self) -> &Slice { &self.val } -} diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs new file mode 100644 index 00000000..54b61742 --- /dev/null +++ b/src/database/key_value/account_data.rs @@ -0,0 +1,137 @@ +use std::collections::HashMap; + +use ruma::{ + api::client::error::ErrorKind, + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, + serde::Raw, + RoomId, UserId, +}; +use tracing::warn; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::account_data::Data for KeyValueDatabase { + /// Places one event in the account data of the user and removes the + /// previous entry. 
+ #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] + fn update( + &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, + data: &serde_json::Value, + ) -> Result<()> { + let mut prefix = room_id + .map(ToString::to_string) + .unwrap_or_default() + .as_bytes() + .to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(user_id.as_bytes()); + prefix.push(0xFF); + + let mut roomuserdataid = prefix.clone(); + roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + roomuserdataid.push(0xFF); + roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); + + let mut key = prefix; + key.extend_from_slice(event_type.to_string().as_bytes()); + + if data.get("type").is_none() || data.get("content").is_none() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Account data doesn't have all required fields.", + )); + } + + self.roomuserdataid_accountdata.insert( + &roomuserdataid, + &serde_json::to_vec(&data).expect("to_vec always works on json values"), + )?; + + let prev = self.roomusertype_roomuserdataid.get(&key)?; + + self.roomusertype_roomuserdataid + .insert(&key, &roomuserdataid)?; + + // Remove old entry + if let Some(prev) = prev { + self.roomuserdataid_accountdata.remove(&prev)?; + } + + Ok(()) + } + + /// Searches the account data for a specific kind. + #[tracing::instrument(skip(self, room_id, user_id, kind))] + fn get( + &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, + ) -> Result>> { + let mut key = room_id + .map(ToString::to_string) + .unwrap_or_default() + .as_bytes() + .to_vec(); + key.push(0xFF); + key.extend_from_slice(user_id.as_bytes()); + key.push(0xFF); + key.extend_from_slice(kind.to_string().as_bytes()); + + self.roomusertype_roomuserdataid + .get(&key)? + .and_then(|roomuserdataid| { + self.roomuserdataid_accountdata + .get(&roomuserdataid) + .transpose() + }) + .transpose()? 
+ .map(|data| serde_json::from_slice(&data).map_err(|_| Error::bad_database("could not deserialize"))) + .transpose() + } + + /// Returns all changes to the account data that happened after `since`. + #[tracing::instrument(skip(self, room_id, user_id, since))] + fn changes_since( + &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, + ) -> Result>> { + let mut userdata = HashMap::new(); + + let mut prefix = room_id + .map(ToString::to_string) + .unwrap_or_default() + .as_bytes() + .to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(user_id.as_bytes()); + prefix.push(0xFF); + + // Skip the data that's exactly at since, because we sent that last time + let mut first_possible = prefix.clone(); + first_possible.extend_from_slice(&(since + 1).to_be_bytes()); + + for r in self + .roomuserdataid_accountdata + .iter_from(&first_possible, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(k, v)| { + Ok::<_, Error>(( + RoomAccountDataEventType::from( + utils::string_from_bytes( + k.rsplit(|&b| b == 0xFF) + .next() + .ok_or_else(|| Error::bad_database("RoomUserData ID in db is invalid."))?, + ) + .map_err(|e| { + warn!("RoomUserData ID in database is invalid: {}", e); + Error::bad_database("RoomUserData ID in db is invalid.") + })?, + ), + serde_json::from_slice::>(&v) + .map_err(|_| Error::bad_database("Database contains invalid account data."))?, + )) + }) { + let (kind, data) = r?; + userdata.insert(kind, data); + } + + Ok(userdata) + } +} diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs new file mode 100644 index 00000000..ead37c2b --- /dev/null +++ b/src/database/key_value/appservice.rs @@ -0,0 +1,55 @@ +use ruma::api::appservice::Registration; + +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; + +impl service::appservice::Data for KeyValueDatabase { + /// Registers an appservice and returns the ID to the caller + fn register_appservice(&self, yaml: Registration) -> Result 
{ + let id = yaml.id.as_str(); + self.id_appserviceregistrations + .insert(id.as_bytes(), serde_yaml::to_string(&yaml).unwrap().as_bytes())?; + + Ok(id.to_owned()) + } + + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously + fn unregister_appservice(&self, service_name: &str) -> Result<()> { + self.id_appserviceregistrations + .remove(service_name.as_bytes())?; + Ok(()) + } + + fn get_registration(&self, id: &str) -> Result> { + self.id_appserviceregistrations + .get(id.as_bytes())? + .map(|bytes| { + serde_yaml::from_slice(&bytes) + .map_err(|_| Error::bad_database("Invalid registration bytes in id_appserviceregistrations.")) + }) + .transpose() + } + + fn iter_ids<'a>(&'a self) -> Result> + 'a>> { + Ok(Box::new(self.id_appserviceregistrations.iter().map(|(id, _)| { + utils::string_from_bytes(&id) + .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) + }))) + } + + fn all(&self) -> Result> { + self.iter_ids()? + .filter_map(Result::ok) + .map(move |id| { + Ok(( + id.clone(), + self.get_registration(&id)? 
+ .expect("iter_ids only returns appservices that exist"), + )) + }) + .collect() + } +} diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs new file mode 100644 index 00000000..c53ce9e8 --- /dev/null +++ b/src/database/key_value/globals.rs @@ -0,0 +1,301 @@ +use std::collections::{BTreeMap, HashMap}; + +use async_trait::async_trait; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use lru_cache::LruCache; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + signatures::Ed25519KeyPair, + DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId, +}; + +use crate::{ + database::{Cork, KeyValueDatabase}, + service, services, utils, Error, Result, +}; + +const COUNTER: &[u8] = b"c"; +const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u"; + +#[async_trait] +impl service::globals::Data for KeyValueDatabase { + fn next_count(&self) -> Result { + utils::u64_from_bytes(&self.global.increment(COUNTER)?) + .map_err(|_| Error::bad_database("Count has invalid bytes.")) + } + + fn current_count(&self) -> Result { + self.global.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Count has invalid bytes.")) + }) + } + + fn last_check_for_updates_id(&self) -> Result { + self.global + .get(LAST_CHECK_FOR_UPDATES_COUNT)? 
+ .map_or(Ok(0_u64), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("last check for updates count has invalid bytes.")) + }) + } + + fn update_check_for_updates_id(&self, id: u64) -> Result<()> { + self.global + .insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes())?; + + Ok(()) + } + + #[allow(unused_qualifications)] // async traits + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + let userid_bytes = user_id.as_bytes().to_vec(); + let mut userid_prefix = userid_bytes.clone(); + userid_prefix.push(0xFF); + + let mut userdeviceid_prefix = userid_prefix.clone(); + userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); + userdeviceid_prefix.push(0xFF); + + let mut futures = FuturesUnordered::new(); + + // Return when *any* user changed their key + // TODO: only send for user they share a room with + futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix)); + + futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); + futures.push(self.userroomid_invitestate.watch_prefix(&userid_prefix)); + futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); + futures.push( + self.userroomid_notificationcount + .watch_prefix(&userid_prefix), + ); + futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix)); + + // Events for rooms we are in + for room_id in services() + .rooms + .state_cache + .rooms_joined(user_id) + .filter_map(Result::ok) + { + let short_roomid = services() + .rooms + .short + .get_shortroomid(&room_id) + .ok() + .flatten() + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let roomid_bytes = room_id.as_bytes().to_vec(); + let mut roomid_prefix = roomid_bytes.clone(); + roomid_prefix.push(0xFF); + + // PDUs + futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); + + // EDUs + futures.push(Box::pin(async move { + let _result = services().rooms.typing.wait_for_update(&room_id).await; + })); + + 
futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix)); + + // Key changes + futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); + + // Room account data + let mut roomuser_prefix = roomid_prefix.clone(); + roomuser_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.roomusertype_roomuserdataid + .watch_prefix(&roomuser_prefix), + ); + } + + let mut globaluserdata_prefix = vec![0xFF]; + globaluserdata_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.roomusertype_roomuserdataid + .watch_prefix(&globaluserdata_prefix), + ); + + // More key changes (used when user is not joined to any rooms) + futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); + + // One time keys + futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes)); + + futures.push(Box::pin(services().globals.rotate.watch())); + + // Wait until one of them finds something + futures.next().await; + + Ok(()) + } + + fn cleanup(&self) -> Result<()> { self.db.cleanup() } + + fn flush(&self) -> Result<()> { self.db.flush() } + + fn cork(&self) -> Result { Ok(Cork::new(&self.db, false, false)) } + + fn cork_and_flush(&self) -> Result { Ok(Cork::new(&self.db, true, false)) } + + fn cork_and_sync(&self) -> Result { Ok(Cork::new(&self.db, true, true)) } + + fn memory_usage(&self) -> String { + let auth_chain_cache = self.auth_chain_cache.lock().unwrap().len(); + let our_real_users_cache = self.our_real_users_cache.read().unwrap().len(); + let appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().len(); + let lasttimelinecount_cache = self.lasttimelinecount_cache.lock().unwrap().len(); + + let max_auth_chain_cache = self.auth_chain_cache.lock().unwrap().capacity(); + let max_our_real_users_cache = self.our_real_users_cache.read().unwrap().capacity(); + let max_appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().capacity(); + let max_lasttimelinecount_cache = 
self.lasttimelinecount_cache.lock().unwrap().capacity(); + + let mut response = format!( + "\ +auth_chain_cache: {auth_chain_cache} / {max_auth_chain_cache} +our_real_users_cache: {our_real_users_cache} / {max_our_real_users_cache} +appservice_in_room_cache: {appservice_in_room_cache} / {max_appservice_in_room_cache} +lasttimelinecount_cache: {lasttimelinecount_cache} / {max_lasttimelinecount_cache}\n\n" + ); + if let Ok(db_stats) = self.db.memory_usage() { + response += &db_stats; + } + + response + } + + fn clear_caches(&self, amount: u32) { + if amount > 1 { + let c = &mut *self.auth_chain_cache.lock().unwrap(); + *c = LruCache::new(c.capacity()); + } + if amount > 2 { + let c = &mut *self.our_real_users_cache.write().unwrap(); + *c = HashMap::new(); + } + if amount > 3 { + let c = &mut *self.appservice_in_room_cache.write().unwrap(); + *c = HashMap::new(); + } + if amount > 4 { + let c = &mut *self.lasttimelinecount_cache.lock().unwrap(); + *c = HashMap::new(); + } + } + + fn load_keypair(&self) -> Result { + let keypair_bytes = self.global.get(b"keypair")?.map_or_else( + || { + let keypair = utils::generate_keypair(); + self.global.insert(b"keypair", &keypair)?; + Ok::<_, Error>(keypair) + }, + Ok, + )?; + + let mut parts = keypair_bytes.splitn(2, |&b| b == 0xFF); + + utils::string_from_bytes( + // 1. version + parts + .next() + .expect("splitn always returns at least one element"), + ) + .map_err(|_| Error::bad_database("Invalid version bytes in keypair.")) + .and_then(|version| { + // 2. 
key + parts + .next() + .ok_or_else(|| Error::bad_database("Invalid keypair format in database.")) + .map(|key| (version, key)) + }) + .and_then(|(version, key)| { + Ed25519KeyPair::from_der(key, version) + .map_err(|_| Error::bad_database("Private or public keys are invalid.")) + }) + } + + fn remove_keypair(&self) -> Result<()> { self.global.remove(b"keypair") } + + fn add_signing_key( + &self, origin: &ServerName, new_keys: ServerSigningKeys, + ) -> Result> { + // Not atomic, but this is not critical + let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; + + let mut keys = signingkeys + .and_then(|keys| serde_json::from_slice(&keys).ok()) + .unwrap_or_else(|| { + // Just insert "now", it doesn't matter + ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) + }); + + let ServerSigningKeys { + verify_keys, + old_verify_keys, + .. + } = new_keys; + + keys.verify_keys.extend(verify_keys); + keys.old_verify_keys.extend(old_verify_keys); + + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), + )?; + + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + + Ok(tree) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found + /// for the server. + fn signing_keys_for(&self, origin: &ServerName) -> Result> { + let signingkeys = self + .server_signingkeys + .get(origin.as_bytes())? 
+ .and_then(|bytes| serde_json::from_slice(&bytes).ok()) + .map_or_else(BTreeMap::new, |keys: ServerSigningKeys| { + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + tree + }); + + Ok(signingkeys) + } + + fn database_version(&self) -> Result { + self.global.get(b"version")?.map_or(Ok(0), |version| { + utils::u64_from_bytes(&version).map_err(|_| Error::bad_database("Database version id is invalid.")) + }) + } + + fn bump_database_version(&self, new_version: u64) -> Result<()> { + self.global.insert(b"version", &new_version.to_be_bytes())?; + Ok(()) + } + + fn backup(&self) -> Result<(), Box> { self.db.backup() } + + fn backup_list(&self) -> Result { self.db.backup_list() } + + fn file_list(&self) -> Result { self.db.file_list() } +} diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs new file mode 100644 index 00000000..7ed1da4c --- /dev/null +++ b/src/database/key_value/key_backups.rs @@ -0,0 +1,317 @@ +use std::collections::BTreeMap; + +use ruma::{ + api::client::{ + backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, + error::ErrorKind, + }, + serde::Raw, + OwnedRoomId, RoomId, UserId, +}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::key_backups::Data for KeyValueDatabase { + fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw) -> Result { + let version = services().globals.next_count()?.to_string(); + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + + self.backupid_algorithm.insert( + &key, + &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), + )?; + self.backupid_etag + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; + Ok(version) + } + + fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { + let mut key = 
user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + + self.backupid_algorithm.remove(&key)?; + self.backupid_etag.remove(&key)?; + + key.push(0xFF); + + for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { + self.backupkeyid_backup.remove(&outdated_key)?; + } + + Ok(()) + } + + fn update_backup(&self, user_id: &UserId, version: &str, backup_metadata: &Raw) -> Result { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + + if self.backupid_algorithm.get(&key)?.is_none() { + return Err(Error::BadRequest(ErrorKind::NotFound, "Tried to update nonexistent backup.")); + } + + self.backupid_algorithm + .insert(&key, backup_metadata.json().get().as_bytes())?; + self.backupid_etag + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; + Ok(version.to_owned()) + } + + fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.backupid_algorithm + .iter_from(&last_possible_key, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .next() + .map(|(key, _)| { + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) + }) + .transpose() + } + + fn get_latest_backup(&self, user_id: &UserId) -> Result)>> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.backupid_algorithm + .iter_from(&last_possible_key, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .next() + .map(|(key, value)| { + let version = utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + 
.expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; + + Ok(( + version, + serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid."))?, + )) + }) + .transpose() + } + + fn get_backup(&self, user_id: &UserId, version: &str) -> Result>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + + self.backupid_algorithm + .get(&key)? + .map_or(Ok(None), |bytes| { + serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid.")) + }) + } + + fn add_key( + &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw, + ) -> Result<()> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + + if self.backupid_algorithm.get(&key)?.is_none() { + return Err(Error::BadRequest(ErrorKind::NotFound, "Tried to update nonexistent backup.")); + } + + self.backupid_etag + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; + + key.push(0xFF); + key.extend_from_slice(room_id.as_bytes()); + key.push(0xFF); + key.extend_from_slice(session_id.as_bytes()); + + self.backupkeyid_backup + .insert(&key, key_data.json().get().as_bytes())?; + + Ok(()) + } + + fn count_keys(&self, user_id: &UserId, version: &str) -> Result { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(version.as_bytes()); + + Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) + } + + fn get_etag(&self, user_id: &UserId, version: &str) -> Result { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + + Ok(utils::u64_from_bytes( + &self + .backupid_etag + .get(&key)? 
+ .ok_or_else(|| Error::bad_database("Backup has no etag."))?, + ) + .map_err(|_| Error::bad_database("etag in backupid_etag invalid."))? + .to_string()) + } + + fn get_all(&self, user_id: &UserId, version: &str) -> Result> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(version.as_bytes()); + prefix.push(0xFF); + + let mut rooms = BTreeMap::::new(); + + for result in self + .backupkeyid_backup + .scan_prefix(prefix) + .map(|(key, value)| { + let mut parts = key.rsplit(|&b| b == 0xFF); + + let session_id = utils::string_from_bytes( + parts + .next() + .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, + ) + .map_err(|_| Error::bad_database("backupkeyid_backup session_id is invalid."))?; + + let room_id = RoomId::parse( + utils::string_from_bytes( + parts + .next() + .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, + ) + .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, + ) + .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid room id."))?; + + let key_data = serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("KeyBackupData in backupkeyid_backup is invalid."))?; + + Ok::<_, Error>((room_id, session_id, key_data)) + }) { + let (room_id, session_id, key_data) = result?; + rooms + .entry(room_id) + .or_insert_with(|| RoomKeyBackup { + sessions: BTreeMap::new(), + }) + .sessions + .insert(session_id, key_data); + } + + Ok(rooms) + } + + fn get_room( + &self, user_id: &UserId, version: &str, room_id: &RoomId, + ) -> Result>> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(version.as_bytes()); + prefix.push(0xFF); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xFF); + + Ok(self + .backupkeyid_backup + .scan_prefix(prefix) + .map(|(key, value)| { + let mut parts = key.rsplit(|&b| b == 0xFF); + + let session_id = utils::string_from_bytes( + parts 
+ .next() + .ok_or_else(|| Error::bad_database("backupkeyid_backup key is invalid."))?, + ) + .map_err(|_| Error::bad_database("backupkeyid_backup session_id is invalid."))?; + + let key_data = serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("KeyBackupData in backupkeyid_backup is invalid."))?; + + Ok::<_, Error>((session_id, key_data)) + }) + .filter_map(Result::ok) + .collect()) + } + + fn get_session( + &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, + ) -> Result>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + key.push(0xFF); + key.extend_from_slice(room_id.as_bytes()); + key.push(0xFF); + key.extend_from_slice(session_id.as_bytes()); + + self.backupkeyid_backup + .get(&key)? + .map(|value| { + serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.")) + }) + .transpose() + } + + fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + key.push(0xFF); + + for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { + self.backupkeyid_backup.remove(&outdated_key)?; + } + + Ok(()) + } + + fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + key.push(0xFF); + key.extend_from_slice(room_id.as_bytes()); + key.push(0xFF); + + for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { + self.backupkeyid_backup.remove(&outdated_key)?; + } + + Ok(()) + } + + fn delete_room_key(&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str) -> Result<()> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(version.as_bytes()); + key.push(0xFF); + 
key.extend_from_slice(room_id.as_bytes()); + key.push(0xFF); + key.extend_from_slice(session_id.as_bytes()); + + for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { + self.backupkeyid_backup.remove(&outdated_key)?; + } + + Ok(()) + } +} diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs new file mode 100644 index 00000000..d86b50f1 --- /dev/null +++ b/src/database/key_value/media.rs @@ -0,0 +1,247 @@ +use ruma::api::client::error::ErrorKind; +use tracing::debug; + +use crate::{ + database::KeyValueDatabase, + service::{self, media::UrlPreviewData}, + utils::string_from_bytes, + Error, Result, +}; + +impl service::media::Data for KeyValueDatabase { + fn create_file_metadata( + &self, sender_user: Option<&str>, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, + content_type: Option<&str>, + ) -> Result> { + let mut key = mxc.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(&width.to_be_bytes()); + key.extend_from_slice(&height.to_be_bytes()); + key.push(0xFF); + key.extend_from_slice( + content_disposition + .as_ref() + .map(|f| f.as_bytes()) + .unwrap_or_default(), + ); + key.push(0xFF); + key.extend_from_slice( + content_type + .as_ref() + .map(|c| c.as_bytes()) + .unwrap_or_default(), + ); + + self.mediaid_file.insert(&key, &[])?; + + if let Some(user) = sender_user { + let key = mxc.as_bytes().to_vec(); + let user = user.as_bytes().to_vec(); + self.mediaid_user.insert(&key, &user)?; + } + + Ok(key) + } + + fn delete_file_mxc(&self, mxc: String) -> Result<()> { + debug!("MXC URI: {:?}", mxc); + + let mut prefix = mxc.as_bytes().to_vec(); + prefix.push(0xFF); + + debug!("MXC db prefix: {prefix:?}"); + + for (key, _) in self.mediaid_file.scan_prefix(prefix) { + debug!("Deleting key: {:?}", key); + self.mediaid_file.remove(&key)?; + } + + for (key, value) in self.mediaid_user.scan_prefix(mxc.as_bytes().to_vec()) { + if key == mxc.as_bytes().to_vec() { + let user = 
string_from_bytes(&value).unwrap_or_default(); + + debug!("Deleting key \"{key:?}\" which was uploaded by user {user}"); + self.mediaid_user.remove(&key)?; + } + } + + Ok(()) + } + + /// Searches for all files with the given MXC + fn search_mxc_metadata_prefix(&self, mxc: String) -> Result>> { + debug!("MXC URI: {:?}", mxc); + + let mut prefix = mxc.as_bytes().to_vec(); + prefix.push(0xFF); + + let mut keys: Vec> = vec![]; + + for (key, _) in self.mediaid_file.scan_prefix(prefix) { + keys.push(key); + } + + if keys.is_empty() { + return Err(Error::bad_database( + "Failed to find any keys in database with the provided MXC.", + )); + } + + debug!("Got the following keys: {:?}", keys); + + Ok(keys) + } + + fn search_file_metadata( + &self, mxc: String, width: u32, height: u32, + ) -> Result<(Option, Option, Vec)> { + let mut prefix = mxc.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(&width.to_be_bytes()); + prefix.extend_from_slice(&height.to_be_bytes()); + prefix.push(0xFF); + + let (key, _) = self + .mediaid_file + .scan_prefix(prefix) + .next() + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; + + let mut parts = key.rsplit(|&b| b == 0xFF); + + let content_type = parts + .next() + .map(|bytes| { + string_from_bytes(bytes) + .map_err(|_| Error::bad_database("Content type in mediaid_file is invalid unicode.")) + }) + .transpose()?; + + let content_disposition_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; + + let content_disposition = if content_disposition_bytes.is_empty() { + None + } else { + Some( + string_from_bytes(content_disposition_bytes) + .map_err(|_| Error::bad_database("Content Disposition in mediaid_file is invalid unicode."))?, + ) + }; + Ok((content_disposition, content_type, key)) + } + + /// Gets all the media keys in our database (this includes all the metadata + /// associated with it such as width, height, content-type, etc) + fn get_all_media_keys(&self) -> 
Result>> { + let mut keys: Vec> = vec![]; + + for (key, _) in self.mediaid_file.iter() { + keys.push(key); + } + + Ok(keys) + } + + fn remove_url_preview(&self, url: &str) -> Result<()> { self.url_previews.remove(url.as_bytes()) } + + fn set_url_preview(&self, url: &str, data: &UrlPreviewData, timestamp: std::time::Duration) -> Result<()> { + let mut value = Vec::::new(); + value.extend_from_slice(×tamp.as_secs().to_be_bytes()); + value.push(0xFF); + value.extend_from_slice( + data.title + .as_ref() + .map(String::as_bytes) + .unwrap_or_default(), + ); + value.push(0xFF); + value.extend_from_slice( + data.description + .as_ref() + .map(String::as_bytes) + .unwrap_or_default(), + ); + value.push(0xFF); + value.extend_from_slice( + data.image + .as_ref() + .map(String::as_bytes) + .unwrap_or_default(), + ); + value.push(0xFF); + value.extend_from_slice(&data.image_size.unwrap_or(0).to_be_bytes()); + value.push(0xFF); + value.extend_from_slice(&data.image_width.unwrap_or(0).to_be_bytes()); + value.push(0xFF); + value.extend_from_slice(&data.image_height.unwrap_or(0).to_be_bytes()); + + self.url_previews.insert(url.as_bytes(), &value) + } + + fn get_url_preview(&self, url: &str) -> Option { + let values = self.url_previews.get(url.as_bytes()).ok()??; + + let mut values = values.split(|&b| b == 0xFF); + + let _ts = values.next(); + /* if we ever decide to use timestamp, this is here. 
+ match values.next().map(|b| u64::from_be_bytes(b.try_into().expect("valid BE array"))) { + Some(0) => None, + x => x, + };*/ + + let title = match values + .next() + .and_then(|b| String::from_utf8(b.to_vec()).ok()) + { + Some(s) if s.is_empty() => None, + x => x, + }; + let description = match values + .next() + .and_then(|b| String::from_utf8(b.to_vec()).ok()) + { + Some(s) if s.is_empty() => None, + x => x, + }; + let image = match values + .next() + .and_then(|b| String::from_utf8(b.to_vec()).ok()) + { + Some(s) if s.is_empty() => None, + x => x, + }; + let image_size = match values + .next() + .map(|b| usize::from_be_bytes(b.try_into().unwrap_or_default())) + { + Some(0) => None, + x => x, + }; + let image_width = match values + .next() + .map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default())) + { + Some(0) => None, + x => x, + }; + let image_height = match values + .next() + .map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default())) + { + Some(0) => None, + x => x, + }; + + Some(UrlPreviewData { + title, + description, + image, + image_size, + image_width, + image_height, + }) + } +} diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs new file mode 100644 index 00000000..4391cac5 --- /dev/null +++ b/src/database/key_value/mod.rs @@ -0,0 +1,14 @@ +mod account_data; +//mod admin; +mod appservice; +mod globals; +mod key_backups; +mod media; +//mod pdu; +mod presence; +mod pusher; +mod rooms; +mod sending; +mod transaction_ids; +mod uiaa; +mod users; diff --git a/src/database/key_value/presence.rs b/src/database/key_value/presence.rs new file mode 100644 index 00000000..c123a339 --- /dev/null +++ b/src/database/key_value/presence.rs @@ -0,0 +1,120 @@ +use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; +use tracing::debug; + +use crate::{ + database::KeyValueDatabase, + service::{self, presence::Presence}, + services, + utils::{self, user_id_from_bytes}, + Error, Result, +}; + +impl 
service::presence::Data for KeyValueDatabase { + fn get_presence(&self, user_id: &UserId) -> Result> { + if let Some(count_bytes) = self.userid_presenceid.get(user_id.as_bytes())? { + let count = utils::u64_from_bytes(&count_bytes) + .map_err(|_e| Error::bad_database("No 'count' bytes in presence key"))?; + + let key = presenceid_key(count, user_id); + self.presenceid_presence + .get(&key)? + .map(|presence_bytes| -> Result<(u64, PresenceEvent)> { + Ok((count, Presence::from_json_bytes(&presence_bytes)?.to_presence_event(user_id)?)) + }) + .transpose() + } else { + Ok(None) + } + } + + fn set_presence( + &self, user_id: &UserId, presence_state: &PresenceState, currently_active: Option, + last_active_ago: Option, status_msg: Option, + ) -> Result<()> { + let last_presence = self.get_presence(user_id)?; + let state_changed = match last_presence { + None => true, + Some(ref presence) => presence.1.content.presence != *presence_state, + }; + + let now = utils::millis_since_unix_epoch(); + let last_last_active_ts = match last_presence { + None => 0, + Some((_, ref presence)) => now.saturating_sub(presence.content.last_active_ago.unwrap_or_default().into()), + }; + + let last_active_ts = match last_active_ago { + None => now, + Some(last_active_ago) => now.saturating_sub(last_active_ago.into()), + }; + + // tighten for state flicker? 
+ if !state_changed && last_active_ts <= last_last_active_ts { + debug!( + "presence spam {:?} last_active_ts:{:?} <= {:?}", + user_id, last_active_ts, last_last_active_ts + ); + return Ok(()); + } + + let presence = Presence::new( + presence_state.to_owned(), + currently_active.unwrap_or(false), + last_active_ts, + status_msg, + ); + let count = services().globals.next_count()?; + let key = presenceid_key(count, user_id); + + self.presenceid_presence + .insert(&key, &presence.to_json_bytes()?)?; + + self.userid_presenceid + .insert(user_id.as_bytes(), &count.to_be_bytes())?; + + if let Some((last_count, _)) = last_presence { + let key = presenceid_key(last_count, user_id); + self.presenceid_presence.remove(&key)?; + } + + Ok(()) + } + + fn remove_presence(&self, user_id: &UserId) -> Result<()> { + if let Some(count_bytes) = self.userid_presenceid.get(user_id.as_bytes())? { + let count = utils::u64_from_bytes(&count_bytes) + .map_err(|_e| Error::bad_database("No 'count' bytes in presence key"))?; + let key = presenceid_key(count, user_id); + self.presenceid_presence.remove(&key)?; + self.userid_presenceid.remove(user_id.as_bytes())?; + } + + Ok(()) + } + + fn presence_since<'a>(&'a self, since: u64) -> Box)> + 'a> { + Box::new( + self.presenceid_presence + .iter() + .flat_map(|(key, presence_bytes)| -> Result<(OwnedUserId, u64, Vec)> { + let (count, user_id) = presenceid_parse(&key)?; + Ok((user_id, count, presence_bytes)) + }) + .filter(move |(_, count, _)| *count > since), + ) + } +} + +#[inline] +fn presenceid_key(count: u64, user_id: &UserId) -> Vec { + [count.to_be_bytes().to_vec(), user_id.as_bytes().to_vec()].concat() +} + +#[inline] +fn presenceid_parse(key: &[u8]) -> Result<(u64, OwnedUserId)> { + let (count, user_id) = key.split_at(8); + let user_id = user_id_from_bytes(user_id)?; + let count = utils::u64_from_bytes(count).unwrap(); + + Ok((count, user_id)) +} diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs new file mode 
100644 index 00000000..851831ec --- /dev/null +++ b/src/database/key_value/pusher.rs @@ -0,0 +1,65 @@ +use ruma::{ + api::client::push::{set_pusher, Pusher}, + UserId, +}; + +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; + +impl service::pusher::Data for KeyValueDatabase { + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> { + match &pusher { + set_pusher::v3::PusherAction::Post(data) => { + let mut key = sender.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(data.pusher.ids.pushkey.as_bytes()); + self.senderkey_pusher + .insert(&key, &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"))?; + Ok(()) + }, + set_pusher::v3::PusherAction::Delete(ids) => { + let mut key = sender.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(ids.pushkey.as_bytes()); + self.senderkey_pusher.remove(&key).map_err(Into::into) + }, + } + } + + fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + let mut senderkey = sender.as_bytes().to_vec(); + senderkey.push(0xFF); + senderkey.extend_from_slice(pushkey.as_bytes()); + + self.senderkey_pusher + .get(&senderkey)? 
+ .map(|push| serde_json::from_slice(&push).map_err(|_| Error::bad_database("Invalid Pusher in db."))) + .transpose() + } + + fn get_pushers(&self, sender: &UserId) -> Result> { + let mut prefix = sender.as_bytes().to_vec(); + prefix.push(0xFF); + + self.senderkey_pusher + .scan_prefix(prefix) + .map(|(_, push)| serde_json::from_slice(&push).map_err(|_| Error::bad_database("Invalid Pusher in db."))) + .collect() + } + + fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a> { + let mut prefix = sender.as_bytes().to_vec(); + prefix.push(0xFF); + + Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| { + let mut parts = k.splitn(2, |&b| b == 0xFF); + let _senderkey = parts.next(); + let push_key = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; + let push_key_string = utils::string_from_bytes(push_key) + .map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; + + Ok(push_key_string) + })) + } +} diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs new file mode 100644 index 00000000..b5a976c7 --- /dev/null +++ b/src/database/key_value/rooms/alias.rs @@ -0,0 +1,75 @@ +use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::alias::Data for KeyValueDatabase { + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { + self.alias_roomid + .insert(alias.alias().as_bytes(), room_id.as_bytes())?; + let mut aliasid = room_id.as_bytes().to_vec(); + aliasid.push(0xFF); + aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + self.aliasid_alias.insert(&aliasid, alias.as_bytes())?; + Ok(()) + } + + fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { + if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? 
{ + let mut prefix = room_id; + prefix.push(0xFF); + + for (key, _) in self.aliasid_alias.scan_prefix(prefix) { + self.aliasid_alias.remove(&key)?; + } + self.alias_roomid.remove(alias.alias().as_bytes())?; + } else { + return Err(Error::BadRequest(ErrorKind::NotFound, "Alias does not exist.")); + } + Ok(()) + } + + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { + self.alias_roomid + .get(alias.alias().as_bytes())? + .map(|bytes| { + RoomId::parse( + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) + }) + .transpose() + } + + fn local_aliases_for_room<'a>( + &'a self, room_id: &RoomId, + ) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xFF); + + Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? + .try_into() + .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) + })) + } + + fn all_local_aliases<'a>(&'a self) -> Box> + 'a> { + Box::new( + self.alias_roomid + .iter() + .map(|(room_alias_bytes, room_id_bytes)| { + let room_alias_localpart = utils::string_from_bytes(&room_alias_bytes) + .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))?; + + let room_id = utils::string_from_bytes(&room_id_bytes) + .map_err(|_| Error::bad_database("Invalid room_id bytes in aliasid_alias."))? 
+ .try_into() + .map_err(|_| Error::bad_database("Invalid room_id in aliasid_alias."))?; + + Ok((room_id, room_alias_localpart)) + }), + ) + } +} diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs new file mode 100644 index 00000000..435a1c03 --- /dev/null +++ b/src/database/key_value/rooms/auth_chain.rs @@ -0,0 +1,59 @@ +use std::{mem::size_of, sync::Arc}; + +use crate::{database::KeyValueDatabase, service, utils, Result}; + +impl service::rooms::auth_chain::Data for KeyValueDatabase { + fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>> { + // Check RAM cache + if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { + return Ok(Some(Arc::clone(result))); + } + + // We only save auth chains for single events in the db + if key.len() == 1 { + // Check DB cache + let chain = self + .shorteventid_authchain + .get(&key[0].to_be_bytes())? + .map(|chain| { + chain + .chunks_exact(size_of::()) + .map(|chunk| utils::u64_from_bytes(chunk).expect("byte length is correct")) + .collect::>() + }); + + if let Some(chain) = chain { + // Cache in RAM + self.auth_chain_cache + .lock() + .unwrap() + .insert(vec![key[0]], Arc::clone(&chain)); + + return Ok(Some(chain)); + } + } + + Ok(None) + } + + fn cache_auth_chain(&self, key: Vec, auth_chain: Arc<[u64]>) -> Result<()> { + // Only persist single events in db + if key.len() == 1 { + self.shorteventid_authchain.insert( + &key[0].to_be_bytes(), + &auth_chain + .iter() + .flat_map(|s| s.to_be_bytes().to_vec()) + .collect::>(), + )?; + } + + // Cache in RAM + self.auth_chain_cache + .lock() + .unwrap() + .insert(key, auth_chain); + + Ok(()) + } +} diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs new file mode 100644 index 00000000..20ccfb55 --- /dev/null +++ b/src/database/key_value/rooms/directory.rs @@ -0,0 +1,23 @@ +use ruma::{OwnedRoomId, RoomId}; + +use crate::{database::KeyValueDatabase, service, 
utils, Error, Result}; + +impl service::rooms::directory::Data for KeyValueDatabase { + fn set_public(&self, room_id: &RoomId) -> Result<()> { self.publicroomids.insert(room_id.as_bytes(), &[]) } + + fn set_not_public(&self, room_id: &RoomId) -> Result<()> { self.publicroomids.remove(room_id.as_bytes()) } + + fn is_public_room(&self, room_id: &RoomId) -> Result { + Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) + } + + fn public_rooms<'a>(&'a self) -> Box> + 'a> { + Box::new(self.publicroomids.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) + })) + } +} diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs new file mode 100644 index 00000000..080eb4b8 --- /dev/null +++ b/src/database/key_value/rooms/lazy_load.rs @@ -0,0 +1,53 @@ +use ruma::{DeviceId, RoomId, UserId}; + +use crate::{database::KeyValueDatabase, service, Result}; + +impl service::rooms::lazy_loading::Data for KeyValueDatabase { + fn lazy_load_was_sent_before( + &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, ll_user: &UserId, + ) -> Result { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(device_id.as_bytes()); + key.push(0xFF); + key.extend_from_slice(room_id.as_bytes()); + key.push(0xFF); + key.extend_from_slice(ll_user.as_bytes()); + Ok(self.lazyloadedids.get(&key)?.is_some()) + } + + fn lazy_load_confirm_delivery( + &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, + confirmed_user_ids: &mut dyn Iterator, + ) -> Result<()> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xFF); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xFF); + + for ll_id in confirmed_user_ids { + let mut key 
= prefix.clone(); + key.extend_from_slice(ll_id.as_bytes()); + self.lazyloadedids.insert(&key, &[])?; + } + + Ok(()) + } + + fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) -> Result<()> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xFF); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xFF); + + for (key, _) in self.lazyloadedids.scan_prefix(prefix) { + self.lazyloadedids.remove(&key)?; + } + + Ok(()) + } +} diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs new file mode 100644 index 00000000..9528da1e --- /dev/null +++ b/src/database/key_value/rooms/metadata.rs @@ -0,0 +1,76 @@ +use ruma::{OwnedRoomId, RoomId}; +use tracing::error; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::metadata::Data for KeyValueDatabase { + fn exists(&self, room_id: &RoomId) -> Result { + let prefix = match services().rooms.short.get_shortroomid(room_id)? { + Some(b) => b.to_be_bytes().to_vec(), + None => return Ok(false), + }; + + // Look for PDUs in that room. 
+ Ok(self + .pduid_pdu + .iter_from(&prefix, false) + .next() + .filter(|(k, _)| k.starts_with(&prefix)) + .is_some()) + } + + fn iter_ids<'a>(&'a self) -> Box> + 'a> { + Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) + })) + } + + fn is_disabled(&self, room_id: &RoomId) -> Result { + Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) + } + + fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { + if disabled { + self.disabledroomids.insert(room_id.as_bytes(), &[])?; + } else { + self.disabledroomids.remove(room_id.as_bytes())?; + } + + Ok(()) + } + + fn is_banned(&self, room_id: &RoomId) -> Result { Ok(self.bannedroomids.get(room_id.as_bytes())?.is_some()) } + + fn ban_room(&self, room_id: &RoomId, banned: bool) -> Result<()> { + if banned { + self.bannedroomids.insert(room_id.as_bytes(), &[])?; + } else { + self.bannedroomids.remove(room_id.as_bytes())?; + } + + Ok(()) + } + + fn list_banned_rooms<'a>(&'a self) -> Box> + 'a> { + Box::new(self.bannedroomids.iter().map( + |(room_id_bytes, _ /* non-banned rooms should not be in this table */)| { + let room_id = utils::string_from_bytes(&room_id_bytes) + .map_err(|e| { + error!("Invalid room_id bytes in bannedroomids: {e}"); + Error::bad_database("Invalid room_id in bannedroomids.") + })? 
+ .try_into() + .map_err(|e| { + error!("Invalid room_id in bannedroomids: {e}"); + Error::bad_database("Invalid room_id in bannedroomids") + })?; + + Ok(room_id) + }, + )) + } +} diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs new file mode 100644 index 00000000..087f2711 --- /dev/null +++ b/src/database/key_value/rooms/mod.rs @@ -0,0 +1,21 @@ +mod alias; +mod auth_chain; +mod directory; +mod lazy_load; +mod metadata; +mod outlier; +mod pdu_metadata; +mod read_receipt; +mod search; +mod short; +mod state; +mod state_accessor; +mod state_cache; +mod state_compressor; +mod threads; +mod timeline; +mod user; + +use crate::{database::KeyValueDatabase, service}; + +impl service::rooms::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs new file mode 100644 index 00000000..933660e8 --- /dev/null +++ b/src/database/key_value/rooms/outlier.rs @@ -0,0 +1,28 @@ +use ruma::{CanonicalJsonObject, EventId}; + +use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result}; + +impl service::rooms::outlier::Data for KeyValueDatabase { + fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + + fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? 
+ .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + + fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { + self.eventid_outlierpdu.insert( + event_id.as_bytes(), + &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), + ) + } +} diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs new file mode 100644 index 00000000..b5c81f62 --- /dev/null +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -0,0 +1,83 @@ +use std::{mem, sync::Arc}; + +use ruma::{EventId, RoomId, UserId}; + +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::timeline::PduCount}, + services, utils, Error, PduEvent, Result, +}; + +impl service::rooms::pdu_metadata::Data for KeyValueDatabase { + fn add_relation(&self, from: u64, to: u64) -> Result<()> { + let mut key = to.to_be_bytes().to_vec(); + key.extend_from_slice(&from.to_be_bytes()); + self.tofrom_relation.insert(&key, &[])?; + Ok(()) + } + + fn relations_until<'a>( + &'a self, user_id: &'a UserId, shortroomid: u64, target: u64, until: PduCount, + ) -> Result> + 'a>> { + let prefix = target.to_be_bytes().to_vec(); + let mut current = prefix.clone(); + + let count_raw = match until { + PduCount::Normal(x) => x - 1, + PduCount::Backfilled(x) => { + current.extend_from_slice(&0_u64.to_be_bytes()); + u64::MAX - x - 1 + }, + }; + current.extend_from_slice(&count_raw.to_be_bytes()); + + Ok(Box::new( + self.tofrom_relation + .iter_from(¤t, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(tofrom, _data)| { + let from = utils::u64_from_bytes(&tofrom[(mem::size_of::())..]) + .map_err(|_| Error::bad_database("Invalid count in tofrom_relation."))?; + + let mut pduid = shortroomid.to_be_bytes().to_vec(); + pduid.extend_from_slice(&from.to_be_bytes()); + + let mut pdu = services() + .rooms + .timeline + .get_pdu_from_id(&pduid)? 
+ .ok_or_else(|| Error::bad_database("Pdu in tofrom_relation is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((PduCount::Normal(from), pdu)) + }), + )) + } + + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { + for prev in event_ids { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(prev.as_bytes()); + self.referencedevents.insert(&key, &[])?; + } + + Ok(()) + } + + fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(event_id.as_bytes()); + Ok(self.referencedevents.get(&key)?.is_some()) + } + + fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { + self.softfailedeventids.insert(event_id.as_bytes(), &[]) + } + + fn is_event_soft_failed(&self, event_id: &EventId) -> Result { + self.softfailedeventids + .get(event_id.as_bytes()) + .map(|o| o.is_some()) + } +} diff --git a/src/database/key_value/rooms/read_receipt.rs b/src/database/key_value/rooms/read_receipt.rs new file mode 100644 index 00000000..63fa2520 --- /dev/null +++ b/src/database/key_value/rooms/read_receipt.rs @@ -0,0 +1,120 @@ +use std::mem; + +use ruma::{events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, OwnedUserId, RoomId, UserId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::read_receipt::Data for KeyValueDatabase { + fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: ReceiptEvent) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xFF); + + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + // Remove old entry + if let Some((old, _)) = self + .readreceiptid_readreceipt + .iter_from(&last_possible_key, true) + .take_while(|(key, _)| key.starts_with(&prefix)) + .find(|(key, _)| { + key.rsplit(|&b| b == 0xFF) + .next() 
+ .expect("rsplit always returns an element") + == user_id.as_bytes() + }) { + // This is the old room_latest + self.readreceiptid_readreceipt.remove(&old)?; + } + + let mut room_latest_id = prefix; + room_latest_id.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + room_latest_id.push(0xFF); + room_latest_id.extend_from_slice(user_id.as_bytes()); + + self.readreceiptid_readreceipt.insert( + &room_latest_id, + &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), + )?; + + Ok(()) + } + + fn readreceipts_since<'a>( + &'a self, room_id: &RoomId, since: u64, + ) -> Box)>> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xFF); + let prefix2 = prefix.clone(); + + let mut first_possible_edu = prefix.clone(); + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since + + Box::new( + self.readreceiptid_readreceipt + .iter_from(&first_possible_edu, false) + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(k, v)| { + let count = utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) + .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; + let user_id = UserId::parse( + utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) + .map_err(|_| Error::bad_database("Invalid readreceiptid userid bytes in db."))?, + ) + .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; + + let mut json = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json."))?; + json.remove("room_id"); + + Ok(( + user_id, + count, + Raw::from_json(serde_json::value::to_raw_value(&json).expect("json is valid raw value")), + )) + }), + ) + } + + fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(user_id.as_bytes()); + + 
self.roomuserid_privateread + .insert(&key, &count.to_be_bytes())?; + + self.roomuserid_lastprivatereadupdate + .insert(&key, &services().globals.next_count()?.to_be_bytes()) + } + + fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_privateread + .get(&key)? + .map_or(Ok(None), |v| { + Ok(Some( + utils::u64_from_bytes(&v).map_err(|_| Error::bad_database("Invalid private read marker bytes"))?, + )) + }) + } + + fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(user_id.as_bytes()); + + Ok(self + .roomuserid_lastprivatereadupdate + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")) + }) + .transpose()? + .unwrap_or(0)) + } +} diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs new file mode 100644 index 00000000..6c5d1bc2 --- /dev/null +++ b/src/database/key_value/rooms/search.rs @@ -0,0 +1,64 @@ +use ruma::RoomId; + +use crate::{database::KeyValueDatabase, service, services, utils, Result}; + +type SearchPdusResult<'a> = Result> + 'a>, Vec)>>; + +impl service::rooms::search::Data for KeyValueDatabase { + fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { + let mut batch = message_body + .split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) + .filter(|word| word.len() <= 50) + .map(str::to_lowercase) + .map(|word| { + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xFF); + key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here + (key, Vec::new()) + }); + + self.tokenids.insert_batch(&mut batch) + } + + fn 
search_pdus<'a>(&'a self, room_id: &RoomId, search_string: &str) -> SearchPdusResult<'a> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let words: Vec<_> = search_string + .split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) + .map(str::to_lowercase) + .collect(); + + let iterators = words.clone().into_iter().map(move |word| { + let mut prefix2 = prefix.clone(); + prefix2.extend_from_slice(word.as_bytes()); + prefix2.push(0xFF); + let prefix3 = prefix2.clone(); + + let mut last_possible_id = prefix2.clone(); + last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.tokenids + .iter_from(&last_possible_id, true) // Newest pdus first + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(key, _)| key[prefix3.len()..].to_vec()) + }); + + let Some(common_elements) = utils::common_elements(iterators, |a, b| { + // We compare b with a because we reversed the iterator earlier + b.cmp(a) + }) else { + return Ok(None); + }; + + Ok(Some((Box::new(common_elements), words))) + } +} diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs new file mode 100644 index 00000000..e0c3daac --- /dev/null +++ b/src/database/key_value/rooms/short.rs @@ -0,0 +1,164 @@ +use std::sync::Arc; + +use ruma::{events::StateEventType, EventId, RoomId}; +use tracing::warn; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::short::Data for KeyValueDatabase { + fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { + let short = if let Some(shorteventid) = self.eventid_shorteventid.get(event_id.as_bytes())? { + utils::u64_from_bytes(&shorteventid).map_err(|_| Error::bad_database("Invalid shorteventid in db."))? 
+ } else { + let shorteventid = services().globals.next_count()?; + self.eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; + shorteventid + }; + + Ok(short) + } + + fn multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Result> { + let mut ret: Vec = Vec::with_capacity(event_ids.len()); + let keys = event_ids + .iter() + .map(|id| id.as_bytes()) + .collect::>(); + for (i, short) in self + .eventid_shorteventid + .multi_get(&keys)? + .iter() + .enumerate() + { + match short { + Some(short) => ret.push( + utils::u64_from_bytes(short).map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, + ), + None => { + let short = services().globals.next_count()?; + self.eventid_shorteventid + .insert(keys[i], &short.to_be_bytes())?; + self.shorteventid_eventid + .insert(&short.to_be_bytes(), keys[i])?; + + debug_assert!(ret.len() == i, "position of result must match input"); + ret.push(short); + }, + } + } + + Ok(ret) + } + + fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result> { + let mut statekey_vec = event_type.to_string().as_bytes().to_vec(); + statekey_vec.push(0xFF); + statekey_vec.extend_from_slice(state_key.as_bytes()); + + let short = self + .statekey_shortstatekey + .get(&statekey_vec)? + .map(|shortstatekey| { + utils::u64_from_bytes(&shortstatekey).map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) + }) + .transpose()?; + + Ok(short) + } + + fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { + let mut statekey_vec = event_type.to_string().as_bytes().to_vec(); + statekey_vec.push(0xFF); + statekey_vec.extend_from_slice(state_key.as_bytes()); + + let short = if let Some(shortstatekey) = self.statekey_shortstatekey.get(&statekey_vec)? 
{ + utils::u64_from_bytes(&shortstatekey).map_err(|_| Error::bad_database("Invalid shortstatekey in db."))? + } else { + let shortstatekey = services().globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey_vec, &shortstatekey.to_be_bytes())?; + self.shortstatekey_statekey + .insert(&shortstatekey.to_be_bytes(), &statekey_vec)?; + shortstatekey + }; + + Ok(short) + } + + fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { + let bytes = self + .shorteventid_eventid + .get(&shorteventid.to_be_bytes())? + .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; + + let event_id = EventId::parse_arc( + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("EventID in shorteventid_eventid is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; + + Ok(event_id) + } + + fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { + let bytes = self + .shortstatekey_statekey + .get(&shortstatekey.to_be_bytes())? 
+ .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; + + let mut parts = bytes.splitn(2, |&b| b == 0xFF); + let eventtype_bytes = parts.next().expect("split always returns one entry"); + let statekey_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; + + let event_type = StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|e| { + warn!("Event type in shortstatekey_statekey is invalid: {}", e); + Error::bad_database("Event type in shortstatekey_statekey is invalid.") + })?); + + let state_key = utils::string_from_bytes(statekey_bytes) + .map_err(|_| Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode."))?; + + let result = (event_type, state_key); + + Ok(result) + } + + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { + Ok(if let Some(shortstatehash) = self.statehash_shortstatehash.get(state_hash)? { + ( + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, + true, + ) + } else { + let shortstatehash = services().globals.next_count()?; + self.statehash_shortstatehash + .insert(state_hash, &shortstatehash.to_be_bytes())?; + (shortstatehash, false) + }) + } + + fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self.roomid_shortroomid + .get(room_id.as_bytes())? + .map(|bytes| utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid shortroomid in db."))) + .transpose() + } + + fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { + Ok(if let Some(short) = self.roomid_shortroomid.get(room_id.as_bytes())? { + utils::u64_from_bytes(&short).map_err(|_| Error::bad_database("Invalid shortroomid in db."))? 
+ } else { + let short = services().globals.next_count()?; + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes())?; + short + }) + } +} diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs new file mode 100644 index 00000000..79ef202f --- /dev/null +++ b/src/database/key_value/rooms/state.rs @@ -0,0 +1,73 @@ +use std::{collections::HashSet, sync::Arc}; + +use ruma::{EventId, OwnedEventId, RoomId}; +use tokio::sync::MutexGuard; + +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; + +impl service::rooms::state::Data for KeyValueDatabase { + fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.roomid_shortstatehash + .get(room_id.as_bytes())? + .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") + })?)) + }) + } + + fn set_room_state( + &self, + room_id: &RoomId, + new_shortstatehash: u64, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; + Ok(()) + } + + fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()> { + self.shorteventid_shortstatehash + .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + Ok(()) + } + + fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xFF); + + self.roomid_pduleaves + .scan_prefix(prefix) + .map(|(_, bytes)| { + EventId::parse_arc( + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("EventID in roomid_pduleaves is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) + }) + .collect() + } + + fn set_forward_extremities( + &self, + room_id: &RoomId, + event_ids: Vec, + _mutex_lock: 
&MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xFF); + + for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { + self.roomid_pduleaves.remove(&key)?; + } + + for event_id in event_ids { + let mut key = prefix.clone(); + key.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + } + + Ok(()) + } +} diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs new file mode 100644 index 00000000..1ef7c4b5 --- /dev/null +++ b/src/database/key_value/rooms/state_accessor.rs @@ -0,0 +1,165 @@ +use std::{collections::HashMap, sync::Arc}; + +use async_trait::async_trait; +use ruma::{events::StateEventType, EventId, RoomId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; + +#[async_trait] +impl service::rooms::state_accessor::Data for KeyValueDatabase { + #[allow(unused_qualifications)] // async traits + async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + let mut result = HashMap::new(); + let mut i = 0; + for compressed in full_state.iter() { + let parsed = services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed)?; + result.insert(parsed.0, parsed.1); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + Ok(result) + } + + #[allow(unused_qualifications)] // async traits + async fn state_full(&self, shortstatehash: u64) -> Result>> { + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? 
+ .pop() + .expect("there is always one layer") + .1; + + let mut result = HashMap::new(); + let mut i = 0; + for compressed in full_state.iter() { + let (_, eventid) = services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed)?; + if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { + result.insert( + ( + pdu.kind.to_string().into(), + pdu.state_key + .as_ref() + .ok_or_else(|| Error::bad_database("State event has no state key."))? + .clone(), + ), + pdu, + ); + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + Ok(result) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + fn state_get_id( + &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + ) -> Result>> { + let Some(shortstatekey) = services() + .rooms + .short + .get_shortstatekey(event_type, state_key)? + else { + return Ok(None); + }; + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + Ok(full_state + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + .and_then(|compressed| { + services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed) + .ok() + .map(|(_, id)| id) + })) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + fn state_get( + &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + ) -> Result>> { + self.state_get_id(shortstatehash, event_type, state_key)? + .map_or(Ok(None), |event_id| services().rooms.timeline.get_pdu(&event_id)) + } + + /// Returns the state hash for this pdu. + fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { + self.eventid_shorteventid + .get(event_id.as_bytes())? + .map_or(Ok(None), |shorteventid| { + self.shorteventid_shortstatehash + .get(&shorteventid)? 
+ .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash bytes in shorteventid_shortstatehash") + }) + }) + .transpose() + }) + } + + /// Returns the full room state. + #[allow(unused_qualifications)] // async traits + async fn room_state_full(&self, room_id: &RoomId) -> Result>> { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + self.state_full(current_shortstatehash).await + } else { + Ok(HashMap::new()) + } + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + fn room_state_get_id( + &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + ) -> Result>> { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + self.state_get_id(current_shortstatehash, event_type, state_key) + } else { + Ok(None) + } + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + fn room_state_get( + &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + ) -> Result>> { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? 
{ + self.state_get(current_shortstatehash, event_type, state_key) + } else { + Ok(None) + } + } +} diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs new file mode 100644 index 00000000..99fcfe85 --- /dev/null +++ b/src/database/key_value/rooms/state_cache.rs @@ -0,0 +1,627 @@ +use std::{collections::HashSet, sync::Arc}; + +use itertools::Itertools; +use ruma::{ + events::{AnyStrippedStateEvent, AnySyncStateEvent}, + serde::Raw, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, +}; +use tracing::error; + +use crate::{ + database::KeyValueDatabase, + service::{self, appservice::RegistrationInfo}, + services, utils, Error, Result, +}; + +type StrippedStateEventIter<'a> = Box>)>> + 'a>; + +type AnySyncStateEventIter<'a> = Box>)>> + 'a>; + +impl service::rooms::state_cache::Data for KeyValueDatabase { + fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + self.roomuseroncejoinedids.insert(&userroom_id, &[]) + } + + fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let roomid = room_id.as_bytes().to_vec(); + let mut roomid_prefix = room_id.as_bytes().to_vec(); + roomid_prefix.push(0xFF); + + let mut roomuser_id = roomid_prefix.clone(); + roomuser_id.push(0xFF); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_joined.insert(&userroom_id, &[])?; + self.roomuserid_joined.insert(&roomuser_id, &[])?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + if self + .roomuserid_joined + 
.scan_prefix(roomid_prefix.clone()) + .count() == 0 + && self + .roomuserid_invitecount + .scan_prefix(roomid_prefix) + .count() == 0 + { + self.roomid_inviteviaservers.remove(&roomid)?; + } + + Ok(()) + } + + fn mark_as_invited( + &self, user_id: &UserId, room_id: &RoomId, last_state: Option>>, + invite_via: Option>, + ) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xFF); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_invitestate.insert( + &userroom_id, + &serde_json::to_vec(&last_state.unwrap_or_default()).expect("state to bytes always works"), + )?; + self.roomuserid_invitecount + .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + if let Some(servers) = invite_via { + let mut prev_servers = self.servers_invite_via(room_id)?.unwrap_or(Vec::new()); + #[allow(clippy::redundant_clone)] // this is a necessary clone? 
+ prev_servers.append(servers.clone().as_mut()); + let servers = prev_servers.iter().rev().unique().rev().collect_vec(); + + let servers = servers + .iter() + .map(|server| server.as_bytes()) + .collect_vec() + .join(&[0xFF][..]); + + self.roomid_inviteviaservers + .insert(room_id.as_bytes(), &servers)?; + } + + Ok(()) + } + + fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let roomid = room_id.as_bytes().to_vec(); + let mut roomid_prefix = room_id.as_bytes().to_vec(); + roomid_prefix.push(0xFF); + + let mut roomuser_id = roomid_prefix.clone(); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_leftstate.insert( + &userroom_id, + &serde_json::to_vec(&Vec::>::new()).unwrap(), + )?; // TODO + self.roomuserid_leftcount + .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; + + if self + .roomuserid_joined + .scan_prefix(roomid_prefix.clone()) + .count() == 0 + && self + .roomuserid_invitecount + .scan_prefix(roomid_prefix) + .count() == 0 + { + self.roomid_inviteviaservers.remove(&roomid)?; + } + + Ok(()) + } + + fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { + let mut joinedcount = 0_u64; + let mut invitedcount = 0_u64; + let mut joined_servers = HashSet::new(); + let mut real_users = HashSet::new(); + + for joined in self.room_members(room_id).filter_map(Result::ok) { + joined_servers.insert(joined.server_name().to_owned()); + if joined.server_name() == services().globals.server_name() + && !services().users.is_deactivated(&joined).unwrap_or(true) + { + real_users.insert(joined); + } + joinedcount += 1; + } + + for _invited in 
self.room_members_invited(room_id).filter_map(Result::ok) { + invitedcount += 1; + } + + self.roomid_joinedcount + .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; + + self.roomid_invitedcount + .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; + + self.our_real_users_cache + .write() + .unwrap() + .insert(room_id.to_owned(), Arc::new(real_users)); + + for old_joined_server in self.room_servers(room_id).filter_map(Result::ok) { + if !joined_servers.remove(&old_joined_server) { + // Server not in room anymore + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xFF); + roomserver_id.extend_from_slice(old_joined_server.as_bytes()); + + let mut serverroom_id = old_joined_server.as_bytes().to_vec(); + serverroom_id.push(0xFF); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.roomserverids.remove(&roomserver_id)?; + self.serverroomids.remove(&serverroom_id)?; + } + } + + // Now only new servers are in joined_servers anymore + for server in joined_servers { + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xFF); + roomserver_id.extend_from_slice(server.as_bytes()); + + let mut serverroom_id = server.as_bytes().to_vec(); + serverroom_id.push(0xFF); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.roomserverids.insert(&roomserver_id, &[])?; + self.serverroomids.insert(&serverroom_id, &[])?; + } + + self.appservice_in_room_cache + .write() + .unwrap() + .remove(room_id); + + Ok(()) + } + + #[tracing::instrument(skip(self, room_id))] + fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { + let maybe = self + .our_real_users_cache + .read() + .unwrap() + .get(room_id) + .cloned(); + if let Some(users) = maybe { + Ok(users) + } else { + self.update_joined_count(room_id)?; + Ok(Arc::clone( + self.our_real_users_cache + .read() + .unwrap() + .get(room_id) + .unwrap(), + )) + } + } + + #[tracing::instrument(skip(self, room_id, appservice))] + fn appservice_in_room(&self, 
room_id: &RoomId, appservice: &RegistrationInfo) -> Result { + let maybe = self + .appservice_in_room_cache + .read() + .unwrap() + .get(room_id) + .and_then(|map| map.get(&appservice.registration.id)) + .copied(); + + if let Some(b) = maybe { + Ok(b) + } else { + let bridge_user_id = UserId::parse_with_server_name( + appservice.registration.sender_localpart.as_str(), + services().globals.server_name(), + ) + .ok(); + + let in_room = bridge_user_id.map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) + || self + .room_members(room_id) + .any(|userid| userid.map_or(false, |userid| appservice.users.is_match(userid.as_str()))); + + self.appservice_in_room_cache + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default() + .insert(appservice.registration.id.clone(), in_room); + + Ok(in_room) + } + } + + /// Makes a user forget a room. + #[tracing::instrument(skip(self))] + fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xFF); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + /// Returns an iterator of all servers participating in this room. 
+ #[tracing::instrument(skip(self))] + fn room_servers<'a>(&'a self, room_id: &RoomId) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xFF); + + Box::new(self.roomserverids.scan_prefix(prefix).map(|(key, _)| { + ServerName::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("Server name in roomserverids is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) + })) + } + + #[tracing::instrument(skip(self))] + fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result { + let mut key = server.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(room_id.as_bytes()); + + self.serverroomids.get(&key).map(|o| o.is_some()) + } + + /// Returns an iterator of all rooms a server participates in (as far as we + /// know). + #[tracing::instrument(skip(self))] + fn server_rooms<'a>(&'a self, server: &ServerName) -> Box> + 'a> { + let mut prefix = server.as_bytes().to_vec(); + prefix.push(0xFF); + + Box::new(self.serverroomids.scan_prefix(prefix).map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) + })) + } + + /// Returns an iterator over all joined members of a room. 
+ #[tracing::instrument(skip(self))] + fn room_members<'a>(&'a self, room_id: &RoomId) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xFF); + + Box::new(self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) + })) + } + + /// Returns the number of users which are currently in a room + #[tracing::instrument(skip(self))] + fn room_joined_count(&self, room_id: &RoomId) -> Result> { + self.roomid_joinedcount + .get(room_id.as_bytes())? + .map(|b| utils::u64_from_bytes(&b).map_err(|_| Error::bad_database("Invalid joinedcount in db."))) + .transpose() + } + + /// Returns the number of users which are currently invited to a room + #[tracing::instrument(skip(self))] + fn room_invited_count(&self, room_id: &RoomId) -> Result> { + self.roomid_invitedcount + .get(room_id.as_bytes())? + .map(|b| utils::u64_from_bytes(&b).map_err(|_| Error::bad_database("Invalid joinedcount in db."))) + .transpose() + } + + /// Returns an iterator over all User IDs who ever joined a room. + #[tracing::instrument(skip(self))] + fn room_useroncejoined<'a>(&'a self, room_id: &RoomId) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xFF); + + Box::new( + self.roomuseroncejoinedids + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) + }), + ) + } + + /// Returns an iterator over all invited members of a room. 
+ #[tracing::instrument(skip(self))] + fn room_members_invited<'a>(&'a self, room_id: &RoomId) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xFF); + + Box::new( + self.roomuserid_invitecount + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) + }), + ) + } + + #[tracing::instrument(skip(self))] + fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_invitecount + .get(&key)? + .map_or(Ok(None), |bytes| { + Ok(Some( + utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid invitecount in db."))?, + )) + }) + } + + #[tracing::instrument(skip(self))] + fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_leftcount + .get(&key)? + .map(|bytes| utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid leftcount in db."))) + .transpose() + } + + /// Returns an iterator over all rooms this user joined. 
+ #[tracing::instrument(skip(self))] + fn rooms_joined<'a>(&'a self, user_id: &UserId) -> Box> + 'a> { + Box::new( + self.userroomid_joined + .scan_prefix(user_id.as_bytes().to_vec()) + .map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) + }), + ) + } + + /// Returns an iterator over all rooms a user was invited to. + #[tracing::instrument(skip(self))] + fn rooms_invited<'a>(&'a self, user_id: &UserId) -> StrippedStateEventIter<'a> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + + Box::new( + self.userroomid_invitestate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok((room_id, state)) + }), + ) + } + + #[tracing::instrument(skip(self))] + fn invite_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(room_id.as_bytes()); + + self.userroomid_invitestate + .get(&key)? 
+ .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok(state) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + fn left_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(room_id.as_bytes()); + + self.userroomid_leftstate + .get(&key)? + .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok(state) + }) + .transpose() + } + + /// Returns an iterator over all rooms a user left. + #[tracing::instrument(skip(self))] + fn rooms_left<'a>(&'a self, user_id: &UserId) -> AnySyncStateEventIter<'a> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + + Box::new( + self.userroomid_leftstate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok((room_id, state)) + }), + ) + } + + #[tracing::instrument(skip(self))] + fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + 
userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn servers_invite_via(&self, room_id: &RoomId) -> Result>> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xFF); + + self.roomid_inviteviaservers + .get(&key)? + .map(|servers| { + let state = serde_json::from_slice(&servers).map_err(|e| { + error!("Invalid state in userroomid_leftstate: {e}"); + Error::bad_database("Invalid state in userroomid_leftstate.") + })?; + + Ok(state) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + fn add_servers_invite_via(&self, room_id: &RoomId, servers: &[OwnedServerName]) -> Result<()> { + let mut prev_servers = self.servers_invite_via(room_id)?.unwrap_or(Vec::new()); + prev_servers.append(servers.to_owned().as_mut()); + + let servers = prev_servers.iter().rev().unique().rev().collect_vec(); + + let servers = servers + .iter() + .map(|server| server.as_bytes()) + .collect_vec() + .join(&[0xFF][..]); + + self.roomid_inviteviaservers + .insert(room_id.as_bytes(), &servers)?; + + Ok(()) + } +} diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs new file mode 100644 index 00000000..0043a1ba --- /dev/null +++ b/src/database/key_value/rooms/state_compressor.rs @@ -0,0 +1,64 @@ +use 
std::{collections::HashSet, mem::size_of, sync::Arc}; + +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::state_compressor::data::StateDiff}, + utils, Error, Result, +}; + +impl service::rooms::state_compressor::Data for KeyValueDatabase { + fn get_statediff(&self, shortstatehash: u64) -> Result { + let value = self + .shortstatehash_statediff + .get(&shortstatehash.to_be_bytes())? + .ok_or_else(|| Error::bad_database("State hash does not exist"))?; + let parent = utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); + let parent = if parent != 0 { + Some(parent) + } else { + None + }; + + let mut add_mode = true; + let mut added = HashSet::new(); + let mut removed = HashSet::new(); + + let mut i = size_of::(); + while let Some(v) = value.get(i..i + 2 * size_of::()) { + if add_mode && v.starts_with(&0_u64.to_be_bytes()) { + add_mode = false; + i += size_of::(); + continue; + } + if add_mode { + added.insert(v.try_into().expect("we checked the size above")); + } else { + removed.insert(v.try_into().expect("we checked the size above")); + } + i += 2 * size_of::(); + } + + Ok(StateDiff { + parent, + added: Arc::new(added), + removed: Arc::new(removed), + }) + } + + fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { + let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); + for new in diff.added.iter() { + value.extend_from_slice(&new[..]); + } + + if !diff.removed.is_empty() { + value.extend_from_slice(&0_u64.to_be_bytes()); + for removed in diff.removed.iter() { + value.extend_from_slice(&removed[..]); + } + } + + self.shortstatehash_statediff + .insert(&shortstatehash.to_be_bytes(), &value) + } +} diff --git a/src/database/key_value/rooms/threads.rs b/src/database/key_value/rooms/threads.rs new file mode 100644 index 00000000..4cb2591b --- /dev/null +++ b/src/database/key_value/rooms/threads.rs @@ -0,0 +1,75 @@ +use std::mem; + +use 
ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; + +type PduEventIterResult<'a> = Result> + 'a>>; + +impl service::rooms::threads::Data for KeyValueDatabase { + fn threads_until<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: u64, _include: &'a IncludeThreads, + ) -> PduEventIterResult<'a> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let mut current = prefix.clone(); + current.extend_from_slice(&(until - 1).to_be_bytes()); + + Ok(Box::new( + self.threadid_userids + .iter_from(¤t, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pduid, _users)| { + let count = utils::u64_from_bytes(&pduid[(mem::size_of::())..]) + .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?; + let mut pdu = services() + .rooms + .timeline + .get_pdu_from_id(&pduid)? + .ok_or_else(|| Error::bad_database("Invalid pduid reference in threadid_userids"))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((count, pdu)) + }), + )) + } + + fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()> { + let users = participants + .iter() + .map(|user| user.as_bytes()) + .collect::>() + .join(&[0xFF][..]); + + self.threadid_userids.insert(root_id, &users)?; + + Ok(()) + } + + fn get_participants(&self, root_id: &[u8]) -> Result>> { + if let Some(users) = self.threadid_userids.get(root_id)? 
{ + Ok(Some( + users + .split(|b| *b == 0xFF) + .map(|bytes| { + UserId::parse( + utils::string_from_bytes(bytes) + .map_err(|_| Error::bad_database("Invalid UserId bytes in threadid_userids."))?, + ) + .map_err(|_| Error::bad_database("Invalid UserId in threadid_userids.")) + }) + .filter_map(Result::ok) + .collect(), + )) + } else { + Ok(None) + } + } +} diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs new file mode 100644 index 00000000..c1ffb236 --- /dev/null +++ b/src/database/key_value/rooms/timeline.rs @@ -0,0 +1,300 @@ +use std::{collections::hash_map, mem::size_of, sync::Arc}; + +use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; +use service::rooms::timeline::PduCount; +use tracing::error; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; + +impl service::rooms::timeline::Data for KeyValueDatabase { + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + match self + .lasttimelinecount_cache + .lock() + .unwrap() + .entry(room_id.to_owned()) + { + hash_map::Entry::Vacant(v) => { + if let Some(last_count) = self + .pdus_until(sender_user, room_id, PduCount::max())? + .find_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) { + Ok(*v.insert(last_count.0)) + } else { + Ok(PduCount::Normal(0)) + } + }, + hash_map::Entry::Occupied(o) => Ok(*o.get()), + } + } + + /// Returns the `count` of this pdu's id. + fn get_pdu_count(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map(|pdu_id| pdu_count(&pdu_id)) + .transpose() + } + + /// Returns the json of a pdu. + fn get_pdu_json(&self, event_id: &EventId) -> Result> { + self.get_non_outlier_pdu_json(event_id)?.map_or_else( + || { + self.eventid_outlierpdu + .get(event_id.as_bytes())? 
+ .map(|pdu| serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))) + .transpose() + }, + |x| Ok(Some(x)), + ) + } + + /// Returns the json of a pdu. + fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map(|pduid| { + self.pduid_pdu + .get(&pduid)? + .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) + }) + .transpose()? + .map(|pdu| serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))) + .transpose() + } + + /// Returns the pdu's id. + fn get_pdu_id(&self, event_id: &EventId) -> Result>> { self.eventid_pduid.get(event_id.as_bytes()) } + + /// Returns the pdu. + fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map(|pduid| { + self.pduid_pdu + .get(&pduid)? + .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) + }) + .transpose()? + .map(|pdu| serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))) + .transpose() + } + + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + fn get_pdu(&self, event_id: &EventId) -> Result>> { + if let Some(pdu) = self + .get_non_outlier_pdu(event_id)? + .map_or_else( + || { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map(|pdu| serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))) + .transpose() + }, + |x| Ok(Some(x)), + )? + .map(Arc::new) + { + Ok(Some(pdu)) + } else { + Ok(None) + } + } + + /// Returns the pdu. + /// + /// This does __NOT__ check the outliers `Tree`. + fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { + self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + + /// Returns the pdu as a `BTreeMap`. 
+ fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { + self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + + fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()> { + self.pduid_pdu.insert( + pdu_id, + &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), + )?; + + self.lasttimelinecount_cache + .lock() + .unwrap() + .insert(pdu.room_id.clone(), PduCount::Normal(count)); + + self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; + self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; + + Ok(()) + } + + fn prepend_backfill_pdu(&self, pdu_id: &[u8], event_id: &EventId, json: &CanonicalJsonObject) -> Result<()> { + self.pduid_pdu.insert( + pdu_id, + &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), + )?; + + self.eventid_pduid.insert(event_id.as_bytes(), pdu_id)?; + self.eventid_outlierpdu.remove(event_id.as_bytes())?; + + Ok(()) + } + + /// Removes a pdu and creates a new one with the same id. + fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, _pdu: &PduEvent) -> Result<()> { + if self.pduid_pdu.get(pdu_id)?.is_some() { + self.pduid_pdu.insert( + pdu_id, + &serde_json::to_vec(pdu_json).expect("CanonicalJsonObject is always a valid"), + )?; + } else { + return Err(Error::BadRequest(ErrorKind::NotFound, "PDU does not exist.")); + } + + Ok(()) + } + + /// Returns an iterator over all events and their tokens in a room that + /// happened before the event with id `until` in reverse-chronological + /// order. 
+ fn pdus_until<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, until: PduCount, + ) -> Result> + 'a>> { + let (prefix, current) = count_to_id(room_id, until, 1, true)?; + + let user_id = user_id.to_owned(); + + Ok(Box::new( + self.pduid_pdu + .iter_from(¤t, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + pdu.add_age()?; + let count = pdu_count(&pdu_id)?; + Ok((count, pdu)) + }), + )) + } + + fn pdus_after<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, from: PduCount, + ) -> Result> + 'a>> { + let (prefix, current) = count_to_id(room_id, from, 1, false)?; + + let user_id = user_id.to_owned(); + + Ok(Box::new( + self.pduid_pdu + .iter_from(¤t, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + pdu.add_age()?; + let count = pdu_count(&pdu_id)?; + Ok((count, pdu)) + }), + )) + } + + fn increment_notification_counts( + &self, room_id: &RoomId, notifies: Vec, highlights: Vec, + ) -> Result<()> { + let mut notifies_batch = Vec::new(); + let mut highlights_batch = Vec::new(); + for user in notifies { + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + notifies_batch.push(userroom_id); + } + for user in highlights { + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + highlights_batch.push(userroom_id); + } + + self.userroomid_notificationcount + .increment_batch(&mut notifies_batch.into_iter())?; + self.userroomid_highlightcount + .increment_batch(&mut highlights_batch.into_iter())?; + 
Ok(()) + } +} + +/// Returns the `count` of this pdu's id. +fn pdu_count(pdu_id: &[u8]) -> Result { + let last_u64 = utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?; + let second_last_u64 = + utils::u64_from_bytes(&pdu_id[pdu_id.len() - 2 * size_of::()..pdu_id.len() - size_of::()]); + + if matches!(second_last_u64, Ok(0)) { + Ok(PduCount::Backfilled(u64::MAX - last_u64)) + } else { + Ok(PduCount::Normal(last_u64)) + } +} + +fn count_to_id(room_id: &RoomId, count: PduCount, offset: u64, subtract: bool) -> Result<(Vec, Vec)> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))? + .to_be_bytes() + .to_vec(); + let mut pdu_id = prefix.clone(); + // +1 so we don't send the base event + let count_raw = match count { + PduCount::Normal(x) => { + if subtract { + x - offset + } else { + x + offset + } + }, + PduCount::Backfilled(x) => { + pdu_id.extend_from_slice(&0_u64.to_be_bytes()); + let num = u64::MAX - x; + if subtract { + if num > 0 { + num - offset + } else { + num + } + } else { + num + offset + } + }, + }; + pdu_id.extend_from_slice(&count_raw.to_be_bytes()); + + Ok((prefix, pdu_id)) +} diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs new file mode 100644 index 00000000..d773a577 --- /dev/null +++ b/src/database/key_value/rooms/user.rs @@ -0,0 +1,136 @@ +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::user::Data for KeyValueDatabase { + fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xFF); + 
roomuser_id.extend_from_slice(user_id.as_bytes()); + + self.userroomid_notificationcount + .insert(&userroom_id, &0_u64.to_be_bytes())?; + self.userroomid_highlightcount + .insert(&userroom_id, &0_u64.to_be_bytes())?; + + self.roomuserid_lastnotificationread + .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + + Ok(()) + } + + fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_notificationcount + .get(&userroom_id)? + .map_or(Ok(0), |bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid notification count in db.")) + }) + } + + fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xFF); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_highlightcount + .get(&userroom_id)? + .map_or(Ok(0), |bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid highlight count in db.")) + }) + } + + fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(user_id.as_bytes()); + + Ok(self + .roomuserid_lastnotificationread + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")) + }) + .transpose()? + .unwrap_or(0)) + } + + fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) -> Result<()> { + let shortroomid = services() + .rooms + .short + .get_shortroomid(room_id)? 
+ .expect("room exists"); + + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(&token.to_be_bytes()); + + self.roomsynctoken_shortstatehash + .insert(&key, &shortstatehash.to_be_bytes()) + } + + fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { + let shortroomid = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists"); + + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(&token.to_be_bytes()); + + self.roomsynctoken_shortstatehash + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash")) + }) + .transpose() + } + + fn get_shared_rooms<'a>( + &'a self, users: Vec, + ) -> Result> + 'a>> { + let iterators = users.into_iter().map(move |user_id| { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + + self.userroomid_joined + .scan_prefix(prefix) + .map(|(key, _)| { + let roomid_index = key + .iter() + .enumerate() + .find(|(_, &b)| b == 0xFF) + .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
+ .0 + 1; // +1 because the room id starts AFTER the separator + + let room_id = key[roomid_index..].to_vec(); + + Ok::<_, Error>(room_id) + }) + .filter_map(Result::ok) + }); + + // We use the default compare function because keys are sorted correctly (not + // reversed) + Ok(Box::new( + utils::common_elements(iterators, Ord::cmp) + .expect("users is not empty") + .map(|bytes| { + RoomId::parse( + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid RoomId bytes in userroomid_joined"))?, + ) + .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) + }), + )) + } +} diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs new file mode 100644 index 00000000..78dfd197 --- /dev/null +++ b/src/database/key_value/sending.rs @@ -0,0 +1,197 @@ +use ruma::{ServerName, UserId}; + +use crate::{ + database::KeyValueDatabase, + service::{ + self, + sending::{OutgoingDestination, SendingEventType}, + }, + services, utils, Error, Result, +}; + +impl service::sending::Data for KeyValueDatabase { + fn active_requests<'a>( + &'a self, + ) -> Box, OutgoingDestination, SendingEventType)>> + 'a> { + Box::new( + self.servercurrentevent_data + .iter() + .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(k, e)| (key, k, e))), + ) + } + + fn active_requests_for<'a>( + &'a self, outgoing_kind: &OutgoingDestination, + ) -> Box, SendingEventType)>> + 'a> { + let prefix = outgoing_kind.get_prefix(); + Box::new( + self.servercurrentevent_data + .scan_prefix(prefix) + .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(_, e)| (key, e))), + ) + } + + fn delete_active_request(&self, key: Vec) -> Result<()> { self.servercurrentevent_data.remove(&key) } + + fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingDestination) -> Result<()> { + let prefix = outgoing_kind.get_prefix(); + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix) { + self.servercurrentevent_data.remove(&key)?; + } + + Ok(()) + 
} + + fn delete_all_requests_for(&self, outgoing_kind: &OutgoingDestination) -> Result<()> { + let prefix = outgoing_kind.get_prefix(); + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key).unwrap(); + } + + for (key, _) in self.servernameevent_data.scan_prefix(prefix) { + self.servernameevent_data.remove(&key).unwrap(); + } + + Ok(()) + } + + fn queue_requests(&self, requests: &[(&OutgoingDestination, SendingEventType)]) -> Result>> { + let mut batch = Vec::new(); + let mut keys = Vec::new(); + for (outgoing_kind, event) in requests { + let mut key = outgoing_kind.get_prefix(); + if let SendingEventType::Pdu(value) = &event { + key.extend_from_slice(value); + } else { + key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + } + let value = if let SendingEventType::Edu(value) = &event { + &**value + } else { + &[] + }; + batch.push((key.clone(), value.to_owned())); + keys.push(key); + } + self.servernameevent_data + .insert_batch(&mut batch.into_iter())?; + Ok(keys) + } + + fn queued_requests<'a>( + &'a self, outgoing_kind: &OutgoingDestination, + ) -> Box)>> + 'a> { + let prefix = outgoing_kind.get_prefix(); + return Box::new( + self.servernameevent_data + .scan_prefix(prefix) + .map(|(k, v)| parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k))), + ); + } + + fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()> { + for (e, key) in events { + if key.is_empty() { + continue; + } + + let value = if let SendingEventType::Edu(value) = &e { + &**value + } else { + &[] + }; + self.servercurrentevent_data.insert(key, value)?; + self.servernameevent_data.remove(key)?; + } + + Ok(()) + } + + fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) -> Result<()> { + self.servername_educount + .insert(server_name.as_bytes(), &last_count.to_be_bytes()) + } + + fn get_latest_educount(&self, server_name: &ServerName) -> Result { + self.servername_educount 
+ .get(server_name.as_bytes())? + .map_or(Ok(0), |bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) + }) + } +} + +#[tracing::instrument(skip(key))] +fn parse_servercurrentevent(key: &[u8], value: Vec) -> Result<(OutgoingDestination, SendingEventType)> { + // Appservices start with a plus + Ok::<_, Error>(if key.starts_with(b"+") { + let mut parts = key[1..].splitn(2, |&b| b == 0xFF); + + let server = parts.next().expect("splitn always returns one element"); + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + + let server = utils::string_from_bytes(server) + .map_err(|_| Error::bad_database("Invalid server bytes in server_currenttransaction"))?; + + ( + OutgoingDestination::Appservice(server), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) + }, + ) + } else if key.starts_with(b"$") { + let mut parts = key[1..].splitn(3, |&b| b == 0xFF); + + let user = parts.next().expect("splitn always returns one element"); + let user_string = utils::string_from_bytes(user) + .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?; + let user_id = + UserId::parse(user_string).map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; + + let pushkey = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let pushkey_string = utils::string_from_bytes(pushkey) + .map_err(|_| Error::bad_database("Invalid pushkey in servercurrentevent"))?; + + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + + ( + OutgoingDestination::Push(user_id, pushkey_string), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + // I'm pretty sure this should never be called + SendingEventType::Edu(value) + }, + ) + } else { + let mut parts = key.splitn(2, |&b| b == 
0xFF); + + let server = parts.next().expect("splitn always returns one element"); + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + + let server = utils::string_from_bytes(server) + .map_err(|_| Error::bad_database("Invalid server bytes in server_currenttransaction"))?; + + ( + OutgoingDestination::Normal( + ServerName::parse(server) + .map_err(|_| Error::bad_database("Invalid server string in server_currenttransaction"))?, + ), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) + }, + ) + }) +} diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs new file mode 100644 index 00000000..f88ae69f --- /dev/null +++ b/src/database/key_value/transaction_ids.rs @@ -0,0 +1,32 @@ +use ruma::{DeviceId, TransactionId, UserId}; + +use crate::{database::KeyValueDatabase, service, Result}; + +impl service::transaction_ids::Data for KeyValueDatabase { + fn add_txnid( + &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, data: &[u8], + ) -> Result<()> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default()); + key.push(0xFF); + key.extend_from_slice(txn_id.as_bytes()); + + self.userdevicetxnid_response.insert(&key, data)?; + + Ok(()) + } + + fn existing_txnid( + &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, + ) -> Result>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default()); + key.push(0xFF); + key.extend_from_slice(txn_id.as_bytes()); + + // If there's no entry, this is a new transaction + self.userdevicetxnid_response.get(&key) + } +} diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs new file mode 100644 index 00000000..a8047656 --- /dev/null +++ 
b/src/database/key_value/uiaa.rs @@ -0,0 +1,68 @@ +use ruma::{ + api::client::{error::ErrorKind, uiaa::UiaaInfo}, + CanonicalJsonValue, DeviceId, UserId, +}; + +use crate::{database::KeyValueDatabase, service, Error, Result}; + +impl service::uiaa::Data for KeyValueDatabase { + fn set_uiaa_request( + &self, user_id: &UserId, device_id: &DeviceId, session: &str, request: &CanonicalJsonValue, + ) -> Result<()> { + self.userdevicesessionid_uiaarequest + .write() + .unwrap() + .insert( + (user_id.to_owned(), device_id.to_owned(), session.to_owned()), + request.to_owned(), + ); + + Ok(()) + } + + fn get_uiaa_request(&self, user_id: &UserId, device_id: &DeviceId, session: &str) -> Option { + self.userdevicesessionid_uiaarequest + .read() + .unwrap() + .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) + .map(ToOwned::to_owned) + } + + fn update_uiaa_session( + &self, user_id: &UserId, device_id: &DeviceId, session: &str, uiaainfo: Option<&UiaaInfo>, + ) -> Result<()> { + let mut userdevicesessionid = user_id.as_bytes().to_vec(); + userdevicesessionid.push(0xFF); + userdevicesessionid.extend_from_slice(device_id.as_bytes()); + userdevicesessionid.push(0xFF); + userdevicesessionid.extend_from_slice(session.as_bytes()); + + if let Some(uiaainfo) = uiaainfo { + self.userdevicesessionid_uiaainfo.insert( + &userdevicesessionid, + &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), + )?; + } else { + self.userdevicesessionid_uiaainfo + .remove(&userdevicesessionid)?; + } + + Ok(()) + } + + fn get_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session: &str) -> Result { + let mut userdevicesessionid = user_id.as_bytes().to_vec(); + userdevicesessionid.push(0xFF); + userdevicesessionid.extend_from_slice(device_id.as_bytes()); + userdevicesessionid.push(0xFF); + userdevicesessionid.extend_from_slice(session.as_bytes()); + + serde_json::from_slice( + &self + .userdevicesessionid_uiaainfo + .get(&userdevicesessionid)? 
+ .ok_or(Error::BadRequest(ErrorKind::forbidden(), "UIAA session does not exist."))?, + ) + .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) + } +} diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs new file mode 100644 index 00000000..b83e456f --- /dev/null +++ b/src/database/key_value/users.rs @@ -0,0 +1,893 @@ +use std::{collections::BTreeMap, mem::size_of}; + +use ruma::{ + api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::{AnyToDeviceEvent, StateEventType}, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId, + OwnedMxcUri, OwnedUserId, UInt, UserId, +}; +use tracing::warn; + +use crate::{ + database::KeyValueDatabase, + service::{self, users::clean_signatures}, + services, utils, Error, Result, +}; + +impl service::users::Data for KeyValueDatabase { + /// Check if a user has an account on this homeserver. + fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) } + + /// Check if account is deactivated + fn is_deactivated(&self, user_id: &UserId) -> Result { + Ok(self + .userid_password + .get(user_id.as_bytes())? + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "User does not exist."))? + .is_empty()) + } + + /// Returns the number of users registered on this server. + fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } + + /// Find out which user an access token belongs to. + fn find_from_token(&self, token: &str) -> Result> { + self.token_userdeviceid + .get(token.as_bytes())? 
+ .map_or(Ok(None), |bytes| { + let mut parts = bytes.split(|&b| b == 0xFF); + let user_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("User ID in token_userdeviceid is invalid."))?; + let device_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Device ID in token_userdeviceid is invalid."))?; + + Ok(Some(( + UserId::parse( + utils::string_from_bytes(user_bytes) + .map_err(|_| Error::bad_database("User ID in token_userdeviceid is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("User ID in token_userdeviceid is invalid."))?, + utils::string_from_bytes(device_bytes) + .map_err(|_| Error::bad_database("Device ID in token_userdeviceid is invalid."))?, + ))) + }) + } + + /// Returns an iterator over all users on this homeserver. + fn iter<'a>(&'a self) -> Box> + 'a> { + Box::new(self.userid_password.iter().map(|(bytes, _)| { + UserId::parse( + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("User ID in userid_password is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) + })) + } + + /// Returns a list of local users as list of usernames. + /// + /// A user account is considered `local` if the length of it's password is + /// greater then zero. + fn list_local_users(&self) -> Result> { + let users: Vec = self + .userid_password + .iter() + .filter_map(|(username, pw)| get_username_with_valid_password(&username, &pw)) + .collect(); + Ok(users) + } + + /// Returns the password hash for the given user. + fn password_hash(&self, user_id: &UserId) -> Result> { + self.userid_password + .get(user_id.as_bytes())? 
+ .map_or(Ok(None), |bytes| { + Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Password hash in db is not valid string.") + })?)) + }) + } + + /// Hash and set the user's password to the Argon2 hash + fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + if let Some(password) = password { + if let Ok(hash) = utils::calculate_password_hash(password) { + self.userid_password + .insert(user_id.as_bytes(), hash.as_bytes())?; + Ok(()) + } else { + Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Password does not meet the requirements.", + )) + } + } else { + self.userid_password.insert(user_id.as_bytes(), b"")?; + Ok(()) + } + } + + /// Returns the displayname of a user on this homeserver. + fn displayname(&self, user_id: &UserId) -> Result> { + self.userid_displayname + .get(user_id.as_bytes())? + .map_or(Ok(None), |bytes| { + Ok(Some( + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Displayname in db is invalid."))?, + )) + }) + } + + /// Sets a new displayname or removes it if displayname is None. You still + /// need to nofify all rooms of this change. + fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { + if let Some(displayname) = displayname { + self.userid_displayname + .insert(user_id.as_bytes(), displayname.as_bytes())?; + } else { + self.userid_displayname.remove(user_id.as_bytes())?; + } + + Ok(()) + } + + /// Get the `avatar_url` of a user. + fn avatar_url(&self, user_id: &UserId) -> Result> { + self.userid_avatarurl + .get(user_id.as_bytes())? + .map(|bytes| { + let s_bytes = utils::string_from_bytes(&bytes).map_err(|e| { + warn!("Avatar URL in db is invalid: {}", e); + Error::bad_database("Avatar URL in db is invalid.") + })?; + let mxc_uri: OwnedMxcUri = s_bytes.into(); + Ok(mxc_uri) + }) + .transpose() + } + + /// Sets a new avatar_url or removes it if avatar_url is None. 
+ fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + if let Some(avatar_url) = avatar_url { + self.userid_avatarurl + .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; + } else { + self.userid_avatarurl.remove(user_id.as_bytes())?; + } + + Ok(()) + } + + /// Get the blurhash of a user. + fn blurhash(&self, user_id: &UserId) -> Result> { + self.userid_blurhash + .get(user_id.as_bytes())? + .map(|bytes| { + let s = utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; + + Ok(s) + }) + .transpose() + } + + /// Sets a new avatar_url or removes it if avatar_url is None. + fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { + if let Some(blurhash) = blurhash { + self.userid_blurhash + .insert(user_id.as_bytes(), blurhash.as_bytes())?; + } else { + self.userid_blurhash.remove(user_id.as_bytes())?; + } + + Ok(()) + } + + /// Adds a new device to a user. + fn create_device( + &self, user_id: &UserId, device_id: &DeviceId, token: &str, initial_device_display_name: Option, + ) -> Result<()> { + // This method should never be called for nonexistent users. We shouldn't assert + // though... + if !self.exists(user_id)? 
{ + warn!("Called create_device for non-existent user {} in database", user_id); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "User does not exist.")); + } + + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + self.userid_devicelistversion + .increment(user_id.as_bytes())?; + + self.userdeviceid_metadata.insert( + &userdeviceid, + &serde_json::to_vec(&Device { + device_id: device_id.into(), + display_name: initial_device_display_name, + last_seen_ip: None, // TODO + last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), + }) + .expect("Device::to_string never fails."), + )?; + + self.set_token(user_id, device_id, token)?; + + Ok(()) + } + + /// Removes a device from a user. + fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + // Remove tokens + if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { + self.userdeviceid_token.remove(&userdeviceid)?; + self.token_userdeviceid.remove(&old_token)?; + } + + // Remove todevice events + let mut prefix = userdeviceid.clone(); + prefix.push(0xFF); + + for (key, _) in self.todeviceid_events.scan_prefix(prefix) { + self.todeviceid_events.remove(&key)?; + } + + // TODO: Remove onetimekeys + + self.userid_devicelistversion + .increment(user_id.as_bytes())?; + + self.userdeviceid_metadata.remove(&userdeviceid)?; + + Ok(()) + } + + /// Returns an iterator over all device ids of this user. 
+ fn all_device_ids<'a>(&'a self, user_id: &UserId) -> Box> + 'a> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + // All devices have metadata + Box::new( + self.userdeviceid_metadata + .scan_prefix(prefix) + .map(|(bytes, _)| { + Ok(utils::string_from_bytes( + bytes + .rsplit(|&b| b == 0xFF) + .next() + .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, + ) + .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? + .into()) + }), + ) + } + + /// Replaces the access token of one device. + fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + // should not be None, but we shouldn't assert either lol... + if self.userdeviceid_metadata.get(&userdeviceid)?.is_none() { + warn!( + "Called set_token for a non-existent user \"{}\" and/or device ID \"{}\" with no metadata in database", + user_id, device_id + ); + return Err(Error::bad_database( + "User does not exist or device ID has no metadata in database.", + )); + } + + // Remove old token + if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { + self.token_userdeviceid.remove(&old_token)?; + // It will be removed from userdeviceid_token by the insert later + } + + // Assign token to user device combination + self.userdeviceid_token + .insert(&userdeviceid, token.as_bytes())?; + self.token_userdeviceid + .insert(token.as_bytes(), &userdeviceid)?; + + Ok(()) + } + + fn add_one_time_key( + &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, + one_time_key_value: &Raw, + ) -> Result<()> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(device_id.as_bytes()); + + // All devices have metadata + // Only existing devices should be able to call this, but we shouldn't assert + // either... 
+ if self.userdeviceid_metadata.get(&key)?.is_none() { + warn!( + "Called add_one_time_key for a non-existent user \"{}\" and/or device ID \"{}\" with no metadata in \ + database", + user_id, device_id + ); + return Err(Error::bad_database( + "User does not exist or device ID has no metadata in database.", + )); + } + + key.push(0xFF); + // TODO: Use DeviceKeyId::to_string when it's available (and update everything, + // because there are no wrapping quotation marks anymore) + key.extend_from_slice( + serde_json::to_string(one_time_key_key) + .expect("DeviceKeyId::to_string always works") + .as_bytes(), + ); + + self.onetimekeyid_onetimekeys.insert( + &key, + &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), + )?; + + self.userid_lastonetimekeyupdate + .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + + Ok(()) + } + + fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { + self.userid_lastonetimekeyupdate + .get(user_id.as_bytes())? 
+ .map_or(Ok(0), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")) + }) + } + + fn take_one_time_key( + &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, + ) -> Result)>> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xFF); + prefix.push(b'"'); // Annoying quotation mark + prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); + prefix.push(b':'); + + self.userid_lastonetimekeyupdate + .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + + self.onetimekeyid_onetimekeys + .scan_prefix(prefix) + .next() + .map(|(key, value)| { + self.onetimekeyid_onetimekeys.remove(&key)?; + + Ok(( + serde_json::from_slice( + key.rsplit(|&b| b == 0xFF) + .next() + .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, + ) + .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, + serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, + )) + }) + .transpose() + } + + fn count_one_time_keys( + &self, user_id: &UserId, device_id: &DeviceId, + ) -> Result> { + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + let mut counts = BTreeMap::new(); + + for algorithm in self + .onetimekeyid_onetimekeys + .scan_prefix(userdeviceid) + .map(|(bytes, _)| { + Ok::<_, Error>( + serde_json::from_slice::( + bytes + .rsplit(|&b| b == 0xFF) + .next() + .ok_or_else(|| Error::bad_database("OneTimeKey ID in db is invalid."))?, + ) + .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))? 
+ .algorithm(), + ) + }) { + *counts.entry(algorithm?).or_default() += UInt::from(1_u32); + } + + Ok(counts) + } + + fn add_device_keys(&self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw) -> Result<()> { + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + self.keyid_key.insert( + &userdeviceid, + &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), + )?; + + self.mark_device_key_update(user_id)?; + + Ok(()) + } + + fn add_cross_signing_keys( + &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, + user_signing_key: &Option>, notify: bool, + ) -> Result<()> { + // TODO: Check signatures + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + + let (master_key_key, _) = self.parse_master_key(user_id, master_key)?; + + self.keyid_key + .insert(&master_key_key, master_key.json().get().as_bytes())?; + + self.userid_masterkeyid + .insert(user_id.as_bytes(), &master_key_key)?; + + // Self-signing key + if let Some(self_signing_key) = self_signing_key { + let mut self_signing_key_ids = self_signing_key + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key"))? 
+ .keys + .into_values(); + + let self_signing_key_id = self_signing_key_ids + .next() + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Self signing key contained no key."))?; + + if self_signing_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Self signing key contained more than one key.", + )); + } + + let mut self_signing_key_key = prefix.clone(); + self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); + + self.keyid_key + .insert(&self_signing_key_key, self_signing_key.json().get().as_bytes())?; + + self.userid_selfsigningkeyid + .insert(user_id.as_bytes(), &self_signing_key_key)?; + } + + // User-signing key + if let Some(user_signing_key) = user_signing_key { + let mut user_signing_key_ids = user_signing_key + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key"))? + .keys + .into_values(); + + let user_signing_key_id = user_signing_key_ids + .next() + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "User signing key contained no key."))?; + + if user_signing_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "User signing key contained more than one key.", + )); + } + + let mut user_signing_key_key = prefix; + user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); + + self.keyid_key + .insert(&user_signing_key_key, user_signing_key.json().get().as_bytes())?; + + self.userid_usersigningkeyid + .insert(user_id.as_bytes(), &user_signing_key_key)?; + } + + if notify { + self.mark_device_key_update(user_id)?; + } + + Ok(()) + } + + fn sign_key( + &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, + ) -> Result<()> { + let mut key = target_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(key_id.as_bytes()); + + let mut cross_signing_key: serde_json::Value = serde_json::from_slice( + &self + .keyid_key + .get(&key)? 
+ .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Tried to sign nonexistent key."))?, + ) + .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; + + let signatures = cross_signing_key + .get_mut("signatures") + .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? + .as_object_mut() + .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? + .entry(sender_id.to_string()) + .or_insert_with(|| serde_json::Map::new().into()); + + signatures + .as_object_mut() + .ok_or_else(|| Error::bad_database("signatures in keyid_key for a user is invalid."))? + .insert(signature.0, signature.1.into()); + + self.keyid_key.insert( + &key, + &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), + )?; + + self.mark_device_key_update(target_id)?; + + Ok(()) + } + + fn keys_changed<'a>( + &'a self, user_or_room_id: &str, from: u64, to: Option, + ) -> Box> + 'a> { + let mut prefix = user_or_room_id.as_bytes().to_vec(); + prefix.push(0xFF); + + let mut start = prefix.clone(); + start.extend_from_slice(&(from + 1).to_be_bytes()); + + let to = to.unwrap_or(u64::MAX); + + Box::new( + self.keychangeid_userid + .iter_from(&start, false) + .take_while(move |(k, _)| { + k.starts_with(&prefix) + && if let Some(current) = k.splitn(2, |&b| b == 0xFF).nth(1) { + if let Ok(c) = utils::u64_from_bytes(current) { + c <= to + } else { + warn!("BadDatabase: Could not parse keychangeid_userid bytes"); + false + } + } else { + warn!("BadDatabase: Could not parse keychangeid_userid"); + false + } + }) + .map(|(_, bytes)| { + UserId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) + }), + ) + } + + fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { + let count = 
services().globals.next_count()?.to_be_bytes(); + for room_id in services() + .rooms + .state_cache + .rooms_joined(user_id) + .filter_map(Result::ok) + { + // Don't send key updates to unencrypted rooms + if services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? + .is_none() + { + continue; + } + + let mut key = room_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(&count); + + self.keychangeid_userid.insert(&key, user_id.as_bytes())?; + } + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(&count); + self.keychangeid_userid.insert(&key, user_id.as_bytes())?; + + Ok(()) + } + + fn get_device_keys(&self, user_id: &UserId, device_id: &DeviceId) -> Result>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(device_id.as_bytes()); + + self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { + Ok(Some( + serde_json::from_slice(&bytes).map_err(|_| Error::bad_database("DeviceKeys in db are invalid."))?, + )) + }) + } + + fn parse_master_key( + &self, user_id: &UserId, master_key: &Raw, + ) -> Result<(Vec, CrossSigningKey)> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + + let master_key = master_key + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))?; + let mut master_key_ids = master_key.keys.values(); + let master_key_id = master_key_ids + .next() + .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Master key contained no key."))?; + if master_key_ids.next().is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Master key contained more than one key.", + )); + } + let mut master_key_key = prefix.clone(); + master_key_key.extend_from_slice(master_key_id.as_bytes()); + Ok((master_key_key, master_key)) + } + + fn get_key( + &self, key: &[u8], sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> 
Result>> { + self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures(&mut cross_signing_key, sender_user, user_id, allowed_signatures)?; + + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key).expect("Value to RawValue serialization"), + ))) + }) + } + + fn get_master_key( + &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.userid_masterkeyid + .get(user_id.as_bytes())? + .map_or(Ok(None), |key| self.get_key(&key, sender_user, user_id, allowed_signatures)) + } + + fn get_self_signing_key( + &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.userid_selfsigningkeyid + .get(user_id.as_bytes())? + .map_or(Ok(None), |key| self.get_key(&key, sender_user, user_id, allowed_signatures)) + } + + fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { + self.userid_usersigningkeyid + .get(user_id.as_bytes())? 
+ .map_or(Ok(None), |key| { + self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { + Ok(Some( + serde_json::from_slice(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?, + )) + }) + }) + } + + fn add_to_device_event( + &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, + content: serde_json::Value, + ) -> Result<()> { + let mut key = target_user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(target_device_id.as_bytes()); + key.push(0xFF); + key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + + let mut json = serde_json::Map::new(); + json.insert("type".to_owned(), event_type.to_owned().into()); + json.insert("sender".to_owned(), sender.to_string().into()); + json.insert("content".to_owned(), content); + + let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); + + self.todeviceid_events.insert(&key, &value)?; + + Ok(()) + } + + fn get_to_device_events(&self, user_id: &UserId, device_id: &DeviceId) -> Result>> { + let mut events = Vec::new(); + + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xFF); + + for (_, value) in self.todeviceid_events.scan_prefix(prefix) { + events.push( + serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("Event in todeviceid_events is invalid."))?, + ); + } + + Ok(events) + } + + fn remove_to_device_events(&self, user_id: &UserId, device_id: &DeviceId, until: u64) -> Result<()> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xFF); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xFF); + + let mut last = prefix.clone(); + last.extend_from_slice(&until.to_be_bytes()); + + for (key, _) in self + .todeviceid_events + .iter_from(&last, true) // this includes last + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(|(key, _)| { + Ok::<_, Error>(( + key.clone(), + 
utils::u64_from_bytes(&key[key.len() - size_of::()..key.len()]) + .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, + )) + }) + .filter_map(Result::ok) + .take_while(|&(_, count)| count <= until) + { + self.todeviceid_events.remove(&key)?; + } + + Ok(()) + } + + fn update_device_metadata(&self, user_id: &UserId, device_id: &DeviceId, device: &Device) -> Result<()> { + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + // Only existing devices should be able to call this, but we shouldn't assert + // either... + if self.userdeviceid_metadata.get(&userdeviceid)?.is_none() { + warn!( + "Called update_device_metadata for a non-existent user \"{}\" and/or device ID \"{}\" with no \ + metadata in database", + user_id, device_id + ); + return Err(Error::bad_database( + "User does not exist or device ID has no metadata in database.", + )); + } + + self.userid_devicelistversion + .increment(user_id.as_bytes())?; + + self.userdeviceid_metadata.insert( + &userdeviceid, + &serde_json::to_vec(device).expect("Device::to_string always works"), + )?; + + Ok(()) + } + + /// Get device metadata. + fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) -> Result> { + let mut userdeviceid = user_id.as_bytes().to_vec(); + userdeviceid.push(0xFF); + userdeviceid.extend_from_slice(device_id.as_bytes()); + + self.userdeviceid_metadata + .get(&userdeviceid)? + .map_or(Ok(None), |bytes| { + Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { + Error::bad_database("Metadata in userdeviceid_metadata is invalid.") + })?)) + }) + } + + fn get_devicelist_version(&self, user_id: &UserId) -> Result> { + self.userid_devicelistversion + .get(user_id.as_bytes())? 
+ .map_or(Ok(None), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid devicelistversion in db.")) + .map(Some) + }) + } + + fn all_devices_metadata<'a>(&'a self, user_id: &UserId) -> Box> + 'a> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + + Box::new( + self.userdeviceid_metadata + .scan_prefix(key) + .map(|(_, bytes)| { + serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) + }), + ) + } + + /// Creates a new sync filter. Returns the filter id. + fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { + let filter_id = utils::random_string(4); + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(filter_id.as_bytes()); + + self.userfilterid_filter + .insert(&key, &serde_json::to_vec(&filter).expect("filter is valid json"))?; + + Ok(filter_id) + } + + fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(filter_id.as_bytes()); + + let raw = self.userfilterid_filter.get(&key)?; + + if let Some(raw) = raw { + serde_json::from_slice(&raw).map_err(|_| Error::bad_database("Invalid filter event in db.")) + } else { + Ok(None) + } + } +} + +impl KeyValueDatabase {} + +/// Will only return with Some(username) if the password was not empty and the +/// username could be successfully parsed. +/// If `utils::string_from_bytes`(...) returns an error that username will be +/// skipped and the error will be logged. 
+fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option { + // A valid password is not empty + if password.is_empty() { + None + } else { + match utils::string_from_bytes(username) { + Ok(u) => Some(u), + Err(e) => { + warn!("Failed to parse username while calling get_local_users(): {}", e.to_string()); + None + }, + } + } +} diff --git a/src/database/keyval.rs b/src/database/keyval.rs deleted file mode 100644 index 6059cd53..00000000 --- a/src/database/keyval.rs +++ /dev/null @@ -1,103 +0,0 @@ -use conduwuit::{Result, smallvec::SmallVec}; -use serde::{Deserialize, Serialize}; - -use crate::{de, ser}; - -pub type KeyVal<'a, K = &'a Slice, V = &'a Slice> = (Key<'a, K>, Val<'a, V>); -pub type Key<'a, T = &'a Slice> = T; -pub type Val<'a, T = &'a Slice> = T; - -pub type KeyBuf = KeyBuffer; -pub type ValBuf = ValBuffer; - -pub type KeyBuffer = Buffer; -pub type ValBuffer = Buffer; -pub type Buffer = SmallVec<[Byte; CAP]>; - -pub type Slice = [Byte]; -pub type Byte = u8; - -pub const KEY_STACK_CAP: usize = 128; -pub const VAL_STACK_CAP: usize = 512; -pub const DEF_STACK_CAP: usize = KEY_STACK_CAP; - -#[inline] -pub fn serialize_key(val: T) -> Result -where - T: Serialize, -{ - ser::serialize_to::(val) -} - -#[inline] -pub fn serialize_val(val: T) -> Result -where - T: Serialize, -{ - ser::serialize_to::(val) -} - -#[inline] -pub(crate) fn _expect_deserialize<'a, K, V>(kv: Result>) -> KeyVal<'a, K, V> -where - K: Deserialize<'a>, - V: Deserialize<'a>, -{ - result_deserialize(kv).expect("failed to deserialize result key/val") -} - -#[inline] -pub(crate) fn _expect_deserialize_key<'a, K>(key: Result>) -> Key<'a, K> -where - K: Deserialize<'a>, -{ - result_deserialize_key(key).expect("failed to deserialize result key") -} - -#[inline] -pub(crate) fn result_deserialize<'a, K, V>(kv: Result>) -> Result> -where - K: Deserialize<'a>, - V: Deserialize<'a>, -{ - deserialize(kv?) 
-} - -#[inline] -pub(crate) fn result_deserialize_key<'a, K>(key: Result>) -> Result> -where - K: Deserialize<'a>, -{ - deserialize_key(key?) -} - -#[inline] -pub(crate) fn deserialize<'a, K, V>(kv: KeyVal<'a>) -> Result> -where - K: Deserialize<'a>, - V: Deserialize<'a>, -{ - Ok((deserialize_key::(kv.0)?, deserialize_val::(kv.1)?)) -} - -#[inline] -pub(crate) fn deserialize_key<'a, K>(key: Key<'a>) -> Result> -where - K: Deserialize<'a>, -{ - de::from_slice::(key) -} - -#[inline] -pub(crate) fn deserialize_val<'a, V>(val: Val<'a>) -> Result> -where - V: Deserialize<'a>, -{ - de::from_slice::(val) -} - -#[inline] -pub fn key(kv: KeyVal<'_, K, V>) -> Key<'_, K> { kv.0 } - -#[inline] -pub fn val(kv: KeyVal<'_, K, V>) -> Val<'_, V> { kv.1 } diff --git a/src/database/kvengine.rs b/src/database/kvengine.rs new file mode 100644 index 00000000..c67a7e98 --- /dev/null +++ b/src/database/kvengine.rs @@ -0,0 +1,38 @@ +use std::{error::Error, sync::Arc}; + +use super::{Config, KvTree}; +use crate::Result; + +pub(crate) trait KeyValueDatabaseEngine: Send + Sync { + fn open(config: &Config) -> Result + where + Self: Sized; + + fn open_tree(&self, name: &'static str) -> Result>; + + fn flush(&self) -> Result<()>; + + #[allow(dead_code)] + fn sync(&self) -> Result<()> { Ok(()) } + + fn cork(&self) -> Result<()> { Ok(()) } + + fn uncork(&self) -> Result<()> { Ok(()) } + + fn corked(&self) -> bool { false } + + fn cleanup(&self) -> Result<()> { Ok(()) } + + fn memory_usage(&self) -> Result { + Ok("Current database engine does not support memory usage reporting.".to_owned()) + } + + #[allow(dead_code)] + fn clear_caches(&self) {} + + fn backup(&self) -> Result<(), Box> { unimplemented!() } + + fn backup_list(&self) -> Result { Ok(String::new()) } + + fn file_list(&self) -> Result { Ok(String::new()) } +} diff --git a/src/database/kvtree.rs b/src/database/kvtree.rs new file mode 100644 index 00000000..52e5b146 --- /dev/null +++ b/src/database/kvtree.rs @@ -0,0 +1,62 @@ +use 
std::{future::Future, pin::Pin}; + +use crate::Result; + +pub(crate) trait KvTree: Send + Sync { + fn get(&self, key: &[u8]) -> Result>>; + + #[allow(dead_code)] + fn multi_get(&self, keys: &[&[u8]]) -> Result>>> { + let mut ret: Vec>> = Vec::with_capacity(keys.len()); + for key in keys { + ret.push(self.get(key)?); + } + + Ok(ret) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; + fn insert_batch(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + for (key, value) in iter { + self.insert(&key, &value)?; + } + + Ok(()) + } + + fn remove(&self, key: &[u8]) -> Result<()>; + + #[allow(dead_code)] + fn remove_batch(&self, iter: &mut dyn Iterator>) -> Result<()> { + for key in iter { + self.remove(&key)?; + } + + Ok(()) + } + + fn iter<'a>(&'a self) -> Box, Vec)> + 'a>; + + fn iter_from<'a>(&'a self, from: &[u8], backwards: bool) -> Box, Vec)> + 'a>; + + fn increment(&self, key: &[u8]) -> Result>; + fn increment_batch(&self, iter: &mut dyn Iterator>) -> Result<()> { + for key in iter { + self.increment(&key)?; + } + + Ok(()) + } + + fn scan_prefix<'a>(&'a self, prefix: Vec) -> Box, Vec)> + 'a>; + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>>; + + fn clear(&self) -> Result<()> { + for (key, _) in self.iter() { + self.remove(&key)?; + } + + Ok(()) + } +} diff --git a/src/database/map.rs b/src/database/map.rs deleted file mode 100644 index ed38e1fc..00000000 --- a/src/database/map.rs +++ /dev/null @@ -1,106 +0,0 @@ -mod clear; -pub mod compact; -mod contains; -mod count; -mod get; -mod get_batch; -mod insert; -mod keys; -mod keys_from; -mod keys_prefix; -mod open; -mod options; -mod qry; -mod qry_batch; -mod remove; -mod rev_keys; -mod rev_keys_from; -mod rev_keys_prefix; -mod rev_stream; -mod rev_stream_from; -mod rev_stream_prefix; -mod stream; -mod stream_from; -mod stream_prefix; - -use std::{ - convert::AsRef, - ffi::CStr, - fmt, - fmt::{Debug, Display}, - future::Future, - pin::Pin, - sync::Arc, -}; - -use 
conduwuit::Result; -use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteOptions}; - -pub(crate) use self::options::{ - cache_iter_options_default, cache_read_options_default, iter_options_default, - read_options_default, write_options_default, -}; -pub use self::{get_batch::Get, qry_batch::Qry}; -use crate::{Engine, watchers::Watchers}; - -pub struct Map { - name: &'static str, - watchers: Watchers, - cf: Arc, - db: Arc, - read_options: ReadOptions, - cache_read_options: ReadOptions, - write_options: WriteOptions, -} - -impl Map { - pub(crate) fn open(db: &Arc, name: &'static str) -> Result> { - Ok(Arc::new(Self { - name, - watchers: Watchers::default(), - cf: open::open(db, name), - db: db.clone(), - read_options: read_options_default(db), - cache_read_options: cache_read_options_default(db), - write_options: write_options_default(db), - })) - } - - #[inline] - pub fn watch_prefix<'a, K>( - &'a self, - prefix: &K, - ) -> Pin + Send + 'a>> - where - K: AsRef<[u8]> + ?Sized + Debug, - { - self.watchers.watch(prefix.as_ref()) - } - - #[inline] - pub fn property_integer(&self, name: &CStr) -> Result { - self.db.property_integer(&self.cf(), name) - } - - #[inline] - pub fn property(&self, name: &str) -> Result { self.db.property(&self.cf(), name) } - - #[inline] - pub fn name(&self) -> &str { self.name } - - #[inline] - pub(crate) fn db(&self) -> &Arc { &self.db } - - #[inline] - pub(crate) fn cf(&self) -> impl AsColumnFamilyRef + '_ { &*self.cf } -} - -impl Debug for Map { - fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(out, "Map {{name: {0}}}", self.name) - } -} - -impl Display for Map { - fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result { write!(out, "{0}", self.name) } -} diff --git a/src/database/map/clear.rs b/src/database/map/clear.rs deleted file mode 100644 index 321ec79c..00000000 --- a/src/database/map/clear.rs +++ /dev/null @@ -1,30 +0,0 @@ -use std::sync::Arc; - -use conduwuit::{ - Result, implement, - 
utils::stream::{ReadyExt, TryIgnore}, -}; -use futures::{Stream, TryStreamExt}; - -use crate::keyval::Key; - -/// Delete all data stored in this map. !!! USE WITH CAUTION !!! -/// -/// See for_clear() with additional details. -#[implement(super::Map)] -#[tracing::instrument(level = "trace")] -pub async fn clear(self: &Arc) { - self.for_clear().ignore_err().ready_for_each(|_| ()).await; -} - -/// Delete all data stored in this map. !!! USE WITH CAUTION !!! -/// -/// Provides stream of keys undergoing deletion along with any errors. -/// -/// Note this operation applies to a snapshot of the data when invoked. -/// Additional data written during or after this call may be missed. -#[implement(super::Map)] -#[tracing::instrument(level = "trace")] -pub fn for_clear(self: &Arc) -> impl Stream>> + Send { - self.raw_keys().inspect_ok(|key| self.remove(key)) -} diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs deleted file mode 100644 index b49bf30b..00000000 --- a/src/database/map/compact.rs +++ /dev/null @@ -1,62 +0,0 @@ -use conduwuit::{Err, Result, implement}; -use rocksdb::{BottommostLevelCompaction, CompactOptions}; - -use crate::keyval::KeyBuf; - -#[derive(Clone, Debug, Default)] -pub struct Options { - /// Key range to start and stop compaction. - pub range: (Option, Option), - - /// (None, None) - all levels to all necessary levels - /// (None, Some(1)) - compact all levels into level 1 - /// (Some(1), None) - compact level 1 into level 1 - /// (Some(_), Some(_) - currently unsupported - pub level: (Option, Option), - - /// run compaction until complete. if false only one pass is made, and the - /// results of that pass are not further recompacted. - pub exhaustive: bool, - - /// waits for other compactions to complete, then runs this compaction - /// exclusively before allowing automatic compactions to resume. 
- pub exclusive: bool, -} - -#[implement(super::Map)] -#[tracing::instrument( - name = "compact", - level = "info" - skip(self), - fields(%self), -)] -pub fn compact_blocking(&self, opts: Options) -> Result { - let mut co = CompactOptions::default(); - co.set_exclusive_manual_compaction(opts.exclusive); - co.set_bottommost_level_compaction(match opts.exhaustive { - | true => BottommostLevelCompaction::Force, - | false => BottommostLevelCompaction::ForceOptimized, - }); - - match opts.level { - | (None, None) => { - co.set_change_level(true); - co.set_target_level(-1); - }, - | (None, Some(level)) => { - co.set_change_level(true); - co.set_target_level(level.try_into()?); - }, - | (Some(level), None) => { - co.set_change_level(false); - co.set_target_level(level.try_into()?); - }, - | (Some(_), Some(_)) => return Err!("compacting between specific levels not supported"), - } - - self.db - .db - .compact_range_cf_opt(&self.cf(), opts.range.0, opts.range.1, &co); - - Ok(()) -} diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs deleted file mode 100644 index 474818e8..00000000 --- a/src/database/map/contains.rs +++ /dev/null @@ -1,103 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; - -use conduwuit::{ - Result, - arrayvec::ArrayVec, - err, implement, - utils::{future::TryExtExt, result::FlatOk}, -}; -use futures::FutureExt; -use serde::Serialize; - -use crate::{keyval::KeyBuf, ser}; - -/// Returns true if the map contains the key. -/// - key is serialized into allocated buffer -/// - harder errors may not be reported -#[inline] -#[implement(super::Map)] -pub fn contains( - self: &Arc, - key: &K, -) -> impl Future + Send + '_ + use<'_, K> -where - K: Serialize + ?Sized + Debug, -{ - let mut buf = KeyBuf::new(); - self.bcontains(key, &mut buf) -} - -/// Returns true if the map contains the key. 
-/// - key is serialized into stack-buffer -/// - harder errors will panic -#[inline] -#[implement(super::Map)] -pub fn acontains( - self: &Arc, - key: &K, -) -> impl Future + Send + '_ + use<'_, MAX, K> -where - K: Serialize + ?Sized + Debug, -{ - let mut buf = ArrayVec::::new(); - self.bcontains(key, &mut buf) -} - -/// Returns true if the map contains the key. -/// - key is serialized into provided buffer -/// - harder errors will panic -#[implement(super::Map)] -#[tracing::instrument(skip(self, buf), fields(%self), level = "trace")] -pub fn bcontains( - self: &Arc, - key: &K, - buf: &mut B, -) -> impl Future + Send + '_ + use<'_, K, B> -where - K: Serialize + ?Sized + Debug, - B: Write + AsRef<[u8]>, -{ - let key = ser::serialize(buf, key).expect("failed to serialize query key"); - self.exists(key).is_ok() -} - -/// Returns Ok if the map contains the key. -/// - key is raw -#[inline] -#[implement(super::Map)] -pub fn exists<'a, K>( - self: &'a Arc, - key: &K, -) -> impl Future + Send + 'a + use<'a, K> -where - K: AsRef<[u8]> + ?Sized + Debug + 'a, -{ - self.get(key).map(|res| res.map(|_| ())) -} - -/// Returns Ok if the map contains the key; NotFound otherwise. Harder errors -/// may not always be reported properly. -#[implement(super::Map)] -#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] -pub fn exists_blocking(&self, key: &K) -> Result -where - K: AsRef<[u8]> + ?Sized + Debug, -{ - self.maybe_exists(key) - .then(|| self.get_blocking(key)) - .flat_ok() - .map(|_| ()) - .ok_or_else(|| err!(Request(NotFound("Not found in database")))) -} - -/// Rocksdb limits this to kBlockCacheTier internally so this is not actually a -/// blocking call; in case that changes we set this as well in our read_options. 
-#[implement(super::Map)] -pub(crate) fn maybe_exists(&self, key: &K) -> bool -where - K: AsRef<[u8]> + ?Sized, -{ - self.db - .db - .key_may_exist_cf_opt(&self.cf(), key, &self.cache_read_options) -} diff --git a/src/database/map/count.rs b/src/database/map/count.rs deleted file mode 100644 index 78f9e2e3..00000000 --- a/src/database/map/count.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::{fmt::Debug, future::Future, sync::Arc}; - -use conduwuit::implement; -use futures::stream::StreamExt; -use serde::Serialize; - -/// Count the total number of entries in the map. -#[implement(super::Map)] -#[inline] -pub fn count(self: &Arc) -> impl Future + Send + '_ { - self.raw_keys().count() -} - -/// Count the number of entries in the map starting from a lower-bound. -/// -/// - From is a structured key -#[implement(super::Map)] -#[inline] -pub fn count_from<'a, P>( - self: &'a Arc, - from: &P, -) -> impl Future + Send + 'a + use<'a, P> -where - P: Serialize + ?Sized + Debug + 'a, -{ - self.keys_from_raw(from).count() -} - -/// Count the number of entries in the map starting from a lower-bound. -/// -/// - From is a raw -#[implement(super::Map)] -#[inline] -pub fn raw_count_from<'a, P>( - self: &'a Arc, - from: &'a P, -) -> impl Future + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, -{ - self.raw_keys_from(from).count() -} - -/// Count the number of entries in the map matching a prefix. -/// -/// - Prefix is structured key -#[implement(super::Map)] -#[inline] -pub fn count_prefix<'a, P>( - self: &'a Arc, - prefix: &P, -) -> impl Future + Send + 'a + use<'a, P> -where - P: Serialize + ?Sized + Debug + 'a, -{ - self.keys_prefix_raw(prefix).count() -} - -/// Count the number of entries in the map matching a prefix. 
-/// -/// - Prefix is raw -#[implement(super::Map)] -#[inline] -pub fn raw_count_prefix<'a, P>( - self: &'a Arc, - prefix: &'a P, -) -> impl Future + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, -{ - self.raw_keys_prefix(prefix).count() -} diff --git a/src/database/map/get.rs b/src/database/map/get.rs deleted file mode 100644 index 0971fb17..00000000 --- a/src/database/map/get.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; - -use conduwuit::{Err, Result, err, implement, utils::result::MapExpect}; -use futures::{Future, FutureExt, TryFutureExt, future::ready}; -use rocksdb::{DBPinnableSlice, ReadOptions}; -use tokio::task; - -use crate::{ - Handle, - util::{is_incomplete, map_err, or_else}, -}; - -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is referenced directly to perform the query. -#[implement(super::Map)] -#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] -pub fn get( - self: &Arc, - key: &K, -) -> impl Future>> + Send + use<'_, K> -where - K: AsRef<[u8]> + Debug + ?Sized, -{ - use crate::pool::Get; - - let cached = self.get_cached(key); - if matches!(cached, Err(_) | Ok(Some(_))) { - return task::consume_budget() - .map(move |()| cached.map_expect("data found in cache")) - .boxed(); - } - - debug_assert!(matches!(cached, Ok(None)), "expected status Incomplete"); - let cmd = Get { - map: self.clone(), - key: [key.as_ref().into()].into(), - res: None, - }; - - self.db - .pool - .execute_get(cmd) - .and_then(|mut res| ready(res.remove(0))) - .boxed() -} - -/// Fetch a value from the cache without I/O. 
-#[implement(super::Map)] -#[tracing::instrument(skip(self, key), name = "cache", level = "trace")] -pub(crate) fn get_cached(&self, key: &K) -> Result>> -where - K: AsRef<[u8]> + Debug + ?Sized, -{ - let res = self.get_blocking_opts(key, &self.cache_read_options); - cached_handle_from(res) -} - -/// Fetch a value from the database into cache, returning a reference-handle. -/// The key is referenced directly to perform the query. This is a thread- -/// blocking call. -#[implement(super::Map)] -#[tracing::instrument(skip(self, key), name = "blocking", level = "trace")] -pub fn get_blocking(&self, key: &K) -> Result> -where - K: AsRef<[u8]> + ?Sized, -{ - let res = self.get_blocking_opts(key, &self.read_options); - handle_from(res) -} - -#[implement(super::Map)] -fn get_blocking_opts( - &self, - key: &K, - read_options: &ReadOptions, -) -> Result>, rocksdb::Error> -where - K: AsRef<[u8]> + ?Sized, -{ - self.db.db.get_pinned_cf_opt(&self.cf(), key, read_options) -} - -#[inline] -pub(super) fn handle_from( - result: Result>, rocksdb::Error>, -) -> Result> { - result - .map_err(map_err)? 
- .map(Handle::from) - .ok_or(err!(Request(NotFound("Not found in database")))) -} - -#[inline] -pub(super) fn cached_handle_from( - result: Result>, rocksdb::Error>, -) -> Result>> { - match result { - // cache hit; not found - | Ok(None) => Err!(Request(NotFound("Not found in database"))), - - // cache hit; value found - | Ok(Some(result)) => Ok(Some(Handle::from(result))), - - // cache miss; unknown - | Err(error) if is_incomplete(&error) => Ok(None), - - // some other error occurred - | Err(error) => or_else(error), - } -} diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs deleted file mode 100644 index e23a8848..00000000 --- a/src/database/map/get_batch.rs +++ /dev/null @@ -1,107 +0,0 @@ -use std::{convert::AsRef, sync::Arc}; - -use conduwuit::{ - Result, implement, - utils::{ - IterStream, - stream::{WidebandExt, automatic_amplification, automatic_width}, - }, -}; -use futures::{Stream, StreamExt, TryStreamExt}; -use rocksdb::{DBPinnableSlice, ReadOptions}; - -use super::get::{cached_handle_from, handle_from}; -use crate::Handle; - -pub trait Get<'a, K, S> -where - Self: Sized, - S: Stream + Send + 'a, - K: AsRef<[u8]> + Send + Sync + 'a, -{ - fn get(self, map: &'a Arc) -> impl Stream>> + Send + 'a; -} - -impl<'a, K, S> Get<'a, K, S> for S -where - Self: Sized, - S: Stream + Send + 'a, - K: AsRef<[u8]> + Send + Sync + 'a, -{ - #[inline] - fn get(self, map: &'a Arc) -> impl Stream>> + Send + 'a { - map.get_batch(self) - } -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self, keys), level = "trace")] -pub(crate) fn get_batch<'a, S, K>( - self: &'a Arc, - keys: S, -) -> impl Stream>> + Send + 'a -where - S: Stream + Send + 'a, - K: AsRef<[u8]> + Send + Sync + 'a, -{ - use crate::pool::Get; - - keys.ready_chunks(automatic_amplification()) - .widen_then(automatic_width(), |chunk| { - self.db.pool.execute_get(Get { - map: self.clone(), - key: chunk.iter().map(AsRef::as_ref).map(Into::into).collect(), - res: None, - }) - }) - 
.map_ok(|results| results.into_iter().stream()) - .try_flatten() -} - -#[implement(super::Map)] -#[tracing::instrument(name = "batch_cached", level = "trace", skip_all)] -pub(crate) fn get_batch_cached<'a, I, K>( - &self, - keys: I, -) -> impl Iterator>>> + Send + use<'_, I, K> -where - I: Iterator + ExactSizeIterator + Send, - K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, -{ - self.get_batch_blocking_opts(keys, &self.cache_read_options) - .map(cached_handle_from) -} - -#[implement(super::Map)] -#[tracing::instrument(name = "batch_blocking", level = "trace", skip_all)] -pub(crate) fn get_batch_blocking<'a, I, K>( - &self, - keys: I, -) -> impl Iterator>> + Send + use<'_, I, K> -where - I: Iterator + ExactSizeIterator + Send, - K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, -{ - self.get_batch_blocking_opts(keys, &self.read_options) - .map(handle_from) -} - -#[implement(super::Map)] -fn get_batch_blocking_opts<'a, I, K>( - &self, - keys: I, - read_options: &ReadOptions, -) -> impl Iterator>, rocksdb::Error>> + Send + use<'_, I, K> -where - I: Iterator + ExactSizeIterator + Send, - K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, -{ - // Optimization can be `true` if key vector is pre-sorted **by the column - // comparator**. - const SORTED: bool = false; - - self.db - .db - .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options) - .into_iter() -} diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs deleted file mode 100644 index 6f010097..00000000 --- a/src/database/map/insert.rs +++ /dev/null @@ -1,237 +0,0 @@ -//! Insert a Key+Value into the database. -//! -//! Overloads are provided for the user to choose the most efficient -//! serialization or bypass for pre=serialized (raw) inputs. 
- -use std::{convert::AsRef, fmt::Debug, io::Write}; - -use conduwuit::{arrayvec::ArrayVec, implement}; -use rocksdb::WriteBatchWithTransaction; -use serde::Serialize; - -use crate::{ - keyval::{KeyBuf, ValBuf}, - ser, - util::or_else, -}; - -/// Insert Key/Value -/// -/// - Key is serialized -/// - Val is serialized -#[implement(super::Map)] -#[inline] -pub fn put(&self, key: K, val: V) -where - K: Serialize + Debug, - V: Serialize, -{ - let mut key_buf = KeyBuf::new(); - let mut val_buf = ValBuf::new(); - self.bput(key, val, (&mut key_buf, &mut val_buf)); -} - -/// Insert Key/Value -/// -/// - Key is serialized -/// - Val is raw -#[implement(super::Map)] -#[inline] -pub fn put_raw(&self, key: K, val: V) -where - K: Serialize + Debug, - V: AsRef<[u8]>, -{ - let mut key_buf = KeyBuf::new(); - self.bput_raw(key, val, &mut key_buf); -} - -/// Insert Key/Value -/// -/// - Key is raw -/// - Val is serialized -#[implement(super::Map)] -#[inline] -pub fn raw_put(&self, key: K, val: V) -where - K: AsRef<[u8]>, - V: Serialize, -{ - let mut val_buf = ValBuf::new(); - self.raw_bput(key, val, &mut val_buf); -} - -/// Insert Key/Value -/// -/// - Key is serialized -/// - Val is serialized to stack-buffer -#[implement(super::Map)] -#[inline] -pub fn put_aput(&self, key: K, val: V) -where - K: Serialize + Debug, - V: Serialize, -{ - let mut key_buf = KeyBuf::new(); - let mut val_buf = ArrayVec::::new(); - self.bput(key, val, (&mut key_buf, &mut val_buf)); -} - -/// Insert Key/Value -/// -/// - Key is serialized to stack-buffer -/// - Val is serialized -#[implement(super::Map)] -#[inline] -pub fn aput_put(&self, key: K, val: V) -where - K: Serialize + Debug, - V: Serialize, -{ - let mut key_buf = ArrayVec::::new(); - let mut val_buf = ValBuf::new(); - self.bput(key, val, (&mut key_buf, &mut val_buf)); -} - -/// Insert Key/Value -/// -/// - Key is serialized to stack-buffer -/// - Val is serialized to stack-buffer -#[implement(super::Map)] -#[inline] -pub fn aput(&self, key: K, 
val: V) -where - K: Serialize + Debug, - V: Serialize, -{ - let mut key_buf = ArrayVec::::new(); - let mut val_buf = ArrayVec::::new(); - self.bput(key, val, (&mut key_buf, &mut val_buf)); -} - -/// Insert Key/Value -/// -/// - Key is serialized to stack-buffer -/// - Val is raw -#[implement(super::Map)] -#[inline] -pub fn aput_raw(&self, key: K, val: V) -where - K: Serialize + Debug, - V: AsRef<[u8]>, -{ - let mut key_buf = ArrayVec::::new(); - self.bput_raw(key, val, &mut key_buf); -} - -/// Insert Key/Value -/// -/// - Key is raw -/// - Val is serialized to stack-buffer -#[implement(super::Map)] -#[inline] -pub fn raw_aput(&self, key: K, val: V) -where - K: AsRef<[u8]>, - V: Serialize, -{ - let mut val_buf = ArrayVec::::new(); - self.raw_bput(key, val, &mut val_buf); -} - -/// Insert Key/Value -/// -/// - Key is serialized to supplied buffer -/// - Val is serialized to supplied buffer -#[implement(super::Map)] -pub fn bput(&self, key: K, val: V, mut buf: (Bk, Bv)) -where - K: Serialize + Debug, - V: Serialize, - Bk: Write + AsRef<[u8]>, - Bv: Write + AsRef<[u8]>, -{ - let val = ser::serialize(&mut buf.1, val).expect("failed to serialize insertion val"); - self.bput_raw(key, val, &mut buf.0); -} - -/// Insert Key/Value -/// -/// - Key is serialized to supplied buffer -/// - Val is raw -#[implement(super::Map)] -#[tracing::instrument(skip(self, val, buf), level = "trace")] -pub fn bput_raw(&self, key: K, val: V, mut buf: Bk) -where - K: Serialize + Debug, - V: AsRef<[u8]>, - Bk: Write + AsRef<[u8]>, -{ - let key = ser::serialize(&mut buf, key).expect("failed to serialize insertion key"); - self.insert(&key, val); -} - -/// Insert Key/Value -/// -/// - Key is raw -/// - Val is serialized to supplied buffer -#[implement(super::Map)] -pub fn raw_bput(&self, key: K, val: V, mut buf: Bv) -where - K: AsRef<[u8]>, - V: Serialize, - Bv: Write + AsRef<[u8]>, -{ - let val = ser::serialize(&mut buf, val).expect("failed to serialize insertion val"); - self.insert(&key, val); 
-} - -/// Insert Key/Value -/// -/// - Key is raw -/// - Val is raw -#[implement(super::Map)] -#[tracing::instrument(skip_all, fields(%self), level = "trace")] -pub fn insert(&self, key: &K, val: V) -where - K: AsRef<[u8]> + ?Sized, - V: AsRef<[u8]>, -{ - let write_options = &self.write_options; - self.db - .db - .put_cf_opt(&self.cf(), key, val, write_options) - .or_else(or_else) - .expect("database insert error"); - - if !self.db.corked() { - self.db.flush().expect("database flush error"); - } - - self.watchers.wake(key.as_ref()); -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self, iter), fields(%self), level = "trace")] -pub fn insert_batch<'a, I, K, V>(&'a self, iter: I) -where - I: Iterator + Send + Debug, - K: AsRef<[u8]> + Sized + Debug + 'a, - V: AsRef<[u8]> + Sized + 'a, -{ - let mut batch = WriteBatchWithTransaction::::default(); - for (key, val) in iter { - batch.put_cf(&self.cf(), key.as_ref(), val.as_ref()); - } - - let write_options = &self.write_options; - self.db - .db - .write_opt(batch, write_options) - .or_else(or_else) - .expect("database insert batch error"); - - if !self.db.corked() { - self.db.flush().expect("database flush error"); - } -} diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs deleted file mode 100644 index 7ca932a5..00000000 --- a/src/database/map/keys.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::sync::Arc; - -use conduwuit::{Result, implement}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use rocksdb::Direction; -use serde::Deserialize; -use tokio::task; - -use super::stream::is_cached; -use crate::{keyval, keyval::Key, stream}; - -#[implement(super::Map)] -pub fn keys<'a, K>(self: &'a Arc) -> impl Stream>> + Send -where - K: Deserialize<'a> + Send, -{ - self.raw_keys().map(keyval::result_deserialize_key::) -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn raw_keys(self: &Arc) -> impl Stream>> + Send { - use 
crate::pool::Seek; - - let opts = super::iter_options_default(&self.db); - let state = stream::State::new(self, opts); - if is_cached(self) { - let state = state.init_fwd(None); - return task::consume_budget() - .map(move |()| stream::Keys::<'_>::from(state)) - .into_stream() - .flatten() - .boxed(); - } - - let seek = Seek { - map: self.clone(), - dir: Direction::Forward, - state: crate::pool::into_send_seek(state), - key: None, - res: None, - }; - - self.db - .pool - .execute_iter(seek) - .ok_into::>() - .into_stream() - .try_flatten() - .boxed() -} diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs deleted file mode 100644 index c9b1717a..00000000 --- a/src/database/map/keys_from.rs +++ /dev/null @@ -1,83 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; - -use conduwuit::{Result, implement}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use rocksdb::Direction; -use serde::{Deserialize, Serialize}; - -use super::stream_from::is_cached; -use crate::{ - keyval::{Key, result_deserialize_key, serialize_key}, - stream, -}; - -#[implement(super::Map)] -pub fn keys_from<'a, K, P>( - self: &'a Arc, - from: &P, -) -> impl Stream>> + Send + use<'a, K, P> -where - P: Serialize + ?Sized + Debug, - K: Deserialize<'a> + Send, -{ - self.keys_from_raw(from).map(result_deserialize_key::) -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self), level = "trace")] -pub fn keys_from_raw

( - self: &Arc, - from: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: Serialize + ?Sized + Debug, -{ - let key = serialize_key(from).expect("failed to serialize query key"); - self.raw_keys_from(&key) -} - -#[implement(super::Map)] -pub fn keys_raw_from<'a, K, P>( - self: &'a Arc, - from: &P, -) -> impl Stream>> + Send + use<'a, K, P> -where - P: AsRef<[u8]> + ?Sized + Debug + Sync, - K: Deserialize<'a> + Send, -{ - self.raw_keys_from(from).map(result_deserialize_key::) -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn raw_keys_from

( - self: &Arc, - from: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: AsRef<[u8]> + ?Sized + Debug, -{ - use crate::pool::Seek; - - let opts = super::iter_options_default(&self.db); - let state = stream::State::new(self, opts); - if is_cached(self, from) { - return stream::Keys::<'_>::from(state.init_fwd(from.as_ref().into())).boxed(); - } - - let seek = Seek { - map: self.clone(), - dir: Direction::Forward, - key: Some(from.as_ref().into()), - state: crate::pool::into_send_seek(state), - res: None, - }; - - self.db - .pool - .execute_iter(seek) - .ok_into::>() - .into_stream() - .try_flatten() - .boxed() -} diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs deleted file mode 100644 index 09dd79ac..00000000 --- a/src/database/map/keys_prefix.rs +++ /dev/null @@ -1,59 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; - -use conduwuit::{Result, implement}; -use futures::{Stream, StreamExt, TryStreamExt, future}; -use serde::{Deserialize, Serialize}; - -use crate::keyval::{Key, result_deserialize_key, serialize_key}; - -#[implement(super::Map)] -pub fn keys_prefix<'a, K, P>( - self: &'a Arc, - prefix: &P, -) -> impl Stream>> + Send + use<'a, K, P> -where - P: Serialize + ?Sized + Debug, - K: Deserialize<'a> + Send, -{ - self.keys_prefix_raw(prefix) - .map(result_deserialize_key::) -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self), level = "trace")] -pub fn keys_prefix_raw

( - self: &Arc, - prefix: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: Serialize + ?Sized + Debug, -{ - let key = serialize_key(prefix).expect("failed to serialize query key"); - self.raw_keys_from(&key) - .try_take_while(move |k: &Key<'_>| future::ok(k.starts_with(&key))) -} - -#[implement(super::Map)] -pub fn keys_raw_prefix<'a, K, P>( - self: &'a Arc, - prefix: &'a P, -) -> impl Stream>> + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, - K: Deserialize<'a> + Send + 'a, -{ - self.raw_keys_prefix(prefix) - .map(result_deserialize_key::) -} - -#[implement(super::Map)] -pub fn raw_keys_prefix<'a, P>( - self: &'a Arc, - prefix: &'a P, -) -> impl Stream>> + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, -{ - self.raw_keys_from(prefix) - .try_take_while(|k: &Key<'_>| future::ok(k.starts_with(prefix.as_ref()))) -} diff --git a/src/database/map/open.rs b/src/database/map/open.rs deleted file mode 100644 index 07f7a0c6..00000000 --- a/src/database/map/open.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::sync::Arc; - -use rocksdb::ColumnFamily; - -use crate::Engine; - -pub(super) fn open(db: &Arc, name: &str) -> Arc { - let bounded_arc = db.cf(name); - let bounded_ptr = Arc::into_raw(bounded_arc); - let cf_ptr = bounded_ptr.cast::(); - - // SAFETY: Column family handles out of RocksDB are basic pointers and can - // be invalidated: 1. when the database closes. 2. when the column is dropped or - // closed. rust_rocksdb wraps this for us by storing handles in their own - // `RwLock` map and returning an Arc>` to - // provide expected safety. Similarly in "single-threaded mode" we would - // receive `&'_ ColumnFamily`. - // - // PROBLEM: We need to hold these handles in a field, otherwise we have to take - // a lock and get them by name from this map for every query, which is what - // conduit was doing, but we're not going to make a query for every query so we - // need to be holding it right. 
The lifetime parameter on these references makes - // that complicated. If this can be done without polluting the userspace - // with lifetimes on every instance of `Map` then this `unsafe` might not be - // necessary. - // - // SOLUTION: After investigating the underlying types it appears valid to - // Arc-swap `BoundColumnFamily<'_>` for `ColumnFamily`. They have the - // same inner data, the same Drop behavior, Deref, etc. We're just losing the - // lifetime parameter. We should not hold this handle, even in its Arc, after - // closing the database (dropping `Engine`). Since `Arc` is a sibling - // member along with this handle in `Map`, that is prevented. - unsafe { Arc::from_raw(cf_ptr) } -} diff --git a/src/database/map/options.rs b/src/database/map/options.rs deleted file mode 100644 index 9e2ad898..00000000 --- a/src/database/map/options.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::sync::Arc; - -use rocksdb::{ReadOptions, ReadTier, WriteOptions}; - -use crate::Engine; - -#[inline] -pub(crate) fn cache_iter_options_default(db: &Arc) -> ReadOptions { - let mut options = iter_options_default(db); - options.set_read_tier(ReadTier::BlockCache); - options.fill_cache(false); - options -} - -#[inline] -pub(crate) fn iter_options_default(db: &Arc) -> ReadOptions { - let mut options = read_options_default(db); - options.set_background_purge_on_iterator_cleanup(true); - options -} - -#[inline] -pub(crate) fn cache_read_options_default(db: &Arc) -> ReadOptions { - let mut options = read_options_default(db); - options.set_read_tier(ReadTier::BlockCache); - options.fill_cache(false); - options -} - -#[inline] -pub(crate) fn read_options_default(db: &Arc) -> ReadOptions { - let mut options = ReadOptions::default(); - options.set_total_order_seek(true); - - if !db.checksums { - options.set_verify_checksums(false); - } - - options -} - -#[inline] -pub(crate) fn write_options_default(_db: &Arc) -> WriteOptions { WriteOptions::default() } diff --git a/src/database/map/qry.rs 
b/src/database/map/qry.rs deleted file mode 100644 index c6f13c0b..00000000 --- a/src/database/map/qry.rs +++ /dev/null @@ -1,56 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; - -use conduwuit::{Result, arrayvec::ArrayVec, implement}; -use futures::Future; -use serde::Serialize; - -use crate::{Handle, keyval::KeyBuf, ser}; - -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is serialized into an allocated buffer to perform -/// the query. -#[implement(super::Map)] -#[inline] -pub fn qry( - self: &Arc, - key: &K, -) -> impl Future>> + Send + use<'_, K> -where - K: Serialize + ?Sized + Debug, -{ - let mut buf = KeyBuf::new(); - self.bqry(key, &mut buf) -} - -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is serialized into a fixed-sized buffer to perform -/// the query. The maximum size is supplied as const generic parameter. -#[implement(super::Map)] -#[inline] -pub fn aqry( - self: &Arc, - key: &K, -) -> impl Future>> + Send + use<'_, MAX, K> -where - K: Serialize + ?Sized + Debug, -{ - let mut buf = ArrayVec::::new(); - self.bqry(key, &mut buf) -} - -/// Fetch a value from the database into cache, returning a reference-handle -/// asynchronously. The key is serialized into a user-supplied Writer. 
-#[implement(super::Map)] -#[tracing::instrument(skip(self, buf), level = "trace")] -pub fn bqry( - self: &Arc, - key: &K, - buf: &mut B, -) -> impl Future>> + Send + use<'_, K, B> -where - K: Serialize + ?Sized + Debug, - B: Write + AsRef<[u8]>, -{ - let key = ser::serialize(buf, key).expect("failed to serialize query key"); - self.get(key) -} diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs deleted file mode 100644 index e42d3e63..00000000 --- a/src/database/map/qry_batch.rs +++ /dev/null @@ -1,61 +0,0 @@ -use std::{fmt::Debug, sync::Arc}; - -use conduwuit::{ - Result, implement, - utils::{ - IterStream, - stream::{WidebandExt, automatic_amplification, automatic_width}, - }, -}; -use futures::{Stream, StreamExt, TryStreamExt}; -use serde::Serialize; - -use crate::{Handle, keyval::KeyBuf, ser}; - -pub trait Qry<'a, K, S> -where - S: Stream + Send + 'a, - K: Serialize + Debug, -{ - fn qry(self, map: &'a Arc) -> impl Stream>> + Send + 'a; -} - -impl<'a, K, S> Qry<'a, K, S> for S -where - Self: 'a, - S: Stream + Send + 'a, - K: Serialize + Debug + 'a, -{ - #[inline] - fn qry(self, map: &'a Arc) -> impl Stream>> + Send + 'a { - map.qry_batch(self) - } -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self, keys), level = "trace")] -pub(crate) fn qry_batch<'a, S, K>( - self: &'a Arc, - keys: S, -) -> impl Stream>> + Send + 'a -where - S: Stream + Send + 'a, - K: Serialize + Debug + 'a, -{ - use crate::pool::Get; - - keys.ready_chunks(automatic_amplification()) - .widen_then(automatic_width(), |chunk| { - let keys = chunk - .iter() - .map(ser::serialize_to::) - .map(|result| result.expect("failed to serialize query key")) - .collect(); - - self.db - .pool - .execute_get(Get { map: self.clone(), key: keys, res: None }) - }) - .map_ok(|results| results.into_iter().stream()) - .try_flatten() -} diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs deleted file mode 100644 index a7ae9133..00000000 --- 
a/src/database/map/remove.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, io::Write}; - -use conduwuit::{arrayvec::ArrayVec, implement}; -use serde::Serialize; - -use crate::{keyval::KeyBuf, ser, util::or_else}; - -#[implement(super::Map)] -#[inline] -pub fn del(&self, key: K) -where - K: Serialize + Debug, -{ - let mut buf = KeyBuf::new(); - self.bdel(key, &mut buf); -} - -#[implement(super::Map)] -#[inline] -pub fn adel(&self, key: K) -where - K: Serialize + Debug, -{ - let mut buf = ArrayVec::::new(); - self.bdel(key, &mut buf); -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self, buf), level = "trace")] -pub fn bdel(&self, key: K, buf: &mut B) -where - K: Serialize + Debug, - B: Write + AsRef<[u8]>, -{ - let key = ser::serialize(buf, key).expect("failed to serialize deletion key"); - self.remove(key); -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self, key), fields(%self), level = "trace")] -pub fn remove(&self, key: &K) -where - K: AsRef<[u8]> + ?Sized + Debug, -{ - let write_options = &self.write_options; - self.db - .db - .delete_cf_opt(&self.cf(), key, write_options) - .or_else(or_else) - .expect("database remove error"); - - if !self.db.corked() { - self.db.flush().expect("database flush error"); - } -} diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs deleted file mode 100644 index c00f3e55..00000000 --- a/src/database/map/rev_keys.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::sync::Arc; - -use conduwuit::{Result, implement}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use rocksdb::Direction; -use serde::Deserialize; -use tokio::task; - -use super::rev_stream::is_cached; -use crate::{keyval, keyval::Key, stream}; - -#[implement(super::Map)] -pub fn rev_keys<'a, K>(self: &'a Arc) -> impl Stream>> + Send -where - K: Deserialize<'a> + Send, -{ - self.rev_raw_keys().map(keyval::result_deserialize_key::) -} - -#[implement(super::Map)] 
-#[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_raw_keys(self: &Arc) -> impl Stream>> + Send { - use crate::pool::Seek; - - let opts = super::iter_options_default(&self.db); - let state = stream::State::new(self, opts); - if is_cached(self) { - let state = state.init_rev(None); - return task::consume_budget() - .map(move |()| stream::KeysRev::<'_>::from(state)) - .into_stream() - .flatten() - .boxed(); - } - - let seek = Seek { - map: self.clone(), - dir: Direction::Reverse, - state: crate::pool::into_send_seek(state), - key: None, - res: None, - }; - - self.db - .pool - .execute_iter(seek) - .ok_into::>() - .into_stream() - .try_flatten() - .boxed() -} diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs deleted file mode 100644 index 04e457dc..00000000 --- a/src/database/map/rev_keys_from.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; - -use conduwuit::{Result, implement}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use rocksdb::Direction; -use serde::{Deserialize, Serialize}; - -use super::rev_stream_from::is_cached; -use crate::{ - keyval::{Key, result_deserialize_key, serialize_key}, - stream, -}; - -#[implement(super::Map)] -pub fn rev_keys_from<'a, K, P>( - self: &'a Arc, - from: &P, -) -> impl Stream>> + Send + use<'a, K, P> -where - P: Serialize + ?Sized + Debug, - K: Deserialize<'a> + Send, -{ - self.rev_keys_from_raw(from) - .map(result_deserialize_key::) -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self), level = "trace")] -pub fn rev_keys_from_raw

( - self: &Arc, - from: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: Serialize + ?Sized + Debug, -{ - let key = serialize_key(from).expect("failed to serialize query key"); - self.rev_raw_keys_from(&key) -} - -#[implement(super::Map)] -pub fn rev_keys_raw_from<'a, K, P>( - self: &'a Arc, - from: &P, -) -> impl Stream>> + Send + use<'a, K, P> -where - P: AsRef<[u8]> + ?Sized + Debug + Sync, - K: Deserialize<'a> + Send, -{ - self.rev_raw_keys_from(from) - .map(result_deserialize_key::) -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn rev_raw_keys_from

( - self: &Arc, - from: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: AsRef<[u8]> + ?Sized + Debug, -{ - use crate::pool::Seek; - - let opts = super::iter_options_default(&self.db); - let state = stream::State::new(self, opts); - if is_cached(self, from) { - return stream::KeysRev::<'_>::from(state.init_rev(from.as_ref().into())).boxed(); - } - - let seek = Seek { - map: self.clone(), - dir: Direction::Reverse, - key: Some(from.as_ref().into()), - state: crate::pool::into_send_seek(state), - res: None, - }; - - self.db - .pool - .execute_iter(seek) - .ok_into::>() - .into_stream() - .try_flatten() - .boxed() -} diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs deleted file mode 100644 index fbe9f9ca..00000000 --- a/src/database/map/rev_keys_prefix.rs +++ /dev/null @@ -1,59 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; - -use conduwuit::{Result, implement}; -use futures::{Stream, StreamExt, TryStreamExt, future}; -use serde::{Deserialize, Serialize}; - -use crate::keyval::{Key, result_deserialize_key, serialize_key}; - -#[implement(super::Map)] -pub fn rev_keys_prefix<'a, K, P>( - self: &'a Arc, - prefix: &P, -) -> impl Stream>> + Send + use<'a, K, P> -where - P: Serialize + ?Sized + Debug, - K: Deserialize<'a> + Send, -{ - self.rev_keys_prefix_raw(prefix) - .map(result_deserialize_key::) -} - -#[implement(super::Map)] -#[tracing::instrument(skip(self), level = "trace")] -pub fn rev_keys_prefix_raw

( - self: &Arc, - prefix: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: Serialize + ?Sized + Debug, -{ - let key = serialize_key(prefix).expect("failed to serialize query key"); - self.rev_raw_keys_from(&key) - .try_take_while(move |k: &Key<'_>| future::ok(k.starts_with(&key))) -} - -#[implement(super::Map)] -pub fn rev_keys_raw_prefix<'a, K, P>( - self: &'a Arc, - prefix: &'a P, -) -> impl Stream>> + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, - K: Deserialize<'a> + Send + 'a, -{ - self.rev_raw_keys_prefix(prefix) - .map(result_deserialize_key::) -} - -#[implement(super::Map)] -pub fn rev_raw_keys_prefix<'a, P>( - self: &'a Arc, - prefix: &'a P, -) -> impl Stream>> + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, -{ - self.rev_raw_keys_from(prefix) - .try_take_while(|k: &Key<'_>| future::ok(k.starts_with(prefix.as_ref()))) -} diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs deleted file mode 100644 index 789a52e8..00000000 --- a/src/database/map/rev_stream.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::sync::Arc; - -use conduwuit::{Result, implement}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use rocksdb::Direction; -use serde::Deserialize; -use tokio::task; - -use crate::{keyval, keyval::KeyVal, stream}; - -/// Iterate key-value entries in the map from the end. -/// -/// - Result is deserialized -#[implement(super::Map)] -pub fn rev_stream<'a, K, V>( - self: &'a Arc, -) -> impl Stream>> + Send -where - K: Deserialize<'a> + Send, - V: Deserialize<'a> + Send, -{ - self.rev_raw_stream() - .map(keyval::result_deserialize::) -} - -/// Iterate key-value entries in the map from the end. 
-/// -/// - Result is raw -#[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn rev_raw_stream(self: &Arc) -> impl Stream>> + Send { - use crate::pool::Seek; - - let opts = super::iter_options_default(&self.db); - let state = stream::State::new(self, opts); - if is_cached(self) { - let state = state.init_rev(None); - return task::consume_budget() - .map(move |()| stream::ItemsRev::<'_>::from(state)) - .into_stream() - .flatten() - .boxed(); - } - - let seek = Seek { - map: self.clone(), - dir: Direction::Reverse, - state: crate::pool::into_send_seek(state), - key: None, - res: None, - }; - - self.db - .pool - .execute_iter(seek) - .ok_into::>() - .into_stream() - .try_flatten() - .boxed() -} - -#[tracing::instrument( - name = "cached", - level = "trace", - skip_all, - fields(%map), -)] -pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_iter_options_default(&map.db); - let state = stream::State::new(map, opts).init_rev(None); - - !state.is_incomplete() -} diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs deleted file mode 100644 index a612d2a2..00000000 --- a/src/database/map/rev_stream_from.rs +++ /dev/null @@ -1,127 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; - -use conduwuit::{Result, implement}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use rocksdb::Direction; -use serde::{Deserialize, Serialize}; -use tokio::task; - -use crate::{ - keyval::{KeyVal, result_deserialize, serialize_key}, - stream, - util::is_incomplete, -}; - -/// Iterate key-value entries in the map starting from upper-bound. 
-/// -/// - Query is serialized -/// - Result is deserialized -#[implement(super::Map)] -pub fn rev_stream_from<'a, K, V, P>( - self: &'a Arc, - from: &P, -) -> impl Stream>> + Send + use<'a, K, V, P> -where - P: Serialize + ?Sized + Debug, - K: Deserialize<'a> + Send, - V: Deserialize<'a> + Send, -{ - self.rev_stream_from_raw(from) - .map(result_deserialize::) -} - -/// Iterate key-value entries in the map starting from upper-bound. -/// -/// - Query is serialized -/// - Result is raw -#[implement(super::Map)] -#[tracing::instrument(skip(self), level = "trace")] -pub fn rev_stream_from_raw

( - self: &Arc, - from: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: Serialize + ?Sized + Debug, -{ - let key = serialize_key(from).expect("failed to serialize query key"); - self.rev_raw_stream_from(&key) -} - -/// Iterate key-value entries in the map starting from upper-bound. -/// -/// - Query is raw -/// - Result is deserialized -#[implement(super::Map)] -pub fn rev_stream_raw_from<'a, K, V, P>( - self: &'a Arc, - from: &P, -) -> impl Stream>> + Send + use<'a, K, V, P> -where - P: AsRef<[u8]> + ?Sized + Debug + Sync, - K: Deserialize<'a> + Send, - V: Deserialize<'a> + Send, -{ - self.rev_raw_stream_from(from) - .map(result_deserialize::) -} - -/// Iterate key-value entries in the map starting from upper-bound. -/// -/// - Query is raw -/// - Result is raw -#[implement(super::Map)] -#[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn rev_raw_stream_from

( - self: &Arc, - from: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: AsRef<[u8]> + ?Sized + Debug, -{ - use crate::pool::Seek; - - let opts = super::iter_options_default(&self.db); - let state = stream::State::new(self, opts); - if is_cached(self, from) { - let state = state.init_rev(from.as_ref().into()); - return task::consume_budget() - .map(move |()| stream::ItemsRev::<'_>::from(state)) - .into_stream() - .flatten() - .boxed(); - } - - let seek = Seek { - map: self.clone(), - dir: Direction::Reverse, - key: Some(from.as_ref().into()), - state: crate::pool::into_send_seek(state), - res: None, - }; - - self.db - .pool - .execute_iter(seek) - .ok_into::>() - .into_stream() - .try_flatten() - .boxed() -} - -#[tracing::instrument( - name = "cached", - level = "trace", - skip(map, from), - fields(%map), -)] -pub(super) fn is_cached

(map: &Arc, from: &P) -> bool -where - P: AsRef<[u8]> + ?Sized, -{ - let cache_opts = super::cache_iter_options_default(&map.db); - let cache_status = stream::State::new(map, cache_opts) - .init_rev(from.as_ref().into()) - .status(); - - !matches!(cache_status, Some(e) if is_incomplete(&e)) -} diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs deleted file mode 100644 index 46dc9247..00000000 --- a/src/database/map/rev_stream_prefix.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; - -use conduwuit::{Result, implement}; -use futures::{Stream, StreamExt, TryStreamExt, future}; -use serde::{Deserialize, Serialize}; - -use crate::keyval::{KeyVal, result_deserialize, serialize_key}; - -/// Iterate key-value entries in the map where the key matches a prefix. -/// -/// - Query is serialized -/// - Result is deserialized -#[implement(super::Map)] -pub fn rev_stream_prefix<'a, K, V, P>( - self: &'a Arc, - prefix: &P, -) -> impl Stream>> + Send + use<'a, K, V, P> -where - P: Serialize + ?Sized + Debug, - K: Deserialize<'a> + Send, - V: Deserialize<'a> + Send, -{ - self.rev_stream_prefix_raw(prefix) - .map(result_deserialize::) -} - -/// Iterate key-value entries in the map where the key matches a prefix. -/// -/// - Query is serialized -/// - Result is raw -#[implement(super::Map)] -#[tracing::instrument(skip(self), level = "trace")] -pub fn rev_stream_prefix_raw

( - self: &Arc, - prefix: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: Serialize + ?Sized + Debug, -{ - let key = serialize_key(prefix).expect("failed to serialize query key"); - self.rev_raw_stream_from(&key) - .try_take_while(move |(k, _): &KeyVal<'_>| future::ok(k.starts_with(&key))) -} - -/// Iterate key-value entries in the map where the key matches a prefix. -/// -/// - Query is raw -/// - Result is deserialized -#[implement(super::Map)] -pub fn rev_stream_raw_prefix<'a, K, V, P>( - self: &'a Arc, - prefix: &'a P, -) -> impl Stream>> + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, - K: Deserialize<'a> + Send + 'a, - V: Deserialize<'a> + Send + 'a, -{ - self.rev_raw_stream_prefix(prefix) - .map(result_deserialize::) -} - -/// Iterate key-value entries in the map where the key matches a prefix. -/// -/// - Query is raw -/// - Result is raw -#[implement(super::Map)] -pub fn rev_raw_stream_prefix<'a, P>( - self: &'a Arc, - prefix: &'a P, -) -> impl Stream>> + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, -{ - self.rev_raw_stream_from(prefix) - .try_take_while(|(k, _): &KeyVal<'_>| future::ok(k.starts_with(prefix.as_ref()))) -} diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs deleted file mode 100644 index f7371b6c..00000000 --- a/src/database/map/stream.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::sync::Arc; - -use conduwuit::{Result, implement}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use rocksdb::Direction; -use serde::Deserialize; -use tokio::task; - -use crate::{keyval, keyval::KeyVal, stream}; - -/// Iterate key-value entries in the map from the beginning. 
-/// -/// - Result is deserialized -#[implement(super::Map)] -pub fn stream<'a, K, V>( - self: &'a Arc, -) -> impl Stream>> + Send -where - K: Deserialize<'a> + Send, - V: Deserialize<'a> + Send, -{ - self.raw_stream().map(keyval::result_deserialize::) -} - -/// Iterate key-value entries in the map from the beginning. -/// -/// - Result is raw -#[implement(super::Map)] -#[tracing::instrument(skip(self), fields(%self), level = "trace")] -pub fn raw_stream(self: &Arc) -> impl Stream>> + Send { - use crate::pool::Seek; - - let opts = super::iter_options_default(&self.db); - let state = stream::State::new(self, opts); - if is_cached(self) { - let state = state.init_fwd(None); - return task::consume_budget() - .map(move |()| stream::Items::<'_>::from(state)) - .into_stream() - .flatten() - .boxed(); - } - - let seek = Seek { - map: self.clone(), - dir: Direction::Forward, - state: crate::pool::into_send_seek(state), - key: None, - res: None, - }; - - self.db - .pool - .execute_iter(seek) - .ok_into::>() - .into_stream() - .try_flatten() - .boxed() -} - -#[tracing::instrument( - name = "cached", - level = "trace", - skip_all, - fields(%map), -)] -pub(super) fn is_cached(map: &Arc) -> bool { - let opts = super::cache_iter_options_default(&map.db); - let state = stream::State::new(map, opts).init_fwd(None); - - !state.is_incomplete() -} diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs deleted file mode 100644 index ccf48db6..00000000 --- a/src/database/map/stream_from.rs +++ /dev/null @@ -1,122 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; - -use conduwuit::{Result, implement}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use rocksdb::Direction; -use serde::{Deserialize, Serialize}; -use tokio::task; - -use crate::{ - keyval::{KeyVal, result_deserialize, serialize_key}, - stream, -}; - -/// Iterate key-value entries in the map starting from lower-bound. 
-/// -/// - Query is serialized -/// - Result is deserialized -#[implement(super::Map)] -pub fn stream_from<'a, K, V, P>( - self: &'a Arc, - from: &P, -) -> impl Stream>> + Send + use<'a, K, V, P> -where - P: Serialize + ?Sized + Debug, - K: Deserialize<'a> + Send, - V: Deserialize<'a> + Send, -{ - self.stream_from_raw(from).map(result_deserialize::) -} - -/// Iterate key-value entries in the map starting from lower-bound. -/// -/// - Query is serialized -/// - Result is raw -#[implement(super::Map)] -#[tracing::instrument(skip(self), level = "trace")] -pub fn stream_from_raw

( - self: &Arc, - from: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: Serialize + ?Sized + Debug, -{ - let key = serialize_key(from).expect("failed to serialize query key"); - self.raw_stream_from(&key) -} - -/// Iterate key-value entries in the map starting from lower-bound. -/// -/// - Query is raw -/// - Result is deserialized -#[implement(super::Map)] -pub fn stream_raw_from<'a, K, V, P>( - self: &'a Arc, - from: &P, -) -> impl Stream>> + Send + use<'a, K, V, P> -where - P: AsRef<[u8]> + ?Sized + Debug + Sync, - K: Deserialize<'a> + Send, - V: Deserialize<'a> + Send, -{ - self.raw_stream_from(from).map(result_deserialize::) -} - -/// Iterate key-value entries in the map starting from lower-bound. -/// -/// - Query is raw -/// - Result is raw -#[implement(super::Map)] -#[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn raw_stream_from

( - self: &Arc, - from: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: AsRef<[u8]> + ?Sized + Debug, -{ - use crate::pool::Seek; - - let opts = super::iter_options_default(&self.db); - let state = stream::State::new(self, opts); - if is_cached(self, from) { - let state = state.init_fwd(from.as_ref().into()); - return task::consume_budget() - .map(move |()| stream::Items::<'_>::from(state)) - .into_stream() - .flatten() - .boxed(); - } - - let seek = Seek { - map: self.clone(), - dir: Direction::Forward, - key: Some(from.as_ref().into()), - state: crate::pool::into_send_seek(state), - res: None, - }; - - self.db - .pool - .execute_iter(seek) - .ok_into::>() - .into_stream() - .try_flatten() - .boxed() -} - -#[tracing::instrument( - name = "cached", - level = "trace", - skip(map, from), - fields(%map), -)] -pub(super) fn is_cached

(map: &Arc, from: &P) -> bool -where - P: AsRef<[u8]> + ?Sized, -{ - let opts = super::cache_iter_options_default(&map.db); - let state = stream::State::new(map, opts).init_fwd(from.as_ref().into()); - - !state.is_incomplete() -} diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs deleted file mode 100644 index a26478aa..00000000 --- a/src/database/map/stream_prefix.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::{convert::AsRef, fmt::Debug, sync::Arc}; - -use conduwuit::{Result, implement}; -use futures::{Stream, StreamExt, TryStreamExt, future}; -use serde::{Deserialize, Serialize}; - -use crate::keyval::{KeyVal, result_deserialize, serialize_key}; - -/// Iterate key-value entries in the map where the key matches a prefix. -/// -/// - Query is serialized -/// - Result is deserialized -#[implement(super::Map)] -pub fn stream_prefix<'a, K, V, P>( - self: &'a Arc, - prefix: &P, -) -> impl Stream>> + Send + use<'a, K, V, P> -where - P: Serialize + ?Sized + Debug, - K: Deserialize<'a> + Send, - V: Deserialize<'a> + Send, -{ - self.stream_prefix_raw(prefix) - .map(result_deserialize::) -} - -/// Iterate key-value entries in the map where the key matches a prefix. -/// -/// - Query is serialized -/// - Result is raw -#[implement(super::Map)] -#[tracing::instrument(skip(self), level = "trace")] -pub fn stream_prefix_raw

( - self: &Arc, - prefix: &P, -) -> impl Stream>> + Send + use<'_, P> -where - P: Serialize + ?Sized + Debug, -{ - let key = serialize_key(prefix).expect("failed to serialize query key"); - self.raw_stream_from(&key) - .try_take_while(move |(k, _): &KeyVal<'_>| future::ok(k.starts_with(&key))) -} - -/// Iterate key-value entries in the map where the key matches a prefix. -/// -/// - Query is raw -/// - Result is deserialized -#[implement(super::Map)] -pub fn stream_raw_prefix<'a, K, V, P>( - self: &'a Arc, - prefix: &'a P, -) -> impl Stream>> + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, - K: Deserialize<'a> + Send + 'a, - V: Deserialize<'a> + Send + 'a, -{ - self.raw_stream_prefix(prefix) - .map(result_deserialize::) -} - -/// Iterate key-value entries in the map where the key matches a prefix. -/// -/// - Query is raw -/// - Result is raw -#[implement(super::Map)] -pub fn raw_stream_prefix<'a, P>( - self: &'a Arc, - prefix: &'a P, -) -> impl Stream>> + Send + 'a -where - P: AsRef<[u8]> + ?Sized + Debug + Sync + 'a, -{ - self.raw_stream_from(prefix) - .try_take_while(|(k, _): &KeyVal<'_>| future::ok(k.starts_with(prefix.as_ref()))) -} diff --git a/src/database/maps.rs b/src/database/maps.rs deleted file mode 100644 index 19f9ced4..00000000 --- a/src/database/maps.rs +++ /dev/null @@ -1,429 +0,0 @@ -use std::{collections::BTreeMap, sync::Arc}; - -use conduwuit::Result; - -use crate::{ - Engine, Map, - engine::descriptor::{self, CacheDisp, Descriptor}, -}; - -pub(super) type Maps = BTreeMap; -pub(super) type MapsKey = &'static str; -pub(super) type MapsVal = Arc; - -pub(super) fn open(db: &Arc) -> Result { open_list(db, MAPS) } - -#[tracing::instrument(name = "maps", level = "debug", skip_all)] -pub(super) fn open_list(db: &Arc, maps: &[Descriptor]) -> Result { - maps.iter() - .map(|desc| Ok((desc.name, Map::open(db, desc.name)?))) - .collect() -} - -pub(super) static MAPS: &[Descriptor] = &[ - Descriptor { - name: "alias_roomid", - 
..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "alias_userid", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "aliasid_alias", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "backupid_algorithm", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "backupid_etag", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "backupkeyid_backup", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "bannedroomids", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "disabledroomids", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "eventid_outlierpdu", - cache_disp: CacheDisp::SharedWith("pduid_pdu"), - key_size_hint: Some(48), - val_size_hint: Some(1488), - block_size: 1024, - index_size: 512, - ..descriptor::RANDOM - }, - Descriptor { - name: "eventid_pduid", - cache_disp: CacheDisp::Unique, - key_size_hint: Some(48), - val_size_hint: Some(16), - block_size: 512, - index_size: 512, - ..descriptor::RANDOM - }, - Descriptor { - name: "eventid_shorteventid", - cache_disp: CacheDisp::Unique, - key_size_hint: Some(48), - val_size_hint: Some(8), - block_size: 512, - index_size: 512, - ..descriptor::RANDOM - }, - Descriptor { - name: "global", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "id_appserviceregistrations", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "keychangeid_userid", - ..descriptor::RANDOM - }, - Descriptor { - name: "keyid_key", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "lazyloadedids", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "mediaid_file", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "mediaid_user", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "onetimekeyid_onetimekeys", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "pduid_pdu", - cache_disp: CacheDisp::SharedWith("eventid_outlierpdu"), - key_size_hint: Some(16), - val_size_hint: Some(1520), - block_size: 2048, - index_size: 512, - ..descriptor::SEQUENTIAL - }, - 
Descriptor { - name: "publicroomids", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "pushkey_deviceid", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "presenceid_presence", - ..descriptor::SEQUENTIAL_SMALL - }, - Descriptor { - name: "readreceiptid_readreceipt", - ..descriptor::RANDOM - }, - Descriptor { - name: "referencedevents", - ..descriptor::RANDOM - }, - Descriptor { - name: "roomid_invitedcount", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomid_inviteviaservers", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomid_joinedcount", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomid_pduleaves", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomid_shortroomid", - val_size_hint: Some(8), - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomid_shortstatehash", - val_size_hint: Some(8), - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomserverids", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomsynctoken_shortstatehash", - file_shape: 3, - val_size_hint: Some(8), - block_size: 512, - compression_level: 3, - bottommost_level: Some(6), - ..descriptor::SEQUENTIAL - }, - Descriptor { - name: "roomuserdataid_accountdata", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomuserid_invitecount", - val_size_hint: Some(8), - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomuserid_joined", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomuserid_lastprivatereadupdate", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomuserid_leftcount", - val_size_hint: Some(8), - ..descriptor::RANDOM - }, - Descriptor { - name: "roomuserid_knockedcount", - val_size_hint: Some(8), - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomuserid_privateread", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "roomuseroncejoinedids", - ..descriptor::RANDOM - }, - Descriptor { - name: "roomusertype_roomuserdataid", - 
..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "senderkey_pusher", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "server_signingkeys", - ..descriptor::RANDOM - }, - Descriptor { - name: "servercurrentevent_data", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "servername_destination", - ..descriptor::RANDOM_SMALL_CACHE - }, - Descriptor { - name: "servername_educount", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "servername_override", - ..descriptor::RANDOM_SMALL_CACHE - }, - Descriptor { - name: "servernameevent_data", - cache_disp: CacheDisp::Unique, - val_size_hint: Some(128), - ..descriptor::RANDOM - }, - Descriptor { - name: "serverroomids", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "shorteventid_authchain", - cache_disp: CacheDisp::Unique, - key_size_hint: Some(8), - ..descriptor::SEQUENTIAL - }, - Descriptor { - name: "shorteventid_eventid", - cache_disp: CacheDisp::Unique, - key_size_hint: Some(8), - val_size_hint: Some(48), - ..descriptor::SEQUENTIAL_SMALL - }, - Descriptor { - name: "shorteventid_shortstatehash", - key_size_hint: Some(8), - val_size_hint: Some(8), - block_size: 512, - index_size: 512, - ..descriptor::SEQUENTIAL - }, - Descriptor { - name: "shortstatehash_statediff", - key_size_hint: Some(8), - ..descriptor::SEQUENTIAL_SMALL - }, - Descriptor { - name: "shortstatekey_statekey", - cache_disp: CacheDisp::Unique, - key_size_hint: Some(8), - val_size_hint: Some(1016), - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "softfailedeventids", - key_size_hint: Some(48), - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "statehash_shortstatehash", - val_size_hint: Some(8), - ..descriptor::RANDOM - }, - Descriptor { - name: "statekey_shortstatekey", - cache_disp: CacheDisp::Unique, - key_size_hint: Some(1016), - val_size_hint: Some(8), - ..descriptor::RANDOM - }, - Descriptor { - name: "threadid_userids", - ..descriptor::SEQUENTIAL_SMALL - }, - Descriptor { - name: 
"todeviceid_events", - ..descriptor::RANDOM - }, - Descriptor { - name: "tofrom_relation", - key_size_hint: Some(8), - val_size_hint: Some(8), - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "token_userdeviceid", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "tokenids", - block_size: 512, - ..descriptor::RANDOM - }, - Descriptor { - name: "url_previews", - ..descriptor::RANDOM - }, - Descriptor { - name: "userdeviceid_metadata", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userdeviceid_token", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userdevicesessionid_uiaainfo", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userdevicetxnid_response", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userfilterid_filter", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userid_avatarurl", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userid_blurhash", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userid_devicelistversion", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userid_displayname", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userid_lastonetimekeyupdate", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userid_masterkeyid", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userid_password", - ..descriptor::RANDOM - }, - Descriptor { - name: "userid_presenceid", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userid_selfsigningkeyid", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userid_usersigningkeyid", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "useridprofilekey_value", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "openidtoken_expiresatuserid", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "logintoken_expiresatuserid", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userroomid_highlightcount", - ..descriptor::RANDOM - }, - Descriptor { - name: 
"userroomid_invitestate", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userroomid_joined", - ..descriptor::RANDOM - }, - Descriptor { - name: "userroomid_leftstate", - ..descriptor::RANDOM - }, - Descriptor { - name: "userroomid_knockedstate", - ..descriptor::RANDOM_SMALL - }, - Descriptor { - name: "userroomid_notificationcount", - ..descriptor::RANDOM - }, -]; diff --git a/src/database/migrations.rs b/src/database/migrations.rs new file mode 100644 index 00000000..94989185 --- /dev/null +++ b/src/database/migrations.rs @@ -0,0 +1,640 @@ +use std::{ + collections::{HashMap, HashSet}, + fs::{self}, + io::Write, + mem::size_of, + sync::Arc, +}; + +use argon2::{password_hash::SaltString, PasswordHasher, PasswordVerifier}; +use itertools::Itertools; +use rand::thread_rng; +use ruma::{ + events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, + push::Ruleset, + EventId, OwnedRoomId, RoomId, UserId, +}; +use tracing::{debug, error, info, warn}; + +use super::KeyValueDatabase; +use crate::{services, utils, Config, Error, Result}; + +pub(crate) async fn migrations(db: &KeyValueDatabase, config: &Config) -> Result<()> { + // Matrix resource ownership is based on the server name; changing it + // requires recreating the database from scratch. + if services().users.count()? > 0 { + let conduit_user = + UserId::parse_with_server_name("conduit", &config.server_name).expect("@conduit:server_name is valid"); + + if !services().users.exists(&conduit_user)? 
{ + error!("The {} server user does not exist, and the database is not new.", conduit_user); + return Err(Error::bad_database( + "Cannot reuse an existing database after changing the server name, please delete the old one first.", + )); + } + } + + // If the database has any data, perform data migrations before starting + // do not increment the db version if the user is not using sha256_media + let latest_database_version = if cfg!(feature = "sha256_media") { + 14 + } else { + 13 + }; + + if services().users.count()? > 0 { + // MIGRATIONS + if services().globals.database_version()? < 1 { + for (roomserverid, _) in db.roomserverids.iter() { + let mut parts = roomserverid.split(|&b| b == 0xFF); + let room_id = parts.next().expect("split always returns one element"); + let Some(servername) = parts.next() else { + error!("Migration: Invalid roomserverid in db."); + continue; + }; + let mut serverroomid = servername.to_vec(); + serverroomid.push(0xFF); + serverroomid.extend_from_slice(room_id); + + db.serverroomids.insert(&serverroomid, &[])?; + } + + services().globals.bump_database_version(1)?; + + warn!("Migration: 0 -> 1 finished"); + } + + if services().globals.database_version()? < 2 { + // We accidentally inserted hashed versions of "" into the db instead of just "" + for (userid, password) in db.userid_password.iter() { + let salt = SaltString::generate(thread_rng()); + let empty_pass = services() + .globals + .argon + .hash_password(b"", &salt) + .expect("our own password to be properly hashed"); + let empty_hashed_password = services() + .globals + .argon + .verify_password(&password, &empty_pass) + .is_ok(); + + if empty_hashed_password { + db.userid_password.insert(&userid, b"")?; + } + } + + services().globals.bump_database_version(2)?; + + warn!("Migration: 1 -> 2 finished"); + } + + if services().globals.database_version()? 
< 3 { + // Move media to filesystem + for (key, content) in db.mediaid_file.iter() { + if content.is_empty() { + continue; + } + + #[allow(deprecated)] + let path = services().globals.get_media_file(&key); + let mut file = fs::File::create(path)?; + file.write_all(&content)?; + db.mediaid_file.insert(&key, &[])?; + } + + services().globals.bump_database_version(3)?; + + warn!("Migration: 2 -> 3 finished"); + } + + if services().globals.database_version()? < 4 { + // Add federated users to services() as deactivated + for our_user in services().users.iter() { + let our_user = our_user?; + if services().users.is_deactivated(&our_user)? { + continue; + } + for room in services().rooms.state_cache.rooms_joined(&our_user) { + for user in services().rooms.state_cache.room_members(&room?) { + let user = user?; + if user.server_name() != config.server_name { + info!(?user, "Migration: creating user"); + services().users.create(&user, None)?; + } + } + } + } + + services().globals.bump_database_version(4)?; + + warn!("Migration: 3 -> 4 finished"); + } + + if services().globals.database_version()? < 5 { + // Upgrade user data store + for (roomuserdataid, _) in db.roomuserdataid_accountdata.iter() { + let mut parts = roomuserdataid.split(|&b| b == 0xFF); + let room_id = parts.next().unwrap(); + let user_id = parts.next().unwrap(); + let event_type = roomuserdataid.rsplit(|&b| b == 0xFF).next().unwrap(); + + let mut key = room_id.to_vec(); + key.push(0xFF); + key.extend_from_slice(user_id); + key.push(0xFF); + key.extend_from_slice(event_type); + + db.roomusertype_roomuserdataid + .insert(&key, &roomuserdataid)?; + } + + services().globals.bump_database_version(5)?; + + warn!("Migration: 4 -> 5 finished"); + } + + if services().globals.database_version()? 
< 6 { + // Set room member count + for (roomid, _) in db.roomid_shortstatehash.iter() { + let string = utils::string_from_bytes(&roomid).unwrap(); + let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); + services().rooms.state_cache.update_joined_count(room_id)?; + } + + services().globals.bump_database_version(6)?; + + warn!("Migration: 5 -> 6 finished"); + } + + if services().globals.database_version()? < 7 { + // Upgrade state store + let mut last_roomstates: HashMap = HashMap::new(); + let mut current_sstatehash: Option = None; + let mut current_room = None; + let mut current_state = HashSet::new(); + let mut counter = 0; + + let mut handle_state = |current_sstatehash: u64, + current_room: &RoomId, + current_state: HashSet<_>, + last_roomstates: &mut HashMap<_, _>| { + counter += 1; + let last_roomsstatehash = last_roomstates.get(current_room); + + let states_parents = last_roomsstatehash.map_or_else( + || Ok(Vec::new()), + |&last_roomsstatehash| { + services() + .rooms + .state_compressor + .load_shortstatehash_info(last_roomsstatehash) + }, + )?; + + let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew = current_state + .difference(&parent_stateinfo.1) + .copied() + .collect::>(); + + let statediffremoved = parent_stateinfo + .1 + .difference(¤t_state) + .copied() + .collect::>(); + + (statediffnew, statediffremoved) + } else { + (current_state, HashSet::new()) + }; + + services().rooms.state_compressor.save_state_from_diff( + current_sstatehash, + Arc::new(statediffnew), + Arc::new(statediffremoved), + 2, // every state change is 2 event changes on average + states_parents, + )?; + + /* + let mut tmp = services().rooms.load_shortstatehash_info(¤t_sstatehash)?; + let state = tmp.pop().unwrap(); + println!( + "{}\t{}{:?}: {:?} + {:?} - {:?}", + current_room, + " ".repeat(tmp.len()), + utils::u64_from_bytes(¤t_sstatehash).unwrap(), + tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()), + 
state + .2 + .iter() + .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) + .collect::>(), + state + .3 + .iter() + .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) + .collect::>() + ); + */ + + Ok::<_, Error>(()) + }; + + for (k, seventid) in db.db.open_tree("stateid_shorteventid")?.iter() { + let sstatehash = utils::u64_from_bytes(&k[0..size_of::()]).expect("number of bytes is correct"); + let sstatekey = k[size_of::()..].to_vec(); + if Some(sstatehash) != current_sstatehash { + if let Some(current_sstatehash) = current_sstatehash { + handle_state( + current_sstatehash, + current_room.as_deref().unwrap(), + current_state, + &mut last_roomstates, + )?; + last_roomstates.insert(current_room.clone().unwrap(), current_sstatehash); + } + current_state = HashSet::new(); + current_sstatehash = Some(sstatehash); + + let event_id = db.shorteventid_eventid.get(&seventid).unwrap().unwrap(); + let string = utils::string_from_bytes(&event_id).unwrap(); + let event_id = <&EventId>::try_from(string.as_str()).unwrap(); + let pdu = services() + .rooms + .timeline + .get_pdu(event_id) + .unwrap() + .unwrap(); + + if Some(&pdu.room_id) != current_room.as_ref() { + current_room = Some(pdu.room_id.clone()); + } + } + + let mut val = sstatekey; + val.extend_from_slice(&seventid); + current_state.insert(val.try_into().expect("size is correct")); + } + + if let Some(current_sstatehash) = current_sstatehash { + handle_state( + current_sstatehash, + current_room.as_deref().unwrap(), + current_state, + &mut last_roomstates, + )?; + } + + services().globals.bump_database_version(7)?; + + warn!("Migration: 6 -> 7 finished"); + } + + if services().globals.database_version()? 
< 8 { + // Generate short room ids for all rooms + for (room_id, _) in db.roomid_shortstatehash.iter() { + let shortroomid = services().globals.next_count()?.to_be_bytes(); + db.roomid_shortroomid.insert(&room_id, &shortroomid)?; + info!("Migration: 8"); + } + // Update pduids db layout + let mut batch = db.pduid_pdu.iter().filter_map(|(key, v)| { + if !key.starts_with(b"!") { + return None; + } + let mut parts = key.splitn(2, |&b| b == 0xFF); + let room_id = parts.next().unwrap(); + let count = parts.next().unwrap(); + + let short_room_id = db + .roomid_shortroomid + .get(room_id) + .unwrap() + .expect("shortroomid should exist"); + + let mut new_key = short_room_id; + new_key.extend_from_slice(count); + + Some((new_key, v)) + }); + + db.pduid_pdu.insert_batch(&mut batch)?; + + let mut batch2 = db.eventid_pduid.iter().filter_map(|(k, value)| { + if !value.starts_with(b"!") { + return None; + } + let mut parts = value.splitn(2, |&b| b == 0xFF); + let room_id = parts.next().unwrap(); + let count = parts.next().unwrap(); + + let short_room_id = db + .roomid_shortroomid + .get(room_id) + .unwrap() + .expect("shortroomid should exist"); + + let mut new_value = short_room_id; + new_value.extend_from_slice(count); + + Some((k, new_value)) + }); + + db.eventid_pduid.insert_batch(&mut batch2)?; + + services().globals.bump_database_version(8)?; + + warn!("Migration: 7 -> 8 finished"); + } + + if services().globals.database_version()? 
< 9 { + // Update tokenids db layout + let mut iter = db + .tokenids + .iter() + .filter_map(|(key, _)| { + if !key.starts_with(b"!") { + return None; + } + let mut parts = key.splitn(4, |&b| b == 0xFF); + let room_id = parts.next().unwrap(); + let word = parts.next().unwrap(); + let _pdu_id_room = parts.next().unwrap(); + let pdu_id_count = parts.next().unwrap(); + + let short_room_id = db + .roomid_shortroomid + .get(room_id) + .unwrap() + .expect("shortroomid should exist"); + let mut new_key = short_room_id; + new_key.extend_from_slice(word); + new_key.push(0xFF); + new_key.extend_from_slice(pdu_id_count); + Some((new_key, Vec::new())) + }) + .peekable(); + + while iter.peek().is_some() { + db.tokenids.insert_batch(&mut iter.by_ref().take(1000))?; + debug!("Inserted smaller batch"); + } + + info!("Deleting starts"); + + let batch2: Vec<_> = db + .tokenids + .iter() + .filter_map(|(key, _)| { + if key.starts_with(b"!") { + Some(key) + } else { + None + } + }) + .collect(); + + for key in batch2 { + db.tokenids.remove(&key)?; + } + + services().globals.bump_database_version(9)?; + + warn!("Migration: 8 -> 9 finished"); + } + + if services().globals.database_version()? < 10 { + // Add other direction for shortstatekeys + for (statekey, shortstatekey) in db.statekey_shortstatekey.iter() { + db.shortstatekey_statekey + .insert(&shortstatekey, &statekey)?; + } + + // Force E2EE device list updates so we can send them over federation + for user_id in services().users.iter().filter_map(Result::ok) { + services().users.mark_device_key_update(&user_id)?; + } + + services().globals.bump_database_version(10)?; + + warn!("Migration: 9 -> 10 finished"); + } + + if services().globals.database_version()? < 11 { + db.db + .open_tree("userdevicesessionid_uiaarequest")? + .clear()?; + services().globals.bump_database_version(11)?; + + warn!("Migration: 10 -> 11 finished"); + } + + if services().globals.database_version()? 
< 12 { + for username in services().users.list_local_users()? { + let user = match UserId::parse_with_server_name(username.clone(), &config.server_name) { + Ok(u) => u, + Err(e) => { + warn!("Invalid username {username}: {e}"); + continue; + }, + }; + + let raw_rules_list = services() + .account_data + .get(None, &user, GlobalAccountDataEventType::PushRules.to_string().into()) + .unwrap() + .expect("Username is invalid"); + + let mut account_data = serde_json::from_str::(raw_rules_list.get()).unwrap(); + let rules_list = &mut account_data.content.global; + + //content rule + { + let content_rule_transformation = [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; + + let rule = rules_list.content.get(content_rule_transformation[0]); + if rule.is_some() { + let mut rule = rule.unwrap().clone(); + content_rule_transformation[1].clone_into(&mut rule.rule_id); + rules_list + .content + .shift_remove(content_rule_transformation[0]); + rules_list.content.insert(rule); + } + } + + //underride rules + { + let underride_rule_transformation = [ + [".m.rules.call", ".m.rule.call"], + [".m.rules.room_one_to_one", ".m.rule.room_one_to_one"], + [".m.rules.encrypted_room_one_to_one", ".m.rule.encrypted_room_one_to_one"], + [".m.rules.message", ".m.rule.message"], + [".m.rules.encrypted", ".m.rule.encrypted"], + ]; + + for transformation in underride_rule_transformation { + let rule = rules_list.underride.get(transformation[0]); + if let Some(rule) = rule { + let mut rule = rule.clone(); + transformation[1].clone_into(&mut rule.rule_id); + rules_list.underride.shift_remove(transformation[0]); + rules_list.underride.insert(rule); + } + } + } + + services().account_data.update( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; + } + + services().globals.bump_database_version(12)?; + + warn!("Migration: 11 -> 12 finished"); + } + + // This migration can be reused 
as-is anytime the server-default rules are + // updated. + if services().globals.database_version()? < 13 { + for username in services().users.list_local_users()? { + let user = match UserId::parse_with_server_name(username.clone(), &config.server_name) { + Ok(u) => u, + Err(e) => { + warn!("Invalid username {username}: {e}"); + continue; + }, + }; + + let raw_rules_list = services() + .account_data + .get(None, &user, GlobalAccountDataEventType::PushRules.to_string().into()) + .unwrap() + .expect("Username is invalid"); + + let mut account_data = serde_json::from_str::(raw_rules_list.get()).unwrap(); + + let user_default_rules = Ruleset::server_default(&user); + account_data + .content + .global + .update_with_server_default(user_default_rules); + + services().account_data.update( + None, + &user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; + } + + services().globals.bump_database_version(13)?; + + warn!("Migration: 12 -> 13 finished"); + } + + #[cfg(feature = "sha256_media")] + { + if services().globals.database_version()? 
< 14 && cfg!(feature = "sha256_media") { + warn!("sha256_media feature flag is enabled, migrating legacy base64 file names to sha256 file names"); + // Move old media files to new names + for (key, _) in db.mediaid_file.iter() { + let old_path = services().globals.get_media_file(&key); + debug!("Old file path: {old_path:?}"); + let path = services().globals.get_media_file_new(&key); + debug!("New file path: {path:?}"); + // move the file to the new location + if old_path.exists() { + tokio::fs::rename(&old_path, &path).await?; + } + } + + services().globals.bump_database_version(14)?; + + warn!("Migration: 13 -> 14 finished"); + } + } + + assert_eq!( + services().globals.database_version().unwrap(), + latest_database_version, + "Failed asserting local database version {} is equal to known latest conduwuit database version {}", + services().globals.database_version().unwrap(), + latest_database_version + ); + + { + let patterns = &config.forbidden_usernames; + if !patterns.is_empty() { + for user_id in services() + .users + .iter() + .filter_map(Result::ok) + .filter(|user| !services().users.is_deactivated(user).unwrap_or(true)) + .filter(|user| user.server_name() == config.server_name) + { + let matches = patterns.matches(user_id.localpart()); + if matches.matched_any() { + warn!( + "User {} matches the following forbidden username patterns: {}", + user_id.to_string(), + matches + .into_iter() + .map(|x| &patterns.patterns()[x]) + .join(", ") + ); + } + } + } + } + + { + let patterns = &config.forbidden_alias_names; + if !patterns.is_empty() { + for address in services().rooms.metadata.iter_ids() { + let room_id = address?; + let room_aliases = services().rooms.alias.local_aliases_for_room(&room_id); + for room_alias_result in room_aliases { + let room_alias = room_alias_result?; + let matches = patterns.matches(room_alias.alias()); + if matches.matched_any() { + warn!( + "Room with alias {} ({}) matches the following forbidden room name patterns: {}", + 
room_alias, + &room_id, + matches + .into_iter() + .map(|x| &patterns.patterns()[x]) + .join(", ") + ); + } + } + } + } + } + + info!( + "Loaded {} database with schema version {}", + config.database_backend, latest_database_version + ); + } else { + services() + .globals + .bump_database_version(latest_database_version)?; + + // Create the admin room and server user on first run + services().admin.create_admin_room().await?; + + warn!( + "Created new {} database with version {}", + config.database_backend, latest_database_version + ); + } + + Ok(()) +} diff --git a/src/database/mod.rs b/src/database/mod.rs index ffcefee9..6c656426 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,96 +1,586 @@ -#![type_length_limit = "3072"] +pub(crate) mod cork; +pub(crate) mod key_value; +pub(crate) mod kvengine; +pub(crate) mod kvtree; +mod migrations; -extern crate conduwuit_core as conduwuit; -extern crate rust_rocksdb as rocksdb; +#[cfg(feature = "rocksdb")] +pub(crate) mod rocksdb; -conduwuit::mod_ctor! {} -conduwuit::mod_dtor! {} -conduwuit::rustc_flags_capture! 
{} +#[cfg(feature = "sqlite")] +pub mod sqlite; -#[cfg(test)] -mod benches; -mod cork; -mod de; -mod deserialized; -mod engine; -mod handle; -pub mod keyval; -mod map; -pub mod maps; -mod pool; -mod ser; -mod stream; -#[cfg(test)] -mod tests; -pub(crate) mod util; -mod watchers; +#[cfg(any(feature = "sqlite", feature = "rocksdb"))] +pub(crate) mod watchers; -use std::{ops::Index, sync::Arc}; - -use conduwuit::{Result, Server, err}; - -pub use self::{ - de::{Ignore, IgnoreAll}, - deserialized::Deserialized, - handle::Handle, - keyval::{KeyVal, Slice, serialize_key, serialize_val}, - map::{Get, Map, Qry, compact}, - ser::{Cbor, Interfix, Json, SEP, Separator, serialize, serialize_to, serialize_to_vec}, +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + fs::{self}, + path::Path, + sync::{Arc, Mutex, RwLock}, + time::Duration, }; -pub(crate) use self::{ - engine::{Engine, context::Context}, - util::or_else, -}; -use crate::maps::{Maps, MapsKey, MapsVal}; -pub struct Database { - maps: Maps, - pub db: Arc, - pub(crate) _ctx: Arc, +pub(crate) use cork::Cork; +pub(crate) use kvengine::KeyValueDatabaseEngine; +pub(crate) use kvtree::KvTree; +use lru_cache::LruCache; +use ruma::{ + events::{ + push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, GlobalAccountDataEvent, + GlobalAccountDataEventType, + }, + push::Ruleset, + CanonicalJsonValue, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, +}; +use serde::Deserialize; +#[cfg(unix)] +use tokio::signal::unix::{signal, SignalKind}; +use tokio::time::{interval, Instant}; +use tracing::{debug, error, warn}; + +use crate::{ + database::migrations::migrations, service::rooms::timeline::PduCount, services, Config, Error, Result, Services, + SERVICES, +}; + +pub struct KeyValueDatabase { + db: Arc, + + //pub globals: globals::Globals, + pub(super) global: Arc, + pub(super) server_signingkeys: Arc, + + pub(super) roomid_inviteviaservers: Arc, + + //pub users: users::Users, + pub(super) userid_password: 
Arc, + pub(super) userid_displayname: Arc, + pub(super) userid_avatarurl: Arc, + pub(super) userid_blurhash: Arc, + pub(super) userdeviceid_token: Arc, + pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists + pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 + pub(super) token_userdeviceid: Arc, + + pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId + pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count + pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count + pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) + pub(super) userid_masterkeyid: Arc, + pub(super) userid_selfsigningkeyid: Arc, + pub(super) userid_usersigningkeyid: Arc, + + pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId + pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count + pub(super) userid_presenceid: Arc, // UserId => Count + pub(super) presenceid_presence: Arc, // Count + UserId => Presence + + //pub uiaa: uiaa::Uiaa, + pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication + pub(super) userdevicesessionid_uiaarequest: + RwLock>, + + //pub edus: RoomEdus, + pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId + pub(super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count + pub(super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count + + //pub rooms: rooms::Rooms, + pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count + pub(super) eventid_pduid: Arc, + pub(super) roomid_pduleaves: Arc, + pub(super) alias_roomid: Arc, + pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count + pub(super) publicroomids: Arc, + + pub(super) threadid_userids: Arc, // ThreadId = RoomId + Count + + pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount + + /// Participating servers in a 
room. + pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName + pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId + + pub(super) userroomid_joined: Arc, + pub(super) roomuserid_joined: Arc, + pub(super) roomid_joinedcount: Arc, + pub(super) roomid_invitedcount: Arc, + pub(super) roomuseroncejoinedids: Arc, + pub(super) userroomid_invitestate: Arc, // InviteState = Vec> + pub(super) roomuserid_invitecount: Arc, // InviteCount = Count + pub(super) userroomid_leftstate: Arc, + pub(super) roomuserid_leftcount: Arc, + + pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled + + pub(super) bannedroomids: Arc, // Rooms where local users are not allowed to join + + pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId + + pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 + pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 + pub(super) roomuserid_lastnotificationread: Arc, // LastNotificationRead = u64 + + /// Remember the current state hash of a room. + pub(super) roomid_shortstatehash: Arc, + pub(super) roomsynctoken_shortstatehash: Arc, + /// Remember the state hash at events in the past. + pub(super) shorteventid_shortstatehash: Arc, + pub(super) statekey_shortstatekey: Arc, /* StateKey = EventType + StateKey, ShortStateKey = + * Count */ + pub(super) shortstatekey_statekey: Arc, + + pub(super) roomid_shortroomid: Arc, + + pub(super) shorteventid_eventid: Arc, + pub(super) eventid_shorteventid: Arc, + + pub(super) statehash_shortstatehash: Arc, + pub(super) shortstatehash_statediff: Arc, /* StateDiff = parent (or 0) + + * (shortstatekey+shorteventid++) + 0_u64 + + * (shortstatekey+shorteventid--) */ + + pub(super) shorteventid_authchain: Arc, + + /// RoomId + EventId -> outlier PDU. + /// Any pdu that has passed the steps 1-8 in the incoming event + /// /federation/send/txn. 
+ pub(super) eventid_outlierpdu: Arc, + pub(super) softfailedeventids: Arc, + + /// ShortEventId + ShortEventId -> (). + pub(super) tofrom_relation: Arc, + /// RoomId + EventId -> Parent PDU EventId. + pub(super) referencedevents: Arc, + + //pub account_data: account_data::AccountData, + pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type + pub(super) roomusertype_roomuserdataid: Arc, // RoomUserType = Room + User + Type + + //pub media: media::Media, + pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType + pub(super) url_previews: Arc, + pub(super) mediaid_user: Arc, + //pub key_backups: key_backups::KeyBackups, + pub(super) backupid_algorithm: Arc, // BackupId = UserId + Version(Count) + pub(super) backupid_etag: Arc, // BackupId = UserId + Version(Count) + pub(super) backupkeyid_backup: Arc, // BackupKeyId = UserId + Version + RoomId + SessionId + + //pub transaction_ids: transaction_ids::TransactionIds, + pub(super) userdevicetxnid_response: Arc, /* Response can be empty (/sendToDevice) or the event id + * (/send) */ + //pub sending: sending::Sending, + pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync + pub(super) servernameevent_data: Arc, /* ServernameEvent = (+ / $)SenderKey / ServerName / UserId + + * PduId / Id (for edus), Data = EDU content */ + pub(super) servercurrentevent_data: Arc, /* ServerCurrentEvents = (+ / $)ServerName / UserId + PduId + * / Id (for edus), Data = EDU content */ + + //pub appservice: appservice::Appservice, + pub(super) id_appserviceregistrations: Arc, + + //pub pusher: pusher::PushData, + pub(super) senderkey_pusher: Arc, + + pub(super) auth_chain_cache: Mutex, Arc<[u64]>>>, + pub(super) our_real_users_cache: RwLock>>>, + pub(super) appservice_in_room_cache: RwLock>>, + pub(super) lasttimelinecount_cache: Mutex>, } -impl Database { +#[derive(Deserialize)] +struct CheckForUpdatesResponseEntry { + id: u64, + date: String, + 
message: String, +} +#[derive(Deserialize)] +struct CheckForUpdatesResponse { + updates: Vec, +} + +impl KeyValueDatabase { /// Load an existing database or create a new one. - pub async fn open(server: &Arc) -> Result> { - let ctx = Context::new(server)?; - let db = Engine::open(ctx.clone(), maps::MAPS).await?; - Ok(Arc::new(Self { - maps: maps::open(&db)?, - db: db.clone(), - _ctx: ctx, - })) + #[allow(clippy::too_many_lines)] + pub async fn load_or_create( + config: Config, + tracing_reload_handler: tracing_subscriber::reload::Handle< + tracing_subscriber::EnvFilter, + tracing_subscriber::Registry, + >, + ) -> Result<()> { + Self::check_db_setup(&config)?; + + if !Path::new(&config.database_path).exists() { + debug!("Database path does not exist, assuming this is a new setup and creating it"); + fs::create_dir_all(&config.database_path).map_err(|e| { + error!("Failed to create database path: {e}"); + Error::bad_config( + "Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please \ + create the database folder yourself or allow conduwuit the permissions to create directories and \ + files.", + ) + })?; + } + + let builder: Arc = match &*config.database_backend { + "sqlite" => { + debug!("Got sqlite database backend"); + #[cfg(not(feature = "sqlite"))] + return Err(Error::bad_config("Database backend not found.")); + #[cfg(feature = "sqlite")] + Arc::new(Arc::::open(&config)?) + }, + "rocksdb" => { + debug!("Got rocksdb database backend"); + #[cfg(not(feature = "rocksdb"))] + return Err(Error::bad_config("Database backend not found.")); + #[cfg(feature = "rocksdb")] + Arc::new(Arc::::open(&config)?) + }, + _ => { + return Err(Error::bad_config( + "Database backend not found. 
sqlite (not recommended) and rocksdb are the only supported backends.", + )); + }, + }; + + let db_raw = Box::new(Self { + db: builder.clone(), + userid_password: builder.open_tree("userid_password")?, + userid_displayname: builder.open_tree("userid_displayname")?, + userid_avatarurl: builder.open_tree("userid_avatarurl")?, + userid_blurhash: builder.open_tree("userid_blurhash")?, + userdeviceid_token: builder.open_tree("userdeviceid_token")?, + userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, + userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, + token_userdeviceid: builder.open_tree("token_userdeviceid")?, + onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?, + userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?, + keychangeid_userid: builder.open_tree("keychangeid_userid")?, + keyid_key: builder.open_tree("keyid_key")?, + userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, + userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, + userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, + userfilterid_filter: builder.open_tree("userfilterid_filter")?, + todeviceid_events: builder.open_tree("todeviceid_events")?, + userid_presenceid: builder.open_tree("userid_presenceid")?, + presenceid_presence: builder.open_tree("presenceid_presence")?, + + userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, + userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), + readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, + roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt + roomuserid_lastprivatereadupdate: builder.open_tree("roomuserid_lastprivatereadupdate")?, + pduid_pdu: builder.open_tree("pduid_pdu")?, + eventid_pduid: builder.open_tree("eventid_pduid")?, + roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, + + alias_roomid: 
builder.open_tree("alias_roomid")?, + aliasid_alias: builder.open_tree("aliasid_alias")?, + publicroomids: builder.open_tree("publicroomids")?, + + threadid_userids: builder.open_tree("threadid_userids")?, + + tokenids: builder.open_tree("tokenids")?, + + roomserverids: builder.open_tree("roomserverids")?, + serverroomids: builder.open_tree("serverroomids")?, + userroomid_joined: builder.open_tree("userroomid_joined")?, + roomuserid_joined: builder.open_tree("roomuserid_joined")?, + roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, + roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, + roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, + userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, + roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, + userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, + roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + + disabledroomids: builder.open_tree("disabledroomids")?, + + bannedroomids: builder.open_tree("bannedroomids")?, + + lazyloadedids: builder.open_tree("lazyloadedids")?, + + userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, + userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, + roomuserid_lastnotificationread: builder.open_tree("userroomid_highlightcount")?, + + statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, + shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, + + shorteventid_authchain: builder.open_tree("shorteventid_authchain")?, + + roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, + + shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, + eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, + shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, + shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, + roomid_shortstatehash: 
builder.open_tree("roomid_shortstatehash")?, + roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?, + statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, + + eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, + softfailedeventids: builder.open_tree("softfailedeventids")?, + + tofrom_relation: builder.open_tree("tofrom_relation")?, + referencedevents: builder.open_tree("referencedevents")?, + roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, + roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, + mediaid_file: builder.open_tree("mediaid_file")?, + url_previews: builder.open_tree("url_previews")?, + mediaid_user: builder.open_tree("mediaid_user")?, + backupid_algorithm: builder.open_tree("backupid_algorithm")?, + backupid_etag: builder.open_tree("backupid_etag")?, + backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, + userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, + servername_educount: builder.open_tree("servername_educount")?, + servernameevent_data: builder.open_tree("servernameevent_data")?, + servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, + id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, + senderkey_pusher: builder.open_tree("senderkey_pusher")?, + global: builder.open_tree("global")?, + server_signingkeys: builder.open_tree("server_signingkeys")?, + + roomid_inviteviaservers: builder.open_tree("roomid_inviteviaservers")?, + + auth_chain_cache: Mutex::new(LruCache::new( + (f64::from(config.auth_chain_cache_capacity) * config.conduit_cache_capacity_modifier) as usize, + )), + our_real_users_cache: RwLock::new(HashMap::new()), + appservice_in_room_cache: RwLock::new(HashMap::new()), + lasttimelinecount_cache: Mutex::new(HashMap::new()), + }); + + let db = Box::leak(db_raw); + + let services_raw = Box::new(Services::build(db, &config, 
tracing_reload_handler)?); + + // This is the first and only time we initialize the SERVICE static + *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); + + migrations(db, &config).await?; + + services().admin.start_handler(); + + // Set emergency access for the conduit user + match set_emergency_access() { + Ok(pwd_set) => { + if pwd_set { + warn!( + "The Conduit account emergency password is set! Please unset it as soon as you finish admin \ + account recovery!" + ); + services() + .admin + .send_message(RoomMessageEventContent::text_plain( + "The Conduit account emergency password is set! Please unset it as soon as you finish \ + admin account recovery!", + )); + } + }, + Err(e) => { + error!("Could not set the configured emergency password for the conduit user: {}", e); + }, + }; + + services().sending.start_handler(); + + if config.allow_local_presence { + services().presence.start_handler(); + } + + Self::start_cleanup_task().await; + if services().globals.allow_check_for_updates() { + Self::start_check_for_updates_task().await; + } + + Ok(()) } - #[inline] - pub fn get(&self, name: &str) -> Result<&Arc> { - self.maps - .get(name) - .ok_or_else(|| err!(Request(NotFound("column not found")))) + fn check_db_setup(config: &Config) -> Result<()> { + let path = Path::new(&config.database_path); + + let sqlite_exists = path.join("conduit.db").exists(); + let rocksdb_exists = path.join("IDENTITY").exists(); + + let mut count = 0; + + if sqlite_exists { + count += 1; + } + + if rocksdb_exists { + count += 1; + } + + if count > 1 { + warn!("Multiple databases at database_path detected"); + return Ok(()); + } + + if sqlite_exists && config.database_backend != "sqlite" { + return Err(Error::bad_config( + "Found sqlite at database_path, but is not specified in config.", + )); + } + + if rocksdb_exists && config.database_backend != "rocksdb" { + return Err(Error::bad_config( + "Found rocksdb at database_path, but is not specified in config.", + )); + } + + Ok(()) 
} - #[inline] - pub fn iter(&self) -> impl Iterator + Send + '_ { - self.maps.iter() + #[tracing::instrument] + async fn start_check_for_updates_task() { + let timer_interval = Duration::from_secs(7200); // 2 hours + + tokio::spawn(async move { + let mut i = interval(timer_interval); + + loop { + tokio::select! { + _ = i.tick() => { + debug!(target: "start_check_for_updates_task", "Timer ticked"); + }, + } + + _ = Self::try_handle_updates().await; + } + }); } - #[inline] - pub fn keys(&self) -> impl Iterator + Send + '_ { self.maps.keys() } + async fn try_handle_updates() -> Result<()> { + let response = services() + .globals + .client + .default + .get("https://pupbrain.dev/check-for-updates/stable") + .send() + .await?; - #[inline] - #[must_use] - pub fn is_read_only(&self) -> bool { self.db.is_read_only() } + let response = serde_json::from_str::(&response.text().await?).map_err(|e| { + error!("Bad check for updates response: {e}"); + Error::BadServerResponse("Bad version check response") + })?; - #[inline] - #[must_use] - pub fn is_secondary(&self) -> bool { self.db.is_secondary() } -} + let mut last_update_id = services().globals.last_check_for_updates_id()?; + for update in response.updates { + last_update_id = last_update_id.max(update.id); + if update.id > services().globals.last_check_for_updates_id()? { + error!("{}", update.message); + services() + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "@room: the following is a message from the conduwuit puppy. 
it was sent on '{}':\n\n{}", + update.date, update.message + ))); + } + } + services() + .globals + .update_check_for_updates_id(last_update_id)?; -impl Index<&str> for Database { - type Output = Arc; + Ok(()) + } - fn index(&self, name: &str) -> &Self::Output { - self.maps - .get(name) - .expect("column in database does not exist") + #[tracing::instrument] + async fn start_cleanup_task() { + let timer_interval = Duration::from_secs(u64::from(services().globals.config.cleanup_second_interval)); + + tokio::spawn(async move { + let mut i = interval(timer_interval); + + #[cfg(unix)] + let mut hangup = signal(SignalKind::hangup()).expect("Failed to register SIGHUP signal receiver"); + #[cfg(unix)] + let mut ctrl_c = signal(SignalKind::interrupt()).expect("Failed to register SIGINT signal receiver"); + #[cfg(unix)] + let mut terminate = signal(SignalKind::terminate()).expect("Failed to register SIGTERM signal receiver"); + + loop { + #[cfg(unix)] + tokio::select! { + _ = i.tick() => { + debug!(target: "database-cleanup", "Timer ticked"); + } + _ = hangup.recv() => { + debug!(target: "database-cleanup","Received SIGHUP"); + } + _ = ctrl_c.recv() => { + debug!(target: "database-cleanup", "Received Ctrl+C"); + } + _ = terminate.recv() => { + debug!(target: "database-cleanup","Received SIGTERM"); + } + } + + #[cfg(not(unix))] + { + i.tick().await; + debug!(target: "database-cleanup", "Timer ticked") + } + + Self::perform_cleanup(); + } + }); + } + + fn perform_cleanup() { + if !services().globals.config.rocksdb_periodic_cleanup { + return; + } + + let start = Instant::now(); + if let Err(e) = services().globals.cleanup() { + error!(target: "database-cleanup", "Ran into an error during cleanup: {}", e); + } else { + debug!(target: "database-cleanup", "Finished cleanup in {:#?}.", start.elapsed()); + } + } + + pub fn flush(&self) -> Result<()> { + let start = std::time::Instant::now(); + + let res = self.db.flush(); + + debug!("flush: took {:?}", start.elapsed()); + + res } } 
+ +/// Sets the emergency password and push rules for the @conduit account in case +/// emergency password is set +fn set_emergency_access() -> Result { + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is a valid UserId"); + + services() + .users + .set_password(&conduit_user, services().globals.emergency_password().as_deref())?; + + let (ruleset, res) = match services().globals.emergency_password() { + Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), + None => (Ruleset::new(), Ok(false)), + }; + + services().account_data.update( + None, + &conduit_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(&GlobalAccountDataEvent { + content: PushRulesEventContent { + global: ruleset, + }, + }) + .expect("to json value always works"), + )?; + + res +} diff --git a/src/database/pool.rs b/src/database/pool.rs deleted file mode 100644 index 0fa742d1..00000000 --- a/src/database/pool.rs +++ /dev/null @@ -1,449 +0,0 @@ -mod configure; - -use std::{ - mem::take, - sync::{ - Arc, Mutex, - atomic::{AtomicUsize, Ordering}, - }, - thread, - thread::JoinHandle, -}; - -use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; -use conduwuit::{ - Error, Result, Server, debug, err, error, implement, - result::DebugInspect, - smallvec::SmallVec, - trace, - utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, -}; -use futures::{TryFutureExt, channel::oneshot}; -use oneshot::Sender as ResultSender; -use rocksdb::Direction; - -use self::configure::configure; -use crate::{Handle, Map, keyval::KeyBuf, stream}; - -/// Frontend thread-pool. Operating system threads are used to make database -/// requests which are not cached. These thread-blocking requests are offloaded -/// from the tokio async workers and executed on this threadpool. 
-pub(crate) struct Pool { - server: Arc, - queues: Vec>, - workers: Mutex>>, - topology: Vec, - busy: AtomicUsize, - queued_max: AtomicUsize, -} - -/// Operations which can be submitted to the pool. -pub(crate) enum Cmd { - Get(Get), - Iter(Seek), -} - -/// Multi-point-query -pub(crate) struct Get { - pub(crate) map: Arc, - pub(crate) key: BatchQuery<'static>, - pub(crate) res: Option>>, -} - -/// Iterator-seek. -/// Note: only initial seek is supported at this time on the assumption rocksdb -/// prefetching prevents mid-iteration polls from blocking on I/O. -pub(crate) struct Seek { - pub(crate) map: Arc, - pub(crate) state: stream::State<'static>, - pub(crate) dir: Direction, - pub(crate) key: Option, - pub(crate) res: Option>>, -} - -pub(crate) type BatchQuery<'a> = SmallVec<[KeyBuf; BATCH_INLINE]>; -pub(crate) type BatchResult<'a> = SmallVec<[ResultHandle<'a>; BATCH_INLINE]>; -pub(crate) type ResultHandle<'a> = Result>; - -const WORKER_LIMIT: (usize, usize) = (1, 1024); -const QUEUE_LIMIT: (usize, usize) = (1, 4096); -const BATCH_INLINE: usize = 1; - -const WORKER_STACK_SIZE: usize = 1_048_576; -const WORKER_NAME: &str = "conduwuit:db"; - -#[implement(Pool)] -pub(crate) fn new(server: &Arc) -> Result> { - const CHAN_SCHED: (QueueStrategy, QueueStrategy) = (QueueStrategy::Fifo, QueueStrategy::Lifo); - - let (total_workers, queue_sizes, topology) = configure(server); - - let (senders, receivers): (Vec<_>, Vec<_>) = queue_sizes - .into_iter() - .map(|cap| async_channel::bounded_with_queue_strategy(cap, CHAN_SCHED)) - .unzip(); - - let pool = Arc::new(Self { - server: server.clone(), - queues: senders, - workers: Vec::new().into(), - topology, - busy: AtomicUsize::default(), - queued_max: AtomicUsize::default(), - }); - - pool.spawn_until(&receivers, total_workers)?; - - Ok(pool) -} - -impl Drop for Pool { - fn drop(&mut self) { - self.close(); - - debug_assert!( - self.queues.iter().all(Sender::is_empty), - "channel must should not have requests queued on drop" - 
); - debug_assert!( - self.queues.iter().all(Sender::is_closed), - "channel should be closed on drop" - ); - } -} - -#[implement(Pool)] -#[tracing::instrument(skip_all)] -pub(crate) fn close(&self) { - let workers = take(&mut *self.workers.lock().expect("locked")); - - let senders = self.queues.iter().map(Sender::sender_count).sum::(); - - let receivers = self - .queues - .iter() - .map(Sender::receiver_count) - .sum::(); - - for queue in &self.queues { - queue.close(); - } - - if workers.is_empty() { - return; - } - - debug!( - senders, - receivers, - queues = self.queues.len(), - workers = workers.len(), - "Closing pool. Waiting for workers to join..." - ); - - workers - .into_iter() - .map(JoinHandle::join) - .map(|result| result.map_err(Error::from_panic)) - .enumerate() - .for_each(|(id, result)| match result { - | Ok(()) => trace!(?id, "worker joined"), - | Err(error) => error!(?id, "worker joined with error: {error}"), - }); -} - -#[implement(Pool)] -fn spawn_until(self: &Arc, recv: &[Receiver], count: usize) -> Result { - let mut workers = self.workers.lock().expect("locked"); - while workers.len() < count { - self.clone().spawn_one(&mut workers, recv)?; - } - - Ok(()) -} - -#[implement(Pool)] -#[tracing::instrument( - name = "spawn", - level = "trace", - skip_all, - fields(id = %workers.len()) -)] -fn spawn_one( - self: Arc, - workers: &mut Vec>, - recv: &[Receiver], -) -> Result { - debug_assert!(!self.queues.is_empty(), "Must have at least one queue"); - debug_assert!(!recv.is_empty(), "Must have at least one receiver"); - - let id = workers.len(); - let group = id.overflowing_rem(self.queues.len()).0; - let recv = recv[group].clone(); - - let handle = thread::Builder::new() - .name(WORKER_NAME.into()) - .stack_size(WORKER_STACK_SIZE) - .spawn(move || self.worker(id, recv))?; - - workers.push(handle); - - Ok(()) -} - -#[implement(Pool)] -#[tracing::instrument(level = "trace", name = "get", skip(self, cmd))] -pub(crate) async fn execute_get(self: &Arc, 
mut cmd: Get) -> Result> { - let (send, recv) = oneshot::channel(); - _ = cmd.res.insert(send); - - let queue = self.select_queue(); - self.execute(queue, Cmd::Get(cmd)) - .and_then(move |()| { - recv.map_ok(into_recv_get) - .map_err(|e| err!(error!("recv failed {e:?}"))) - }) - .await -} - -#[implement(Pool)] -#[tracing::instrument(level = "trace", name = "iter", skip(self, cmd))] -pub(crate) async fn execute_iter(self: &Arc, mut cmd: Seek) -> Result> { - let (send, recv) = oneshot::channel(); - _ = cmd.res.insert(send); - - let queue = self.select_queue(); - self.execute(queue, Cmd::Iter(cmd)) - .and_then(|()| { - recv.map_ok(into_recv_seek) - .map_err(|e| err!(error!("recv failed {e:?}"))) - }) - .await -} - -#[implement(Pool)] -fn select_queue(&self) -> &Sender { - let core_id = get_affinity().next().unwrap_or(0); - let chan_id = self.topology[core_id]; - self.queues.get(chan_id).unwrap_or_else(|| &self.queues[0]) -} - -#[implement(Pool)] -#[tracing::instrument( - level = "trace", - name = "execute", - skip(self, cmd), - fields( - task = ?tokio::task::try_id(), - receivers = queue.receiver_count(), - queued = queue.len(), - queued_max = self.queued_max.load(Ordering::Relaxed), - ), -)] -async fn execute(&self, queue: &Sender, cmd: Cmd) -> Result { - if cfg!(debug_assertions) { - self.queued_max.fetch_max(queue.len(), Ordering::Relaxed); - } - - queue - .send(cmd) - .await - .map_err(|e| err!(error!("send failed {e:?}"))) -} - -#[implement(Pool)] -#[tracing::instrument( - parent = None, - level = "debug", - skip(self, recv), - fields( - tid = ?thread::current().id(), - ), -)] -fn worker(self: Arc, id: usize, recv: Receiver) { - self.worker_init(id); - self.worker_loop(&recv); -} - -#[implement(Pool)] -fn worker_init(&self, id: usize) { - let group = id.overflowing_rem(self.queues.len()).0; - let affinity = self - .topology - .iter() - .enumerate() - .filter(|_| self.queues.len() > 1) - .filter(|_| self.server.config.db_pool_affinity) - .filter_map(|(core_id, 
&queue_id)| (group == queue_id).then_some(core_id)) - .filter_map(nth_core_available); - - // affinity is empty (no-op) if there's only one queue - set_affinity(affinity.clone()); - - #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] - if affinity.clone().count() == 1 && conduwuit::alloc::je::is_affine_arena() { - use conduwuit::{ - alloc::je::this_thread::{arena_id, set_arena}, - result::LogDebugErr, - }; - - let id = affinity.clone().next().expect("at least one id"); - - if let Ok(arena) = arena_id() { - if arena != id { - set_arena(id).log_debug_err().ok(); - } - } - } - - debug!( - ?group, - affinity = ?affinity.collect::>(), - "worker ready" - ); -} - -#[implement(Pool)] -fn worker_loop(self: &Arc, recv: &Receiver) { - // initial +1 needed prior to entering wait - self.busy.fetch_add(1, Ordering::Relaxed); - - while let Ok(cmd) = self.worker_wait(recv) { - self.worker_handle(cmd); - } -} - -#[implement(Pool)] -#[tracing::instrument( - name = "wait", - level = "trace", - skip_all, - fields( - receivers = recv.receiver_count(), - queued = recv.len(), - busy = self.busy.fetch_sub(1, Ordering::Relaxed) - 1, - ), -)] -fn worker_wait(self: &Arc, recv: &Receiver) -> Result { - recv.recv_blocking().debug_inspect(|_| { - self.busy.fetch_add(1, Ordering::Relaxed); - }) -} - -#[implement(Pool)] -fn worker_handle(self: &Arc, cmd: Cmd) { - match cmd { - | Cmd::Get(cmd) if cmd.key.len() == 1 => self.handle_get(cmd), - | Cmd::Get(cmd) => self.handle_batch(cmd), - | Cmd::Iter(cmd) => self.handle_iter(cmd), - } -} - -#[implement(Pool)] -#[tracing::instrument( - name = "iter", - level = "trace", - skip_all, - fields(%cmd.map), -)] -fn handle_iter(&self, mut cmd: Seek) { - let chan = cmd.res.take().expect("missing result channel"); - - if chan.is_canceled() { - return; - } - - let from = cmd.key.as_deref(); - - let result = match cmd.dir { - | Direction::Forward => cmd.state.init_fwd(from), - | Direction::Reverse => cmd.state.init_rev(from), - }; - - let chan_result = 
chan.send(into_send_seek(result)); - - let _chan_sent = chan_result.is_ok(); -} - -#[implement(Pool)] -#[tracing::instrument( - name = "batch", - level = "trace", - skip_all, - fields( - %cmd.map, - keys = %cmd.key.len(), - ), -)] -fn handle_batch(self: &Arc, mut cmd: Get) { - debug_assert!(cmd.key.len() > 1, "should have more than one key"); - debug_assert!(!cmd.key.iter().any(SmallVec::is_empty), "querying for empty key"); - - let chan = cmd.res.take().expect("missing result channel"); - - if chan.is_canceled() { - return; - } - - let keys = cmd.key.iter(); - - let result: SmallVec<_> = cmd.map.get_batch_blocking(keys).collect(); - - let chan_result = chan.send(into_send_get(result)); - - let _chan_sent = chan_result.is_ok(); -} - -#[implement(Pool)] -#[tracing::instrument( - name = "get", - level = "trace", - skip_all, - fields(%cmd.map), -)] -fn handle_get(&self, mut cmd: Get) { - debug_assert!(!cmd.key[0].is_empty(), "querying for empty key"); - - // Obtain the result channel. - let chan = cmd.res.take().expect("missing result channel"); - - // It is worth checking if the future was dropped while the command was queued - // so we can bail without paying for any query. - if chan.is_canceled() { - return; - } - - // Perform the actual database query. We reuse our database::Map interface but - // limited to the blocking calls, rather than creating another surface directly - // with rocksdb here. - let result = cmd.map.get_blocking(&cmd.key[0]); - - // Send the result back to the submitter. - let chan_result = chan.send(into_send_get([result].into())); - - // If the future was dropped during the query this will fail acceptably. - let _chan_sent = chan_result.is_ok(); -} - -fn into_send_get(result: BatchResult<'_>) -> BatchResult<'static> { - // SAFETY: Necessary to send the Handle (rust_rocksdb::PinnableSlice) through - // the channel. The lifetime on the handle is a device by rust-rocksdb to - // associate a database lifetime with its assets. 
The Handle must be dropped - // before the database is dropped. - unsafe { std::mem::transmute(result) } -} - -fn into_recv_get<'a>(result: BatchResult<'static>) -> BatchResult<'a> { - // SAFETY: This is to receive the Handle from the channel. - unsafe { std::mem::transmute(result) } -} - -pub(crate) fn into_send_seek(result: stream::State<'_>) -> stream::State<'static> { - // SAFETY: Necessary to send the State through the channel; see above. - unsafe { std::mem::transmute(result) } -} - -fn into_recv_seek(result: stream::State<'static>) -> stream::State<'_> { - // SAFETY: This is to receive the State from the channel; see above. - unsafe { std::mem::transmute(result) } -} diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs deleted file mode 100644 index 92dda56e..00000000 --- a/src/database/pool/configure.rs +++ /dev/null @@ -1,151 +0,0 @@ -use std::{path::PathBuf, sync::Arc}; - -use conduwuit::{ - Server, debug, debug_info, expected, is_equal_to, - utils::{ - math::usize_from_f64, - result::LogDebugErr, - stream, - stream::{AMPLIFICATION_LIMIT, WIDTH_LIMIT}, - sys::{compute::is_core_available, storage}, - }, -}; - -use super::{QUEUE_LIMIT, WORKER_LIMIT}; - -pub(super) fn configure(server: &Arc) -> (usize, Vec, Vec) { - let config = &server.config; - - // This finds the block device and gathers all the properties we need. - let path: PathBuf = config.database_path.clone(); - let device_name = storage::name_from_path(&path).log_debug_err().ok(); - let device_prop = storage::parallelism(&path); - - // The default worker count is masked-on if we didn't find better information. - let default_worker_count = device_prop.mq.is_empty().then_some(config.db_pool_workers); - - // Determine the worker groupings. Each indice represents a hardware queue and - // contains the number of workers which will service it. 
- let worker_counts: Vec<_> = device_prop - .mq - .iter() - .filter(|mq| mq.cpu_list.iter().copied().any(is_core_available)) - .map(|mq| { - let shares = mq - .cpu_list - .iter() - .filter(|&&id| is_core_available(id)) - .count() - .max(1); - - let limit = config.db_pool_workers_limit.saturating_mul(shares); - - let limit = device_prop.nr_requests.map_or(limit, |nr| nr.min(limit)); - - mq.nr_tags.unwrap_or(WORKER_LIMIT.0).min(limit) - }) - .chain(default_worker_count) - .collect(); - - // Determine our software queue size for each hardware queue. This is the mpmc - // between the tokio worker and the pool worker. - let queue_sizes: Vec<_> = worker_counts - .iter() - .map(|worker_count| { - worker_count - .saturating_mul(config.db_pool_queue_mult) - .clamp(QUEUE_LIMIT.0, QUEUE_LIMIT.1) - }) - .collect(); - - // Determine the CPU affinities of each hardware queue. Each indice is a cpu and - // each value is the associated hardware queue. There is a little shiftiness - // going on because cpu's which are not available to the process are filtered - // out, similar to the worker_counts. - let topology = device_prop - .mq - .iter() - .fold(vec![0; 128], |mut topology, mq| { - mq.cpu_list - .iter() - .filter(|&&id| is_core_available(id)) - .for_each(|&id| { - topology[id] = mq.id; - }); - - topology - }); - - // Regardless of the capacity of all queues we establish some limit on the total - // number of workers; this is hopefully hinted by nr_requests. - let max_workers = device_prop - .mq - .iter() - .filter_map(|mq| mq.nr_tags) - .chain(default_worker_count) - .fold(0_usize, usize::saturating_add) - .clamp(WORKER_LIMIT.0, WORKER_LIMIT.1); - - // Determine the final worker count which we'll be spawning. - let total_workers = worker_counts - .iter() - .sum::() - .clamp(WORKER_LIMIT.0, max_workers); - - // After computing all of the above we can update the global automatic stream - // width, hopefully with a better value tailored to this system. 
- if config.stream_width_scale > 0.0 { - let num_queues = queue_sizes.len().max(1); - update_stream_width(server, num_queues, total_workers); - } - - debug_info!( - device_name = ?device_name - .as_deref() - .unwrap_or("None"), - ?worker_counts, - ?queue_sizes, - ?total_workers, - stream_width = ?stream::automatic_width(), - "Frontend topology", - ); - - assert!(total_workers > 0, "some workers expected"); - assert!(!queue_sizes.is_empty(), "some queues expected"); - assert!( - !queue_sizes.iter().copied().any(is_equal_to!(0)), - "positive queue sizes expected" - ); - - (total_workers, queue_sizes, topology) -} - -#[allow(clippy::as_conversions, clippy::cast_precision_loss)] -fn update_stream_width(server: &Arc, num_queues: usize, total_workers: usize) { - let config = &server.config; - let scale: f64 = config.stream_width_scale.min(100.0).into(); - - let req_width = expected!(total_workers / num_queues).next_multiple_of(2); - let req_width = req_width as f64; - let req_width = usize_from_f64(req_width * scale) - .expect("failed to convert f64 to usize") - .clamp(WIDTH_LIMIT.0, WIDTH_LIMIT.1); - - let req_amp = config.stream_amplification as f64; - let req_amp = usize_from_f64(req_amp * scale) - .expect("failed to convert f64 to usize") - .clamp(AMPLIFICATION_LIMIT.0, AMPLIFICATION_LIMIT.1); - - let (old_width, new_width) = stream::set_width(req_width); - let (old_amp, new_amp) = stream::set_amplification(req_amp); - debug!( - scale = ?config.stream_width_scale, - ?num_queues, - ?req_width, - ?old_width, - ?new_width, - ?old_amp, - ?new_amp, - "Updated global stream width" - ); -} diff --git a/src/database/rocksdb/kvtree.rs b/src/database/rocksdb/kvtree.rs new file mode 100644 index 00000000..1d9adaa4 --- /dev/null +++ b/src/database/rocksdb/kvtree.rs @@ -0,0 +1,211 @@ +use std::{future::Future, pin::Pin, sync::Arc}; + +use rust_rocksdb::WriteBatchWithTransaction; + +use super::{watchers::Watchers, Engine, KeyValueDatabaseEngine, KvTree}; +use crate::{utils, 
Result}; + +pub(super) struct RocksDbEngineTree<'a> { + pub db: Arc, + pub name: &'a str, + pub watchers: Watchers, +} + +impl RocksDbEngineTree<'_> { + fn cf(&self) -> Arc> { self.db.rocks.cf_handle(self.name).unwrap() } +} + +impl KvTree for RocksDbEngineTree<'_> { + fn get(&self, key: &[u8]) -> Result>> { + let mut readoptions = rust_rocksdb::ReadOptions::default(); + readoptions.set_total_order_seek(true); + + Ok(self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?) + } + + fn multi_get(&self, keys: &[&[u8]]) -> Result>>> { + let mut readoptions = rust_rocksdb::ReadOptions::default(); + readoptions.set_total_order_seek(true); + + // Optimization can be `true` if key vector is pre-sorted **by the column + // comparator**. + const SORTED: bool = false; + + let mut ret: Vec>> = Vec::with_capacity(keys.len()); + for res in self + .db + .rocks + .batched_multi_get_cf_opt(&self.cf(), keys, SORTED, &readoptions) + { + match res? { + Some(res) => ret.push(Some((*res).to_vec())), + None => ret.push(None), + } + } + + Ok(ret) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let writeoptions = rust_rocksdb::WriteOptions::default(); + + self.db + .rocks + .put_cf_opt(&self.cf(), key, value, &writeoptions)?; + + if !self.db.corked() { + self.db.flush()?; + } + + self.watchers.wake(key); + + Ok(()) + } + + fn insert_batch(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + let writeoptions = rust_rocksdb::WriteOptions::default(); + + let mut batch = WriteBatchWithTransaction::::default(); + + for (key, value) in iter { + batch.put_cf(&self.cf(), key, value); + } + + let result = self.db.rocks.write_opt(batch, &writeoptions); + + if !self.db.corked() { + self.db.flush()?; + } + + Ok(result?) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + let writeoptions = rust_rocksdb::WriteOptions::default(); + + let result = self.db.rocks.delete_cf_opt(&self.cf(), key, &writeoptions); + + if !self.db.corked() { + self.db.flush()?; + } + + Ok(result?) 
+ } + + fn remove_batch(&self, iter: &mut dyn Iterator>) -> Result<()> { + let writeoptions = rust_rocksdb::WriteOptions::default(); + + let mut batch = WriteBatchWithTransaction::::default(); + + for key in iter { + batch.delete_cf(&self.cf(), key); + } + + let result = self.db.rocks.write_opt(batch, &writeoptions); + + if !self.db.corked() { + self.db.flush()?; + } + + Ok(result?) + } + + fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { + let mut readoptions = rust_rocksdb::ReadOptions::default(); + readoptions.set_total_order_seek(true); + + Box::new( + self.db + .rocks + .iterator_cf_opt(&self.cf(), readoptions, rust_rocksdb::IteratorMode::Start) + .map(Result::unwrap) + .map(|(k, v)| (Vec::from(k), Vec::from(v))), + ) + } + + fn iter_from<'a>(&'a self, from: &[u8], backwards: bool) -> Box, Vec)> + 'a> { + let mut readoptions = rust_rocksdb::ReadOptions::default(); + readoptions.set_total_order_seek(true); + + Box::new( + self.db + .rocks + .iterator_cf_opt( + &self.cf(), + readoptions, + rust_rocksdb::IteratorMode::From( + from, + if backwards { + rust_rocksdb::Direction::Reverse + } else { + rust_rocksdb::Direction::Forward + }, + ), + ) + .map(Result::unwrap) + .map(|(k, v)| (Vec::from(k), Vec::from(v))), + ) + } + + fn increment(&self, key: &[u8]) -> Result> { + let mut readoptions = rust_rocksdb::ReadOptions::default(); + readoptions.set_total_order_seek(true); + let writeoptions = rust_rocksdb::WriteOptions::default(); + + let old = self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?; + let new = utils::increment(old.as_deref()); + self.db + .rocks + .put_cf_opt(&self.cf(), key, &new, &writeoptions)?; + + if !self.db.corked() { + self.db.flush()?; + } + + Ok(new) + } + + fn increment_batch(&self, iter: &mut dyn Iterator>) -> Result<()> { + let mut readoptions = rust_rocksdb::ReadOptions::default(); + readoptions.set_total_order_seek(true); + let writeoptions = rust_rocksdb::WriteOptions::default(); + + let mut batch = 
WriteBatchWithTransaction::::default(); + + for key in iter { + let old = self.db.rocks.get_cf_opt(&self.cf(), &key, &readoptions)?; + let new = utils::increment(old.as_deref()); + batch.put_cf(&self.cf(), key, new); + } + + self.db.rocks.write_opt(batch, &writeoptions)?; + + if !self.db.corked() { + self.db.flush()?; + } + + Ok(()) + } + + fn scan_prefix<'a>(&'a self, prefix: Vec) -> Box, Vec)> + 'a> { + let mut readoptions = rust_rocksdb::ReadOptions::default(); + readoptions.set_total_order_seek(true); + + Box::new( + self.db + .rocks + .iterator_cf_opt( + &self.cf(), + readoptions, + rust_rocksdb::IteratorMode::From(&prefix, rust_rocksdb::Direction::Forward), + ) + .map(Result::unwrap) + .map(|(k, v)| (Vec::from(k), Vec::from(v))) + .take_while(move |(k, _)| k.starts_with(&prefix)), + ) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + self.watchers.watch(prefix) + } +} diff --git a/src/database/rocksdb/mod.rs b/src/database/rocksdb/mod.rs new file mode 100644 index 00000000..afc47630 --- /dev/null +++ b/src/database/rocksdb/mod.rs @@ -0,0 +1,255 @@ +use std::{ + collections::HashMap, + sync::{atomic::AtomicU32, Arc}, +}; + +use chrono::{DateTime, Utc}; +use rust_rocksdb::{ + backup::{BackupEngine, BackupEngineOptions}, + Cache, ColumnFamilyDescriptor, DBCommon, DBWithThreadMode as Db, Env, MultiThreaded, Options, +}; +use tracing::{debug, error, info, warn}; + +use super::{super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree}; +use crate::Result; + +pub(crate) mod kvtree; +pub(crate) mod opts; + +use kvtree::RocksDbEngineTree; +use opts::{cf_options, db_options}; + +use super::watchers; + +pub(crate) struct Engine { + rocks: Db, + row_cache: Cache, + col_cache: HashMap, + old_cfs: Vec, + opts: Options, + env: Env, + config: Config, + corks: AtomicU32, +} + +impl KeyValueDatabaseEngine for Arc { + fn open(config: &Config) -> Result { + let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0; + let 
row_cache_capacity_bytes = (cache_capacity_bytes * 0.50) as usize; + let col_cache_capacity_bytes = (cache_capacity_bytes * 0.50) as usize; + + let mut col_cache = HashMap::new(); + col_cache.insert("primary".to_owned(), Cache::new_lru_cache(col_cache_capacity_bytes)); + + let db_env = Env::new()?; + let row_cache = Cache::new_lru_cache(row_cache_capacity_bytes); + let db_opts = db_options(config, &db_env, &row_cache, col_cache.get("primary").expect("cache")); + + let load_time = std::time::Instant::now(); + if config.rocksdb_repair { + warn!("Starting database repair. This may take a long time..."); + if let Err(e) = Db::::repair(&db_opts, &config.database_path) { + error!("Repair failed: {:?}", e); + } + } + + debug!("Listing column families in database"); + let cfs = Db::::list_cf(&db_opts, &config.database_path).unwrap_or_default(); + + debug!("Opening {} column family descriptors in database", cfs.len()); + let cfds = cfs + .iter() + .map(|name| ColumnFamilyDescriptor::new(name, cf_options(config, name, db_opts.clone(), &mut col_cache))) + .collect::>(); + + debug!("Opening database..."); + let db = if config.rocksdb_read_only { + Db::::open_cf_for_read_only(&db_opts, &config.database_path, cfs.clone(), false)? + } else { + Db::::open_cf_descriptors(&db_opts, &config.database_path, cfds)? 
+ }; + + info!( + "Opened database at sequence number {} in {:?}", + db.latest_sequence_number(), + load_time.elapsed() + ); + Ok(Arc::new(Engine { + rocks: db, + row_cache, + col_cache, + old_cfs: cfs, + opts: db_opts, + env: db_env, + config: config.clone(), + corks: AtomicU32::new(0), + })) + } + + fn open_tree(&self, name: &'static str) -> Result> { + if !self.old_cfs.contains(&name.to_owned()) { + // Create if it didn't exist + debug!("Creating new column family in database: {}", name); + _ = self.rocks.create_cf(name, &self.opts); + } + + Ok(Arc::new(RocksDbEngineTree { + name, + db: Arc::clone(self), + watchers: Watchers::default(), + })) + } + + fn flush(&self) -> Result<()> { + DBCommon::flush_wal(&self.rocks, false)?; + + Ok(()) + } + + fn sync(&self) -> Result<()> { + DBCommon::flush_wal(&self.rocks, true)?; + + Ok(()) + } + + fn corked(&self) -> bool { self.corks.load(std::sync::atomic::Ordering::Relaxed) > 0 } + + fn cork(&self) -> Result<()> { + self.corks + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + + Ok(()) + } + + fn uncork(&self) -> Result<()> { + self.corks + .fetch_sub(1, std::sync::atomic::Ordering::Relaxed); + + Ok(()) + } + + fn memory_usage(&self) -> Result { + let mut res = String::new(); + let stats = rust_rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.row_cache]))?; + _ = std::fmt::write( + &mut res, + format_args!( + "Memory buffers: {:.2} MiB\nPending write: {:.2} MiB\nTable readers: {:.2} MiB\nRow cache: {:.2} MiB\n", + stats.mem_table_total as f64 / 1024.0 / 1024.0, + stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, + stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, + self.row_cache.get_usage() as f64 / 1024.0 / 1024.0, + ), + ); + + for (name, cache) in &self.col_cache { + _ = std::fmt::write( + &mut res, + format_args!("{} cache: {:.2} MiB\n", name, cache.get_usage() as f64 / 1024.0 / 1024.0,), + ); + } + + Ok(res) + } + + fn cleanup(&self) -> Result<()> { + debug!("Running 
flush_opt"); + let flushoptions = rust_rocksdb::FlushOptions::default(); + + DBCommon::flush_opt(&self.rocks, &flushoptions)?; + + Ok(()) + } + + fn backup(&self) -> Result<(), Box> { + let path = self.config.database_backup_path.as_ref(); + if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { + return Ok(()); + } + + let options = BackupEngineOptions::new(path.unwrap())?; + let mut engine = BackupEngine::open(&options, &self.env)?; + let ret = if self.config.database_backups_to_keep > 0 { + if let Err(e) = engine.create_new_backup_flush(&self.rocks, true) { + return Err(Box::new(e)); + } + + let engine_info = engine.get_backup_info(); + let info = &engine_info.last().unwrap(); + info!( + "Created database backup #{} using {} bytes in {} files", + info.backup_id, info.size, info.num_files, + ); + Ok(()) + } else { + Ok(()) + }; + + if self.config.database_backups_to_keep >= 0 { + let keep = u32::try_from(self.config.database_backups_to_keep)?; + if let Err(e) = engine.purge_old_backups(keep.try_into()?) 
{ + error!("Failed to purge old backup: {:?}", e.to_string()); + } + } + + ret + } + + fn backup_list(&self) -> Result { + let path = self.config.database_backup_path.as_ref(); + if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { + return Ok( + "Configure database_backup_path to enable backups, or the path specified is not valid".to_owned(), + ); + } + + let mut res = String::new(); + let options = BackupEngineOptions::new(path.unwrap())?; + let engine = BackupEngine::open(&options, &self.env)?; + for info in engine.get_backup_info() { + std::fmt::write( + &mut res, + format_args!( + "#{} {}: {} bytes, {} files\n", + info.backup_id, + DateTime::::from_timestamp(info.timestamp, 0) + .unwrap_or_default() + .to_rfc2822(), + info.size, + info.num_files, + ), + ) + .unwrap(); + } + + Ok(res) + } + + fn file_list(&self) -> Result { + match self.rocks.live_files() { + Err(e) => Ok(String::from(e)), + Ok(files) => { + let mut res = String::new(); + for file in files { + let _ = std::fmt::write( + &mut res, + format_args!( + "L{} {:<13} {:7}+ {:4}- {:9} {}
", + file.level, + file.name, + file.num_entries, + file.num_deletions, + file.size, + file.column_family_name, + ), + ); + } + Ok(res) + }, + } + } + + // TODO: figure out if this is needed for rocksdb + #[allow(dead_code)] + fn clear_caches(&self) {} +} diff --git a/src/database/rocksdb/opts.rs b/src/database/rocksdb/opts.rs new file mode 100644 index 00000000..68b30b4d --- /dev/null +++ b/src/database/rocksdb/opts.rs @@ -0,0 +1,315 @@ +#![allow(dead_code)] + +use std::collections::HashMap; + +use rust_rocksdb::{ + BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, DBRecoveryMode, Env, LogLevel, Options, + UniversalCompactOptions, UniversalCompactionStopStyle, +}; + +use super::Config; + +/// Create database-wide options suitable for opening the database. This also +/// sets our default column options in case of opening a column with the same +/// resulting value. Note that we require special per-column options on some +/// columns, therefor columns should only be opened after passing this result +/// through cf_options(). 
+pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache, col_cache: &Cache) -> Options { + let mut opts = Options::default(); + + // Logging + set_logging_defaults(&mut opts, config); + + // Processing + let threads = if config.rocksdb_parallelism_threads == 0 { + num_cpus::get_physical() // max cores if user specified 0 + } else { + config.rocksdb_parallelism_threads + }; + + opts.set_max_background_jobs(threads.try_into().unwrap()); + opts.set_max_subcompactions(threads.try_into().unwrap()); + opts.set_max_file_opening_threads(0); + + // IO + opts.set_manual_wal_flush(true); + opts.set_use_direct_reads(true); + opts.set_use_direct_io_for_flush_and_compaction(true); + if config.rocksdb_optimize_for_spinning_disks { + // speeds up opening DB on hard drives + opts.set_skip_checking_sst_file_sizes_on_db_open(true); + opts.set_skip_stats_update_on_db_open(true); + //opts.set_max_file_opening_threads(threads.try_into().unwrap()); + } + + // Blocks + let mut table_opts = table_options(config); + table_opts.set_block_cache(col_cache); + opts.set_block_based_table_factory(&table_opts); + opts.set_row_cache(row_cache); + + // Buffers + opts.set_write_buffer_size(2 * 1024 * 1024); + opts.set_max_write_buffer_number(2); + opts.set_min_write_buffer_number(1); + + // Files + opts.set_max_total_wal_size(96 * 1024 * 1024); + set_level_defaults(&mut opts, config); + + // Compression + set_compression_defaults(&mut opts, config); + + // Misc + opts.create_if_missing(true); + + // Default: https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes#ktoleratecorruptedtailrecords + // + // Unclean shutdowns of a Matrix homeserver are likely to be fine when + // recovered in this manner as it's likely any lost information will be + // restored via federation. 
+ opts.set_wal_recovery_mode(match config.rocksdb_recovery_mode { + 0 => DBRecoveryMode::AbsoluteConsistency, + 1 => DBRecoveryMode::TolerateCorruptedTailRecords, + 2 => DBRecoveryMode::PointInTime, + 3 => DBRecoveryMode::SkipAnyCorruptedRecord, + 4_u8..=u8::MAX => unimplemented!(), + }); + + opts.set_env(env); + opts +} + +/// Adjust options for the specific column by name. Provide the result of +/// db_options() as the argument to this function and use the return value in +/// the arguments to open the specific column. +pub(crate) fn cf_options(cfg: &Config, name: &str, mut opts: Options, cache: &mut HashMap) -> Options { + // Columns with non-default compaction options + match name { + "backupid_algorithm" + | "backupid_etag" + | "backupkeyid_backup" + | "roomid_shortroomid" + | "shorteventid_shortstatehash" + | "shorteventid_eventid" + | "shortstatekey_statekey" + | "shortstatehash_statediff" + | "userdevicetxnid_response" + | "userfilterid_filter" => set_for_sequential_small_uc(&mut opts, cfg), + &_ => {}, + } + + // Columns with non-default table/cache configs + match name { + "shorteventid_eventid" => set_table_with_new_cache( + &mut opts, + cfg, + cache, + name, + cache_size(cfg, cfg.shorteventid_cache_capacity, 64), + ), + + "eventid_shorteventid" => set_table_with_new_cache( + &mut opts, + cfg, + cache, + name, + cache_size(cfg, cfg.eventidshort_cache_capacity, 64), + ), + + "shorteventid_authchain" => { + set_table_with_new_cache(&mut opts, cfg, cache, name, cache_size(cfg, cfg.auth_chain_cache_capacity, 192)); + }, + + "shortstatekey_statekey" => set_table_with_new_cache( + &mut opts, + cfg, + cache, + name, + cache_size(cfg, cfg.shortstatekey_cache_capacity, 1024), + ), + + "statekey_shortstatekey" => set_table_with_new_cache( + &mut opts, + cfg, + cache, + name, + cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024), + ), + + "pduid_pdu" => set_table_with_new_cache(&mut opts, cfg, cache, name, cfg.pdu_cache_capacity as usize * 1536), + + 
"eventid_outlierpdu" => set_table_with_shared_cache(&mut opts, cfg, cache, name, "pduid_pdu"), + + &_ => {}, + } + + opts +} + +fn set_logging_defaults(opts: &mut Options, config: &Config) { + let rocksdb_log_level = match config.rocksdb_log_level.as_ref() { + "debug" => LogLevel::Debug, + "info" => LogLevel::Info, + "warn" => LogLevel::Warn, + "fatal" => LogLevel::Fatal, + _ => LogLevel::Error, + }; + + opts.set_log_level(rocksdb_log_level); + opts.set_max_log_file_size(config.rocksdb_max_log_file_size); + opts.set_log_file_time_to_roll(config.rocksdb_log_time_to_roll); + opts.set_keep_log_file_num(config.rocksdb_max_log_files); + opts.set_stats_dump_period_sec(0); + + if config.rocksdb_log_stderr { + opts.set_stderr_logger(rocksdb_log_level, "rocksdb"); + } +} + +fn set_compression_defaults(opts: &mut Options, config: &Config) { + let rocksdb_compression_algo = match config.rocksdb_compression_algo.as_ref() { + "zlib" => DBCompressionType::Zlib, + "lz4" => DBCompressionType::Lz4, + "bz2" => DBCompressionType::Bz2, + _ => DBCompressionType::Zstd, + }; + + if config.rocksdb_bottommost_compression { + opts.set_bottommost_compression_type(rocksdb_compression_algo); + opts.set_bottommost_zstd_max_train_bytes(0, true); + + // -14 w_bits is only read by zlib. + opts.set_bottommost_compression_options(-14, config.rocksdb_bottommost_compression_level, 0, 0, true); + } + + // -14 w_bits is only read by zlib. 
+ opts.set_compression_options(-14, config.rocksdb_compression_level, 0, 0); + opts.set_compression_type(rocksdb_compression_algo); +} + +fn set_for_random_small_uc(opts: &mut Options, config: &Config) { + let uco = uc_options(config); + set_for_random_small(opts, config); + opts.set_universal_compaction_options(&uco); + opts.set_compaction_style(DBCompactionStyle::Universal); +} + +fn set_for_sequential_small_uc(opts: &mut Options, config: &Config) { + let uco = uc_options(config); + set_for_sequential_small(opts, config); + opts.set_universal_compaction_options(&uco); + opts.set_compaction_style(DBCompactionStyle::Universal); +} + +fn set_for_random_small(opts: &mut Options, config: &Config) { + set_for_random(opts, config); + + opts.set_write_buffer_size(1024 * 128); + opts.set_target_file_size_base(1024 * 128); + opts.set_target_file_size_multiplier(2); + opts.set_max_bytes_for_level_base(1024 * 512); + opts.set_max_bytes_for_level_multiplier(2.0); +} + +fn set_for_sequential_small(opts: &mut Options, config: &Config) { + set_for_random(opts, config); + + opts.set_write_buffer_size(1024 * 512); + opts.set_target_file_size_base(1024 * 512); + opts.set_target_file_size_multiplier(2); + opts.set_max_bytes_for_level_base(1024 * 1024); + opts.set_max_bytes_for_level_multiplier(2.0); +} + +fn set_for_random(opts: &mut Options, config: &Config) { + set_level_defaults(opts, config); + + let pri = "compaction_pri=kOldestSmallestSeqFirst"; + opts.set_options_from_string(pri) + .expect("set compaction priority string"); + + opts.set_max_bytes_for_level_base(8 * 1024 * 1024); + opts.set_max_bytes_for_level_multiplier(1.0); + opts.set_max_bytes_for_level_multiplier_additional(&[0, 1, 1, 3, 7, 15, 31]); +} + +fn set_for_sequential(opts: &mut Options, config: &Config) { + set_level_defaults(opts, config); + + let pri = "compaction_pri=kOldestLargestSeqFirst"; + opts.set_options_from_string(pri) + .expect("set compaction priority string"); + + opts.set_target_file_size_base(2 
* 1024 * 1024); + opts.set_target_file_size_multiplier(2); + + opts.set_max_bytes_for_level_base(32 * 1024 * 1024); + opts.set_max_bytes_for_level_multiplier(1.0); + opts.set_max_bytes_for_level_multiplier_additional(&[0, 1, 1, 3, 7, 15, 31]); +} + +fn set_level_defaults(opts: &mut Options, _config: &Config) { + opts.set_level_zero_file_num_compaction_trigger(2); + + opts.set_target_file_size_base(1024 * 1024); + opts.set_target_file_size_multiplier(2); + + opts.set_level_compaction_dynamic_level_bytes(false); + opts.set_max_bytes_for_level_base(16 * 1024 * 1024); + opts.set_max_bytes_for_level_multiplier(2.0); + + opts.set_ttl(21 * 24 * 60 * 60); +} + +fn uc_options(_config: &Config) -> UniversalCompactOptions { + let mut opts = UniversalCompactOptions::default(); + + opts.set_stop_style(UniversalCompactionStopStyle::Total); + opts.set_max_size_amplification_percent(10000); + opts.set_compression_size_percent(-1); + opts.set_size_ratio(1); + + opts.set_min_merge_width(2); + opts.set_max_merge_width(16); + + opts +} + +fn set_table_with_new_cache( + opts: &mut Options, config: &Config, cache: &mut HashMap, name: &str, size: usize, +) { + cache.insert(name.to_owned(), Cache::new_lru_cache(size)); + set_table_with_shared_cache(opts, config, cache, name, name); +} + +fn set_table_with_shared_cache( + opts: &mut Options, config: &Config, cache: &HashMap, _name: &str, cache_name: &str, +) { + let mut table = table_options(config); + table.set_block_cache( + cache + .get(cache_name) + .expect("existing cache to share with this column"), + ); + opts.set_block_based_table_factory(&table); +} + +fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> usize { + let ents = f64::from(base_size) * config.conduit_cache_capacity_modifier; + + ents as usize * entity_size +} + +fn table_options(_config: &Config) -> BlockBasedOptions { + let mut opts = BlockBasedOptions::default(); + + opts.set_block_size(4 * 1024); + opts.set_metadata_block_size(4 * 1024); + + 
opts.set_optimize_filters_for_memory(true); + opts.set_cache_index_and_filter_blocks(true); + opts.set_pin_top_level_index_and_filter(true); + + opts +} diff --git a/src/database/ser.rs b/src/database/ser.rs deleted file mode 100644 index 2e1a2cb0..00000000 --- a/src/database/ser.rs +++ /dev/null @@ -1,399 +0,0 @@ -use std::io::Write; - -use conduwuit::{Error, Result, debug::type_name, err, result::DebugInspect, utils::exchange}; -use serde::{Deserialize, Serialize, ser}; - -use crate::util::unhandled; - -#[inline] -pub fn serialize_to_vec(val: T) -> Result> { - serialize_to::, T>(val) -} - -#[inline] -pub fn serialize_to(val: T) -> Result -where - B: Default + Write + AsRef<[u8]>, - T: Serialize, -{ - let mut buf = B::default(); - serialize(&mut buf, val)?; - - Ok(buf) -} - -/// Serialize T into Writer W -#[inline] -#[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] -pub fn serialize<'a, W, T>(out: &'a mut W, val: T) -> Result<&'a [u8]> -where - W: Write + AsRef<[u8]> + 'a, - T: Serialize, -{ - let mut serializer = Serializer { out, depth: 0, sep: false, fin: false }; - - val.serialize(&mut serializer) - .map_err(|error| err!(SerdeSer("{error}"))) - .debug_inspect(|()| { - debug_assert_eq!( - serializer.depth, 0, - "Serialization completed at non-zero recursion level" - ); - })?; - - Ok((*out).as_ref()) -} - -pub(crate) struct Serializer<'a, W: Write> { - out: &'a mut W, - depth: u32, - sep: bool, - fin: bool, -} - -/// Newtype for JSON serialization. -#[derive(Debug, Serialize)] -pub struct Json(pub T); - -/// Newtype for CBOR serialization. -#[derive(Debug, Deserialize, Serialize)] -pub struct Cbor(pub T); - -/// Directive to force separator serialization specifically for prefix keying -/// use. This is a quirk of the database schema and prefix iterations. -#[derive(Debug, Serialize)] -pub struct Interfix; - -/// Directive to force separator serialization. Separators are usually -/// serialized automatically. 
-#[derive(Debug, Serialize)] -pub struct Separator; - -/// Record separator; an intentionally invalid-utf8 byte. -pub const SEP: u8 = b'\xFF'; - -impl Serializer<'_, W> { - const SEP: &'static [u8] = &[SEP]; - - fn tuple_start(&mut self) { - debug_assert!(!self.sep, "Tuple start with separator set"); - self.sequence_start(); - } - - fn tuple_end(&mut self) -> Result { - self.sequence_end()?; - Ok(()) - } - - fn sequence_start(&mut self) { - debug_assert!(!self.is_finalized(), "Sequence start with finalization set"); - cfg!(debug_assertions).then(|| self.depth = self.depth.saturating_add(1)); - } - - fn sequence_end(&mut self) -> Result { - cfg!(debug_assertions).then(|| self.depth = self.depth.saturating_sub(1)); - Ok(()) - } - - fn record_start(&mut self) -> Result { - debug_assert!(!self.is_finalized(), "Starting a record after serialization finalized"); - exchange(&mut self.sep, true) - .then(|| self.separator()) - .unwrap_or(Ok(())) - } - - fn separator(&mut self) -> Result { - debug_assert!(!self.is_finalized(), "Writing a separator after serialization finalized"); - self.out.write_all(Self::SEP).map_err(Into::into) - } - - fn write(&mut self, buf: &[u8]) -> Result { self.out.write_all(buf).map_err(Into::into) } - - fn set_finalized(&mut self) { - debug_assert!(!self.is_finalized(), "Finalization already set"); - cfg!(debug_assertions).then(|| self.fin = true); - } - - fn is_finalized(&self) -> bool { self.fin } -} - -impl ser::Serializer for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - type SerializeMap = Self; - type SerializeSeq = Self; - type SerializeStruct = Self; - type SerializeStructVariant = Self; - type SerializeTuple = Self; - type SerializeTupleStruct = Self; - type SerializeTupleVariant = Self; - - fn serialize_seq(self, _len: Option) -> Result { - self.sequence_start(); - Ok(self) - } - - fn serialize_tuple(self, _len: usize) -> Result { - self.tuple_start(); - Ok(self) - } - - fn serialize_tuple_struct( - self, - _name: 
&'static str, - _len: usize, - ) -> Result { - self.tuple_start(); - Ok(self) - } - - fn serialize_tuple_variant( - self, - _name: &'static str, - _idx: u32, - _var: &'static str, - _len: usize, - ) -> Result { - unhandled!("serialize Tuple Variant not implemented") - } - - fn serialize_map(self, _len: Option) -> Result { - unhandled!( - "serialize Map not implemented; did you mean to use database::Json() around your \ - serde_json::Value?" - ) - } - - fn serialize_struct(self, _name: &'static str, _len: usize) -> Result { - unhandled!( - "serialize Struct not implemented at this time; did you mean to use \ - database::Json() around your struct?" - ) - } - - fn serialize_struct_variant( - self, - _name: &'static str, - _idx: u32, - _var: &'static str, - _len: usize, - ) -> Result { - unhandled!("serialize Struct Variant not implemented") - } - - #[allow(clippy::needless_borrows_for_generic_args)] // buggy - fn serialize_newtype_struct(self, name: &'static str, value: &T) -> Result - where - T: Serialize + ?Sized, - { - debug_assert!( - name != "Json" || type_name::() != "alloc::boxed::Box", - "serializing a Json(RawValue); you can skip serialization instead" - ); - - match name { - | "Json" => serde_json::to_writer(&mut self.out, value).map_err(Into::into), - | "Cbor" => { - use minicbor::encode::write::Writer; - use minicbor_serde::Serializer; - - value - .serialize(&mut Serializer::new(&mut Writer::new(&mut self.out))) - .map_err(|e| Self::Error::SerdeSer(e.to_string().into())) - }, - | _ => unhandled!("Unrecognized serialization Newtype {name:?}"), - } - } - - fn serialize_newtype_variant( - self, - _name: &'static str, - _idx: u32, - _var: &'static str, - _value: &T, - ) -> Result { - unhandled!("serialize Newtype Variant not implemented") - } - - fn serialize_unit_struct(self, name: &'static str) -> Result { - match name { - | "Interfix" => { - self.set_finalized(); - }, - | "Separator" => { - self.separator()?; - }, - | _ => unhandled!("Unrecognized 
serialization directive: {name:?}"), - } - - Ok(()) - } - - fn serialize_unit_variant( - self, - _name: &'static str, - _idx: u32, - _var: &'static str, - ) -> Result { - unhandled!("serialize Unit Variant not implemented") - } - - fn serialize_some(self, val: &T) -> Result { - val.serialize(self) - } - - fn serialize_none(self) -> Result { Ok(()) } - - fn serialize_char(self, v: char) -> Result { - let mut buf: [u8; 4] = [0; 4]; - self.serialize_str(v.encode_utf8(&mut buf)) - } - - fn serialize_str(self, v: &str) -> Result { - debug_assert!( - self.depth > 0, - "serializing string at the top-level; you can skip serialization instead" - ); - - self.serialize_bytes(v.as_bytes()) - } - - fn serialize_bytes(self, v: &[u8]) -> Result { - debug_assert!( - self.depth > 0, - "serializing byte array at the top-level; you can skip serialization instead" - ); - - self.write(v) - } - - fn serialize_f64(self, _v: f64) -> Result { - unhandled!("serialize f64 not implemented") - } - - fn serialize_f32(self, _v: f32) -> Result { - unhandled!("serialize f32 not implemented") - } - - fn serialize_i64(self, v: i64) -> Result { self.write(&v.to_be_bytes()) } - - fn serialize_i32(self, v: i32) -> Result { self.write(&v.to_be_bytes()) } - - fn serialize_i16(self, _v: i16) -> Result { - unhandled!("serialize i16 not implemented") - } - - fn serialize_i8(self, _v: i8) -> Result { - unhandled!("serialize i8 not implemented") - } - - fn serialize_u64(self, v: u64) -> Result { self.write(&v.to_be_bytes()) } - - fn serialize_u32(self, v: u32) -> Result { self.write(&v.to_be_bytes()) } - - fn serialize_u16(self, _v: u16) -> Result { - unhandled!("serialize u16 not implemented") - } - - fn serialize_u8(self, v: u8) -> Result { self.write(&[v]) } - - fn serialize_bool(self, _v: bool) -> Result { - unhandled!("serialize bool not implemented") - } - - fn serialize_unit(self) -> Result { unhandled!("serialize unit not implemented") } -} - -impl ser::SerializeSeq for &mut Serializer<'_, W> { - type 
Error = Error; - type Ok = (); - - fn serialize_element(&mut self, val: &T) -> Result { - val.serialize(&mut **self) - } - - fn end(self) -> Result { self.sequence_end() } -} - -impl ser::SerializeTuple for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_element(&mut self, val: &T) -> Result { - self.record_start()?; - val.serialize(&mut **self) - } - - fn end(self) -> Result { self.tuple_end() } -} - -impl ser::SerializeTupleStruct for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_field(&mut self, val: &T) -> Result { - self.record_start()?; - val.serialize(&mut **self) - } - - fn end(self) -> Result { self.tuple_end() } -} - -impl ser::SerializeTupleVariant for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_field(&mut self, val: &T) -> Result { - self.record_start()?; - val.serialize(&mut **self) - } - - fn end(self) -> Result { self.tuple_end() } -} - -impl ser::SerializeMap for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_key(&mut self, _key: &T) -> Result { - unhandled!("serialize Map Key not implemented") - } - - fn serialize_value(&mut self, _val: &T) -> Result { - unhandled!("serialize Map Val not implemented") - } - - fn end(self) -> Result { unhandled!("serialize Map End not implemented") } -} - -impl ser::SerializeStruct for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_field( - &mut self, - _key: &'static str, - _val: &T, - ) -> Result { - unhandled!("serialize Struct Field not implemented") - } - - fn end(self) -> Result { unhandled!("serialize Struct End not implemented") } -} - -impl ser::SerializeStructVariant for &mut Serializer<'_, W> { - type Error = Error; - type Ok = (); - - fn serialize_field( - &mut self, - _key: &'static str, - _val: &T, - ) -> Result { - unhandled!("serialize Struct Variant Field not implemented") - } - - fn end(self) -> Result { - unhandled!("serialize 
Struct Variant End not implemented") - } -} diff --git a/src/database/sqlite/mod.rs b/src/database/sqlite/mod.rs new file mode 100644 index 00000000..11c81e03 --- /dev/null +++ b/src/database/sqlite/mod.rs @@ -0,0 +1,322 @@ +use std::{ + cell::RefCell, + future::Future, + path::{Path, PathBuf}, + pin::Pin, + sync::Arc, +}; + +use parking_lot::{Mutex, MutexGuard}; +use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; +use thread_local::ThreadLocal; +use tracing::debug; + +use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree}; +use crate::{database::Config, Result}; + +thread_local! { + static READ_CONNECTION: RefCell> = const { RefCell::new(None) }; + static READ_CONNECTION_ITERATOR: RefCell> = const { RefCell::new(None) }; +} + +struct PreparedStatementIterator<'a> { + pub iterator: Box + 'a>, + pub _statement_ref: NonAliasingBox>, +} + +impl Iterator for PreparedStatementIterator<'_> { + type Item = TupleOfBytes; + + fn next(&mut self) -> Option { self.iterator.next() } +} + +struct NonAliasingBox(*mut T); +impl Drop for NonAliasingBox { + fn drop(&mut self) { + // TODO: figure out why this is necessary, but also this is sqlite so dont think + // i care that much. i tried checking commit history but couldn't find out why + // this was done. 
+ #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + _ = Box::from_raw(self.0); + }; + } +} + +pub(crate) struct Engine { + writer: Mutex, + read_conn_tls: ThreadLocal, + read_iterator_conn_tls: ThreadLocal, + + path: PathBuf, + cache_size_per_thread: u32, +} + +impl Engine { + fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result { + let conn = Connection::open(path)?; + + conn.pragma_update(Some(Main), "page_size", 2048)?; + conn.pragma_update(Some(Main), "journal_mode", "WAL")?; + conn.pragma_update(Some(Main), "synchronous", "NORMAL")?; + conn.pragma_update(Some(Main), "cache_size", -i64::from(cache_size_kb))?; + conn.pragma_update(Some(Main), "wal_autocheckpoint", 0)?; + + Ok(conn) + } + + fn write_lock(&self) -> MutexGuard<'_, Connection> { self.writer.lock() } + + fn read_lock(&self) -> &Connection { + self.read_conn_tls + .get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()) + } + + fn read_lock_iterator(&self) -> &Connection { + self.read_iterator_conn_tls + .get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()) + } + + pub fn flush_wal(self: &Arc) -> Result<()> { + self.write_lock() + .pragma_update(Some(Main), "wal_checkpoint", "RESTART")?; + Ok(()) + } +} + +impl KeyValueDatabaseEngine for Arc { + fn open(config: &Config) -> Result { + let path = Path::new(&config.database_path).join("conduit.db"); + + // calculates cache-size per permanent connection + // 1. convert MB to KiB + // 2. divide by permanent connections + permanent iter connections + write + // connection + // 3. 
round down to nearest integer + let cache_size_per_thread: u32 = + ((config.db_cache_capacity_mb * 1024.0) / ((num_cpus::get().max(1) * 2) + 1) as f64) as u32; + + let writer = Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?); + + let arc = Arc::new(Engine { + writer, + read_conn_tls: ThreadLocal::new(), + read_iterator_conn_tls: ThreadLocal::new(), + path, + cache_size_per_thread, + }); + + Ok(arc) + } + + fn open_tree(&self, name: &str) -> Result> { + self.write_lock().execute( + &format!("CREATE TABLE IF NOT EXISTS {name} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )"), + [], + )?; + + Ok(Arc::new(SqliteTable { + engine: Arc::clone(self), + name: name.to_owned(), + watchers: Watchers::default(), + })) + } + + fn flush(&self) -> Result<()> { + // we enabled PRAGMA synchronous=normal, so this should not be necessary + Ok(()) + } + + fn cleanup(&self) -> Result<()> { self.flush_wal() } +} + +pub struct SqliteTable { + engine: Arc, + name: String, + watchers: Watchers, +} + +type TupleOfBytes = (Vec, Vec); + +impl SqliteTable { + fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { + Ok(guard + .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? + .query_row([key], |row| row.get(0)) + .optional()?) 
+ } + + fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { + guard.execute( + format!("INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", self.name).as_str(), + [key, value], + )?; + Ok(()) + } + + pub fn iter_with_guard<'a>(&'a self, guard: &'a Connection) -> Box + 'a> { + let statement = Box::leak(Box::new( + guard + .prepare(&format!("SELECT key, value FROM {} ORDER BY key ASC", &self.name)) + .unwrap(), + )); + + let statement_ref = NonAliasingBox(statement); + + //let name = self.name.clone(); + + let iterator = Box::new( + statement + .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .unwrap() + .map(Result::unwrap), + ); + + Box::new(PreparedStatementIterator { + iterator, + _statement_ref: statement_ref, + }) + } +} + +impl KvTree for SqliteTable { + fn get(&self, key: &[u8]) -> Result>> { self.get_with_guard(self.engine.read_lock(), key) } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let guard = self.engine.write_lock(); + self.insert_with_guard(&guard, key, value)?; + drop(guard); + self.watchers.wake(key); + Ok(()) + } + + fn insert_batch(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + let guard = self.engine.write_lock(); + + guard.execute("BEGIN", [])?; + for (key, value) in iter { + self.insert_with_guard(&guard, &key, &value)?; + } + guard.execute("COMMIT", [])?; + + drop(guard); + + Ok(()) + } + + fn increment_batch(&self, iter: &mut dyn Iterator>) -> Result<()> { + let guard = self.engine.write_lock(); + + guard.execute("BEGIN", [])?; + for key in iter { + let old = self.get_with_guard(&guard, &key)?; + let new = crate::utils::increment(old.as_deref()); + self.insert_with_guard(&guard, &key, &new)?; + } + guard.execute("COMMIT", [])?; + + drop(guard); + + Ok(()) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + let guard = self.engine.write_lock(); + + guard.execute(format!("DELETE FROM {} WHERE key = ?", self.name).as_str(), [key])?; + + Ok(()) + } + + fn 
iter<'a>(&'a self) -> Box + 'a> { + let guard = self.engine.read_lock_iterator(); + + self.iter_with_guard(guard) + } + + fn iter_from<'a>(&'a self, from: &[u8], backwards: bool) -> Box + 'a> { + let guard = self.engine.read_lock_iterator(); + let from = from.to_vec(); // TODO change interface? + + //let name = self.name.clone(); + + if backwards { + let statement = Box::leak(Box::new( + guard + .prepare(&format!( + "SELECT key, value FROM {} WHERE key <= ? ORDER BY key DESC", + &self.name + )) + .unwrap(), + )); + + let statement_ref = NonAliasingBox(statement); + + let iterator = Box::new( + statement + .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .unwrap() + .map(Result::unwrap), + ); + Box::new(PreparedStatementIterator { + iterator, + _statement_ref: statement_ref, + }) + } else { + let statement = Box::leak(Box::new( + guard + .prepare(&format!( + "SELECT key, value FROM {} WHERE key >= ? ORDER BY key ASC", + &self.name + )) + .unwrap(), + )); + + let statement_ref = NonAliasingBox(statement); + + let iterator = Box::new( + statement + .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .unwrap() + .map(Result::unwrap), + ); + + Box::new(PreparedStatementIterator { + iterator, + _statement_ref: statement_ref, + }) + } + } + + fn increment(&self, key: &[u8]) -> Result> { + let guard = self.engine.write_lock(); + + let old = self.get_with_guard(&guard, key)?; + + let new = crate::utils::increment(old.as_deref()); + + self.insert_with_guard(&guard, key, &new)?; + + Ok(new) + } + + fn scan_prefix<'a>(&'a self, prefix: Vec) -> Box + 'a> { + Box::new( + self.iter_from(&prefix, false) + .take_while(move |(key, _)| key.starts_with(&prefix)), + ) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + self.watchers.watch(prefix) + } + + fn clear(&self) -> Result<()> { + debug!("clear: running"); + self.engine + .write_lock() + .execute(format!("DELETE FROM {}", self.name).as_str(), [])?; + 
debug!("clear: ran"); + Ok(()) + } +} diff --git a/src/database/stream.rs b/src/database/stream.rs deleted file mode 100644 index eb264ccd..00000000 --- a/src/database/stream.rs +++ /dev/null @@ -1,152 +0,0 @@ -mod items; -mod items_rev; -mod keys; -mod keys_rev; - -use std::sync::Arc; - -use conduwuit::{Result, utils::exchange}; -use rocksdb::{DBRawIteratorWithThreadMode, ReadOptions}; - -pub(crate) use self::{items::Items, items_rev::ItemsRev, keys::Keys, keys_rev::KeysRev}; -use crate::{ - Map, Slice, - engine::Db, - keyval::{Key, KeyVal, Val}, - util::{is_incomplete, map_err}, -}; - -pub(crate) struct State<'a> { - inner: Inner<'a>, - seek: bool, - init: bool, -} - -pub(crate) trait Cursor<'a, T> { - fn state(&self) -> &State<'a>; - - fn fetch(&self) -> Option; - - fn seek(&mut self); - - #[inline] - fn get(&self) -> Option> { - self.fetch() - .map(Ok) - .or_else(|| self.state().status().map(map_err).map(Err)) - } - - #[inline] - fn seek_and_get(&mut self) -> Option> { - self.seek(); - self.get() - } -} - -type Inner<'a> = DBRawIteratorWithThreadMode<'a, Db>; -type From<'a> = Option>; - -impl<'a> State<'a> { - #[inline] - pub(super) fn new(map: &'a Arc, opts: ReadOptions) -> Self { - Self { - inner: map.db().db.raw_iterator_cf_opt(&map.cf(), opts), - init: true, - seek: false, - } - } - - #[inline] - #[tracing::instrument(level = "trace", skip_all)] - pub(super) fn init_fwd(mut self, from: From<'_>) -> Self { - debug_assert!(self.init, "init must be set to make this call"); - debug_assert!(!self.seek, "seek must not be set to make this call"); - - if let Some(key) = from { - self.inner.seek(key); - } else { - self.inner.seek_to_first(); - } - - self.seek = true; - self - } - - #[inline] - #[tracing::instrument(level = "trace", skip_all)] - pub(super) fn init_rev(mut self, from: From<'_>) -> Self { - debug_assert!(self.init, "init must be set to make this call"); - debug_assert!(!self.seek, "seek must not be set to make this call"); - - if let Some(key) = from { 
- self.inner.seek_for_prev(key); - } else { - self.inner.seek_to_last(); - } - - self.seek = true; - self - } - - #[inline] - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - pub(super) fn seek_fwd(&mut self) { - if !exchange(&mut self.init, false) { - self.inner.next(); - } else if !self.seek { - self.inner.seek_to_first(); - } - } - - #[inline] - #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - pub(super) fn seek_rev(&mut self) { - if !exchange(&mut self.init, false) { - self.inner.prev(); - } else if !self.seek { - self.inner.seek_to_last(); - } - } - - pub(super) fn is_incomplete(&self) -> bool { - matches!(self.status(), Some(e) if is_incomplete(&e)) - } - - #[inline] - fn fetch_key(&self) -> Option> { self.inner.key() } - - #[inline] - fn _fetch_val(&self) -> Option> { self.inner.value() } - - #[inline] - fn fetch(&self) -> Option> { self.inner.item() } - - #[inline] - pub(super) fn status(&self) -> Option { self.inner.status().err() } - - #[inline] - pub(super) fn valid(&self) -> bool { self.inner.valid() } -} - -fn keyval_longevity<'a, 'b: 'a>(item: KeyVal<'a>) -> KeyVal<'b> { - (slice_longevity::<'a, 'b>(item.0), slice_longevity::<'a, 'b>(item.1)) -} - -fn slice_longevity<'a, 'b: 'a>(item: &'a Slice) -> &'b Slice { - // SAFETY: The lifetime of the data returned by the rocksdb cursor is only valid - // between each movement of the cursor. It is hereby unsafely extended to match - // the lifetime of the cursor itself. This is due to the limitation of the - // Stream trait where the Item is incapable of conveying a lifetime; this is due - // to GAT's being unstable during its development. This unsafety can be removed - // as soon as this limitation is addressed by an upcoming version. - // - // We have done our best to mitigate the implications of this in conjunction - // with the deserialization API such that borrows being held across movements of - // the cursor do not happen accidentally. 
The compiler will still error when - // values herein produced try to leave a closure passed to a StreamExt API. But - // escapes can happen if you explicitly and intentionally attempt it, and there - // will be no compiler error or warning. This is primarily the case with - // calling collect() without a preceding map(ToOwned::to_owned). A collection - // of references here is illegal, but this will not be enforced by the compiler. - unsafe { std::mem::transmute(item) } -} diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs deleted file mode 100644 index ede2b822..00000000 --- a/src/database/stream/items.rs +++ /dev/null @@ -1,44 +0,0 @@ -use std::pin::Pin; - -use conduwuit::Result; -use futures::{ - Stream, - stream::FusedStream, - task::{Context, Poll}, -}; - -use super::{Cursor, State, keyval_longevity}; -use crate::keyval::KeyVal; - -pub(crate) struct Items<'a> { - state: State<'a>, -} - -impl<'a> From> for Items<'a> { - #[inline] - fn from(state: State<'a>) -> Self { Self { state } } -} - -impl<'a> Cursor<'a, KeyVal<'a>> for Items<'a> { - #[inline] - fn state(&self) -> &State<'a> { &self.state } - - #[inline] - fn fetch(&self) -> Option> { self.state.fetch().map(keyval_longevity) } - - #[inline] - fn seek(&mut self) { self.state.seek_fwd(); } -} - -impl<'a> Stream for Items<'a> { - type Item = Result>; - - fn poll_next(mut self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll> { - Poll::Ready(self.seek_and_get()) - } -} - -impl FusedStream for Items<'_> { - #[inline] - fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } -} diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs deleted file mode 100644 index dba8d16c..00000000 --- a/src/database/stream/items_rev.rs +++ /dev/null @@ -1,44 +0,0 @@ -use std::pin::Pin; - -use conduwuit::Result; -use futures::{ - Stream, - stream::FusedStream, - task::{Context, Poll}, -}; - -use super::{Cursor, State, keyval_longevity}; -use 
crate::keyval::KeyVal; - -pub(crate) struct ItemsRev<'a> { - state: State<'a>, -} - -impl<'a> From> for ItemsRev<'a> { - #[inline] - fn from(state: State<'a>) -> Self { Self { state } } -} - -impl<'a> Cursor<'a, KeyVal<'a>> for ItemsRev<'a> { - #[inline] - fn state(&self) -> &State<'a> { &self.state } - - #[inline] - fn fetch(&self) -> Option> { self.state.fetch().map(keyval_longevity) } - - #[inline] - fn seek(&mut self) { self.state.seek_rev(); } -} - -impl<'a> Stream for ItemsRev<'a> { - type Item = Result>; - - fn poll_next(mut self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll> { - Poll::Ready(self.seek_and_get()) - } -} - -impl FusedStream for ItemsRev<'_> { - #[inline] - fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } -} diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs deleted file mode 100644 index 7c89869b..00000000 --- a/src/database/stream/keys.rs +++ /dev/null @@ -1,44 +0,0 @@ -use std::pin::Pin; - -use conduwuit::Result; -use futures::{ - Stream, - stream::FusedStream, - task::{Context, Poll}, -}; - -use super::{Cursor, State, slice_longevity}; -use crate::keyval::Key; - -pub(crate) struct Keys<'a> { - state: State<'a>, -} - -impl<'a> From> for Keys<'a> { - #[inline] - fn from(state: State<'a>) -> Self { Self { state } } -} - -impl<'a> Cursor<'a, Key<'a>> for Keys<'a> { - #[inline] - fn state(&self) -> &State<'a> { &self.state } - - #[inline] - fn fetch(&self) -> Option> { self.state.fetch_key().map(slice_longevity) } - - #[inline] - fn seek(&mut self) { self.state.seek_fwd(); } -} - -impl<'a> Stream for Keys<'a> { - type Item = Result>; - - fn poll_next(mut self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll> { - Poll::Ready(self.seek_and_get()) - } -} - -impl FusedStream for Keys<'_> { - #[inline] - fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } -} diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs deleted file mode 100644 index 
51561e5c..00000000 --- a/src/database/stream/keys_rev.rs +++ /dev/null @@ -1,44 +0,0 @@ -use std::pin::Pin; - -use conduwuit::Result; -use futures::{ - Stream, - stream::FusedStream, - task::{Context, Poll}, -}; - -use super::{Cursor, State, slice_longevity}; -use crate::keyval::Key; - -pub(crate) struct KeysRev<'a> { - state: State<'a>, -} - -impl<'a> From> for KeysRev<'a> { - #[inline] - fn from(state: State<'a>) -> Self { Self { state } } -} - -impl<'a> Cursor<'a, Key<'a>> for KeysRev<'a> { - #[inline] - fn state(&self) -> &State<'a> { &self.state } - - #[inline] - fn fetch(&self) -> Option> { self.state.fetch_key().map(slice_longevity) } - - #[inline] - fn seek(&mut self) { self.state.seek_rev(); } -} - -impl<'a> Stream for KeysRev<'a> { - type Item = Result>; - - fn poll_next(mut self: Pin<&mut Self>, _ctx: &mut Context<'_>) -> Poll> { - Poll::Ready(self.seek_and_get()) - } -} - -impl FusedStream for KeysRev<'_> { - #[inline] - fn is_terminated(&self) -> bool { !self.state.init && !self.state.valid() } -} diff --git a/src/database/tests.rs b/src/database/tests.rs deleted file mode 100644 index c1a9f47c..00000000 --- a/src/database/tests.rs +++ /dev/null @@ -1,549 +0,0 @@ -#![allow(clippy::needless_borrows_for_generic_args)] - -use std::fmt::Debug; - -use conduwuit::{ - arrayvec::ArrayVec, - ruma::{EventId, RoomId, UserId, serde::Raw}, -}; -use serde::Serialize; - -use crate::{ - Ignore, Interfix, de, ser, - ser::{Json, serialize_to_vec}, -}; - -#[test] -#[cfg_attr(debug_assertions, should_panic(expected = "serializing string at the top-level"))] -fn ser_str() { - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - let s = serialize_to_vec(&user_id).expect("failed to serialize user_id"); - assert_eq!(&s, user_id.as_bytes()); -} - -#[test] -fn ser_tuple() { - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - - let mut a = user_id.as_bytes().to_vec(); - a.push(0xFF); - 
a.extend_from_slice(room_id.as_bytes()); - - let b = (user_id, room_id); - let b = serialize_to_vec(&b).expect("failed to serialize tuple"); - - assert_eq!(a, b); -} - -#[test] -fn ser_tuple_option() { - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - - let mut a = Vec::::new(); - a.push(0xFF); - a.extend_from_slice(user_id.as_bytes()); - - let mut aa = Vec::::new(); - aa.extend_from_slice(room_id.as_bytes()); - aa.push(0xFF); - aa.extend_from_slice(user_id.as_bytes()); - - let b: (Option<&RoomId>, &UserId) = (None, user_id); - let b = serialize_to_vec(&b).expect("failed to serialize tuple"); - assert_eq!(a, b); - - let bb: (Option<&RoomId>, &UserId) = (Some(room_id), user_id); - let bb = serialize_to_vec(&bb).expect("failed to serialize tuple"); - assert_eq!(aa, bb); -} - -#[test] -#[should_panic(expected = "I/O error: failed to write whole buffer")] -fn ser_overflow() { - const BUFSIZE: usize = 10; - - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - - assert!(BUFSIZE < user_id.as_str().len() + room_id.as_str().len()); - let mut buf = ArrayVec::::new(); - - let val = (user_id, room_id); - _ = ser::serialize(&mut buf, val).unwrap(); -} - -#[test] -fn ser_complex() { - use conduwuit::ruma::Mxc; - - #[derive(Debug, Serialize)] - struct Dim { - width: u32, - height: u32, - } - - let mxc = Mxc { - server_name: "example.com".try_into().unwrap(), - media_id: "AbCdEfGhIjK", - }; - - let dim = Dim { width: 123, height: 456 }; - - let mut a = Vec::new(); - a.extend_from_slice(b"mxc://"); - a.extend_from_slice(mxc.server_name.as_bytes()); - a.extend_from_slice(b"/"); - a.extend_from_slice(mxc.media_id.as_bytes()); - a.push(0xFF); - a.extend_from_slice(&dim.width.to_be_bytes()); - a.extend_from_slice(&dim.height.to_be_bytes()); - a.push(0xFF); - - let d: &[u32] = &[dim.width, dim.height]; - let b = (mxc, d, 
Interfix); - let b = serialize_to_vec(b).expect("failed to serialize complex"); - - assert_eq!(a, b); -} - -#[test] -fn ser_json() { - use conduwuit::ruma::api::client::filter::FilterDefinition; - - let filter = FilterDefinition { - event_fields: Some(vec!["content.body".to_owned()]), - ..Default::default() - }; - - let serialized = serialize_to_vec(Json(&filter)).expect("failed to serialize value"); - - let s = String::from_utf8_lossy(&serialized); - assert_eq!(&s, r#"{"event_fields":["content.body"]}"#); -} - -#[test] -fn ser_json_value() { - use conduwuit::ruma::api::client::filter::FilterDefinition; - - let filter = FilterDefinition { - event_fields: Some(vec!["content.body".to_owned()]), - ..Default::default() - }; - - let value = serde_json::to_value(filter).expect("failed to serialize to serde_json::value"); - let serialized = serialize_to_vec(Json(value)).expect("failed to serialize value"); - - let s = String::from_utf8_lossy(&serialized); - assert_eq!(&s, r#"{"event_fields":["content.body"]}"#); -} - -#[test] -fn ser_json_macro() { - use serde_json::json; - - #[derive(Serialize)] - struct Foo { - foo: String, - } - - let content = Foo { foo: "bar".to_owned() }; - let content = serde_json::to_value(content).expect("failed to serialize content"); - let sender: &UserId = "@foo:example.com".try_into().unwrap(); - let serialized = serialize_to_vec(Json(json!({ - "content": content, - "sender": sender, - }))) - .expect("failed to serialize value"); - - let s = String::from_utf8_lossy(&serialized); - assert_eq!(&s, r#"{"content":{"foo":"bar"},"sender":"@foo:example.com"}"#); -} - -#[test] -#[cfg_attr(debug_assertions, should_panic(expected = "serializing string at the top-level"))] -fn ser_json_raw() { - use conduwuit::ruma::api::client::filter::FilterDefinition; - - let filter = FilterDefinition { - event_fields: Some(vec!["content.body".to_owned()]), - ..Default::default() - }; - - let value = - serde_json::value::to_raw_value(&filter).expect("failed to 
serialize to raw value"); - let a = serialize_to_vec(value.get()).expect("failed to serialize raw value"); - let s = String::from_utf8_lossy(&a); - assert_eq!(&s, r#"{"event_fields":["content.body"]}"#); -} - -#[test] -#[cfg_attr(debug_assertions, should_panic(expected = "you can skip serialization instead"))] -fn ser_json_raw_json() { - use conduwuit::ruma::api::client::filter::FilterDefinition; - - let filter = FilterDefinition { - event_fields: Some(vec!["content.body".to_owned()]), - ..Default::default() - }; - - let value = - serde_json::value::to_raw_value(&filter).expect("failed to serialize to raw value"); - let a = serialize_to_vec(Json(value)).expect("failed to serialize json value"); - let s = String::from_utf8_lossy(&a); - assert_eq!(&s, r#"{"event_fields":["content.body"]}"#); -} - -#[test] -fn de_tuple() { - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - - let raw: &[u8] = b"@user:example.com\xFF!room:example.com"; - let (a, b): (&UserId, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); - - assert_eq!(a, user_id, "deserialized user_id does not match"); - assert_eq!(b, room_id, "deserialized room_id does not match"); -} - -#[test] -#[should_panic(expected = "failed to deserialize")] -fn de_tuple_invalid() { - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - - let raw: &[u8] = b"@user:example.com\xFF@user:example.com"; - let (a, b): (&UserId, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); - - assert_eq!(a, user_id, "deserialized user_id does not match"); - assert_eq!(b, room_id, "deserialized room_id does not match"); -} - -#[test] -#[should_panic(expected = "failed to deserialize")] -fn de_tuple_incomplete() { - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - - let raw: &[u8] = b"@user:example.com"; - let (a, _): (&UserId, &RoomId) = 
de::from_slice(raw).expect("failed to deserialize"); - - assert_eq!(a, user_id, "deserialized user_id does not match"); -} - -#[test] -#[should_panic(expected = "failed to deserialize")] -fn de_tuple_incomplete_with_sep() { - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - - let raw: &[u8] = b"@user:example.com\xFF"; - let (a, _): (&UserId, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); - - assert_eq!(a, user_id, "deserialized user_id does not match"); -} - -#[test] -#[cfg_attr( - debug_assertions, - should_panic(expected = "deserialization failed to consume trailing bytes") -)] -fn de_tuple_unfinished() { - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - - let raw: &[u8] = b"@user:example.com\xFF!room:example.com\xFF@user:example.com"; - let (a, b): (&UserId, &RoomId) = de::from_slice(raw).expect("failed to deserialize"); - - assert_eq!(a, user_id, "deserialized user_id does not match"); - assert_eq!(b, room_id, "deserialized room_id does not match"); -} - -#[test] -fn de_tuple_ignore() { - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - - let raw: &[u8] = b"@user:example.com\xFF@user2:example.net\xFF!room:example.com"; - let (a, _, c): (&UserId, Ignore, &RoomId) = - de::from_slice(raw).expect("failed to deserialize"); - - assert_eq!(a, user_id, "deserialized user_id does not match"); - assert_eq!(c, room_id, "deserialized room_id does not match"); -} - -#[test] -fn de_json_array() { - let a = &["foo", "bar", "baz"]; - let s = serde_json::to_vec(a).expect("failed to serialize to JSON array"); - - let b: Raw>> = de::from_slice(&s).expect("failed to deserialize"); - - let d: Vec = - serde_json::from_str(b.json().get()).expect("failed to deserialize JSON"); - - for (i, a) in a.iter().enumerate() { - assert_eq!(*a, d[i]); - } -} - -#[test] -fn de_json_raw_array() { - 
let a = &["foo", "bar", "baz"]; - let s = serde_json::to_vec(a).expect("failed to serialize to JSON array"); - - let b: Raw>> = de::from_slice(&s).expect("failed to deserialize"); - - let c: Vec> = - serde_json::from_str(b.json().get()).expect("failed to deserialize JSON"); - - for (i, a) in a.iter().enumerate() { - let c = serde_json::to_value(c[i].json()).expect("failed to deserialize JSON to string"); - assert_eq!(*a, c); - } -} - -#[test] -fn ser_array() { - let a: u64 = 123_456; - let b: u64 = 987_654; - - let arr: &[u64] = &[a, b]; - let vec: Vec = vec![a, b]; - let arv: ArrayVec = [a, b].into(); - - let mut v = Vec::new(); - v.extend_from_slice(&a.to_be_bytes()); - v.extend_from_slice(&b.to_be_bytes()); - - let s = serialize_to_vec(arr).expect("failed to serialize"); - assert_eq!(&s, &v, "serialization does not match"); - - let s = serialize_to_vec(arv.as_slice()).expect("failed to serialize arrayvec"); - assert_eq!(&s, &v, "arrayvec serialization does not match"); - - let s = serialize_to_vec(&vec).expect("failed to serialize vec"); - assert_eq!(&s, &v, "vec serialization does not match"); -} - -#[test] -#[ignore] -fn de_array() { - let a: u64 = 123_456; - let b: u64 = 987_654; - - let mut v: Vec = Vec::new(); - v.extend_from_slice(&a.to_be_bytes()); - v.extend_from_slice(&b.to_be_bytes()); - - let arv: ArrayVec = de::from_slice::>(v.as_slice()) - .map(TryInto::try_into) - .expect("failed to deserialize to arrayvec") - .expect("failed to deserialize into"); - - assert_eq!(arv[0], a, "deserialized arv [0] does not match"); - assert_eq!(arv[1], b, "deserialized arv [1] does not match"); - - let arr: [u64; 2] = de::from_slice::<[u64; 2]>(v.as_slice()) - .map(TryInto::try_into) - .expect("failed to deserialize to array") - .expect("failed to deserialize into"); - - assert_eq!(arr[0], a, "deserialized arr [0] does not match"); - assert_eq!(arr[1], b, "deserialized arr [1] does not match"); - - let vec: Vec = de::from_slice(v.as_slice()).expect("failed to 
deserialize to vec"); - - assert_eq!(vec[0], a, "deserialized vec [0] does not match"); - assert_eq!(vec[1], b, "deserialized vec [1] does not match"); -} - -#[test] -#[ignore] -fn de_complex() { - type Key<'a> = (&'a UserId, ArrayVec, &'a RoomId); - - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - let a: u64 = 123_456; - let b: u64 = 987_654; - - let mut v = Vec::new(); - v.extend_from_slice(user_id.as_bytes()); - v.extend_from_slice(b"\xFF"); - v.extend_from_slice(&a.to_be_bytes()); - v.extend_from_slice(&b.to_be_bytes()); - v.extend_from_slice(b"\xFF"); - v.extend_from_slice(room_id.as_bytes()); - - let arr: &[u64] = &[a, b]; - let key = (user_id, arr, room_id); - let s = serialize_to_vec(&key).expect("failed to serialize"); - - assert_eq!(&s, &v, "serialization does not match"); - - let key = (user_id, [a, b].into(), room_id); - let arr: Key<'_> = de::from_slice(&v).expect("failed to deserialize"); - - assert_eq!(arr, key, "deserialization does not match"); - - let arr: Key<'_> = de::from_slice(&s).expect("failed to deserialize"); - - assert_eq!(arr, key, "deserialization of serialization does not match"); -} - -#[test] -fn serde_tuple_option_value_some() { - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - - let mut aa = Vec::::new(); - aa.extend_from_slice(room_id.as_bytes()); - aa.push(0xFF); - aa.extend_from_slice(user_id.as_bytes()); - - let bb: (&RoomId, Option<&UserId>) = (room_id, Some(user_id)); - let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); - assert_eq!(aa, bbs); - - let cc: (&RoomId, Option<&UserId>) = - de::from_slice(&bbs).expect("failed to deserialize tuple"); - - assert_eq!(bb.1, cc.1); - assert_eq!(cc.0, bb.0); -} - -#[test] -fn serde_tuple_option_value_none() { - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - - let mut aa = Vec::::new(); 
- aa.extend_from_slice(room_id.as_bytes()); - aa.push(0xFF); - - let bb: (&RoomId, Option<&UserId>) = (room_id, None); - let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); - assert_eq!(aa, bbs); - - let cc: (&RoomId, Option<&UserId>) = - de::from_slice(&bbs).expect("failed to deserialize tuple"); - - assert_eq!(None, cc.1); - assert_eq!(cc.0, bb.0); -} - -#[test] -fn serde_tuple_option_none_value() { - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - - let mut aa = Vec::::new(); - aa.push(0xFF); - aa.extend_from_slice(user_id.as_bytes()); - - let bb: (Option<&RoomId>, &UserId) = (None, user_id); - let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); - assert_eq!(aa, bbs); - - let cc: (Option<&RoomId>, &UserId) = - de::from_slice(&bbs).expect("failed to deserialize tuple"); - - assert_eq!(None, cc.0); - assert_eq!(cc.1, bb.1); -} - -#[test] -fn serde_tuple_option_some_value() { - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - - let mut aa = Vec::::new(); - aa.extend_from_slice(room_id.as_bytes()); - aa.push(0xFF); - aa.extend_from_slice(user_id.as_bytes()); - - let bb: (Option<&RoomId>, &UserId) = (Some(room_id), user_id); - let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); - assert_eq!(aa, bbs); - - let cc: (Option<&RoomId>, &UserId) = - de::from_slice(&bbs).expect("failed to deserialize tuple"); - - assert_eq!(bb.0, cc.0); - assert_eq!(cc.1, bb.1); -} - -#[test] -fn serde_tuple_option_some_some() { - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - - let mut aa = Vec::::new(); - aa.extend_from_slice(room_id.as_bytes()); - aa.push(0xFF); - aa.extend_from_slice(user_id.as_bytes()); - - let bb: (Option<&RoomId>, Option<&UserId>) = (Some(room_id), Some(user_id)); - let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); - 
assert_eq!(aa, bbs); - - let cc: (Option<&RoomId>, Option<&UserId>) = - de::from_slice(&bbs).expect("failed to deserialize tuple"); - - assert_eq!(cc.0, bb.0); - assert_eq!(bb.1, cc.1); -} - -#[test] -fn serde_tuple_option_none_none() { - let aa = vec![0xFF]; - - let bb: (Option<&RoomId>, Option<&UserId>) = (None, None); - let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); - assert_eq!(aa, bbs); - - let cc: (Option<&RoomId>, Option<&UserId>) = - de::from_slice(&bbs).expect("failed to deserialize tuple"); - - assert_eq!(cc.0, bb.0); - assert_eq!(None, cc.1); -} - -#[test] -fn serde_tuple_option_some_none_some() { - let room_id: &RoomId = "!room:example.com".try_into().unwrap(); - let user_id: &UserId = "@user:example.com".try_into().unwrap(); - - let mut aa = Vec::::new(); - aa.extend_from_slice(room_id.as_bytes()); - aa.push(0xFF); - aa.push(0xFF); - aa.extend_from_slice(user_id.as_bytes()); - - let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = - (Some(room_id), None, Some(user_id)); - - let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); - assert_eq!(aa, bbs); - - let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = - de::from_slice(&bbs).expect("failed to deserialize tuple"); - - assert_eq!(bb.0, cc.0); - assert_eq!(None, cc.1); - assert_eq!(bb.1, cc.1); - assert_eq!(bb.2, cc.2); -} - -#[test] -fn serde_tuple_option_none_none_none() { - let aa = vec![0xFF, 0xFF]; - - let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = (None, None, None); - let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); - assert_eq!(aa, bbs); - - let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = - de::from_slice(&bbs).expect("failed to deserialize tuple"); - - assert_eq!(None, cc.0); - assert_eq!(bb, cc); -} diff --git a/src/database/util.rs b/src/database/util.rs deleted file mode 100644 index caef94f1..00000000 --- a/src/database/util.rs +++ /dev/null @@ -1,78 +0,0 @@ -use conduwuit::Result; 
-use rocksdb::{Direction, ErrorKind, IteratorMode}; - -//#[cfg(debug_assertions)] -macro_rules! unhandled { - ($msg:literal) => { - unimplemented!($msg) - }; -} - -// activate when stable; we're not ready for this yet -#[cfg(disable)] // #[cfg(not(debug_assertions))] -macro_rules! unhandled { - ($msg:literal) => { - // SAFETY: Eliminates branches for serializing and deserializing types never - // encountered in the codebase. This can promote optimization and reduce - // codegen. The developer must verify for every invoking callsite that the - // unhandled type is in no way involved and could not possibly be encountered. - unsafe { - std::hint::unreachable_unchecked(); - } - }; -} - -pub(crate) use unhandled; - -#[inline] -pub(crate) fn _into_direction(mode: &IteratorMode<'_>) -> Direction { - use Direction::{Forward, Reverse}; - use IteratorMode::{End, From, Start}; - - match mode { - | Start | From(_, Forward) => Forward, - | End | From(_, Reverse) => Reverse, - } -} - -#[inline] -pub(crate) fn result( - r: std::result::Result, -) -> Result { - r.map_or_else(or_else, and_then) -} - -#[inline(always)] -pub(crate) fn and_then(t: T) -> Result { Ok(t) } - -pub(crate) fn or_else(e: rocksdb::Error) -> Result { Err(map_err(e)) } - -#[inline] -pub(crate) fn is_incomplete(e: &rocksdb::Error) -> bool { e.kind() == ErrorKind::Incomplete } - -pub(crate) fn map_err(e: rocksdb::Error) -> conduwuit::Error { - let kind = io_error_kind(&e.kind()); - let string = e.into_string(); - - std::io::Error::new(kind, string).into() -} - -fn io_error_kind(e: &ErrorKind) -> std::io::ErrorKind { - use std::io; - - match e { - | ErrorKind::NotFound => io::ErrorKind::NotFound, - | ErrorKind::Corruption => io::ErrorKind::InvalidData, - | ErrorKind::InvalidArgument => io::ErrorKind::InvalidInput, - | ErrorKind::Aborted => io::ErrorKind::Interrupted, - | ErrorKind::NotSupported => io::ErrorKind::Unsupported, - | ErrorKind::CompactionTooLarge => io::ErrorKind::FileTooLarge, - | 
ErrorKind::MergeInProgress | ErrorKind::Busy => io::ErrorKind::ResourceBusy, - | ErrorKind::Expired | ErrorKind::TimedOut => io::ErrorKind::TimedOut, - | ErrorKind::Incomplete | ErrorKind::TryAgain => io::ErrorKind::WouldBlock, - | ErrorKind::ColumnFamilyDropped - | ErrorKind::ShutdownInProgress - | ErrorKind::IOError - | ErrorKind::Unknown => io::ErrorKind::Other, - } -} diff --git a/src/database/watchers.rs b/src/database/watchers.rs index b3907833..9707e64b 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap, hash_map}, + collections::{hash_map, HashMap}, future::Future, pin::Pin, sync::RwLock, @@ -10,18 +10,15 @@ use tokio::sync::watch; type Watcher = RwLock, (watch::Sender<()>, watch::Receiver<()>)>>; #[derive(Default)] -pub(crate) struct Watchers { +pub(super) struct Watchers { watchers: Watcher, } impl Watchers { - pub(crate) fn watch<'a>( - &'a self, - prefix: &[u8], - ) -> Pin + Send + 'a>> { + pub(super) fn watch<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) { - | hash_map::Entry::Occupied(o) => o.get().1.clone(), - | hash_map::Entry::Vacant(v) => { + hash_map::Entry::Occupied(o) => o.get().1.clone(), + hash_map::Entry::Vacant(v) => { let (tx, rx) = watch::channel(()); v.insert((tx, rx.clone())); rx @@ -34,7 +31,7 @@ impl Watchers { }) } - pub(crate) fn wake(&self, key: &[u8]) { + pub(super) fn wake(&self, key: &[u8]) { let watchers = self.watchers.read().unwrap(); let mut triggered = Vec::new(); @@ -50,9 +47,9 @@ impl Watchers { let mut watchers = self.watchers.write().unwrap(); for prefix in triggered { if let Some(tx) = watchers.remove(prefix) { - tx.0.send(()).expect("channel should still be open"); + _ = tx.0.send(()); } } - } + }; } } diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 00000000..e287f1cb --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,27 @@ +pub mod api; +pub mod clap; +mod config; 
+mod database; +mod service; +mod utils; + +// Not async due to services() being used in many closures, and async closures +// are not stable as of writing This is the case for every other occurence of +// sync Mutex/RwLock, except for database related ones, where the current +// maintainer (Timo) has asked to not modify those +use std::sync::RwLock; + +pub use api::ruma_wrapper::{Ruma, RumaResponse}; +pub use config::Config; +pub use database::KeyValueDatabase; +pub use service::{pdu::PduEvent, Services}; +pub use utils::error::{Error, Result}; + +pub static SERVICES: RwLock>> = RwLock::new(None); + +pub fn services() -> &'static Services<'static> { + SERVICES + .read() + .unwrap() + .expect("SERVICES should be initialized when this is called") +} diff --git a/src/macros/Cargo.toml b/src/macros/Cargo.toml deleted file mode 100644 index 167de8c0..00000000 --- a/src/macros/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "conduwuit_macros" -categories.workspace = true -description.workspace = true -edition.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version.workspace = true - -[lib] -name = "conduwuit_macros" -path = "mod.rs" -proc-macro = true - -[dependencies] -syn.workspace = true -quote.workspace = true -proc-macro2.workspace = true -itertools.workspace = true - -[lints] -workspace = true diff --git a/src/macros/admin.rs b/src/macros/admin.rs deleted file mode 100644 index fe227b43..00000000 --- a/src/macros/admin.rs +++ /dev/null @@ -1,75 +0,0 @@ -use itertools::Itertools; -use proc_macro::{Span, TokenStream}; -use proc_macro2::TokenStream as TokenStream2; -use quote::{ToTokens, quote}; -use syn::{Attribute, Error, Fields, Ident, ItemEnum, ItemFn, Meta, Variant, parse_quote}; - -use crate::{Result, utils::camel_to_snake_string}; - -pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result { - let attr: Attribute = parse_quote! 
{ - #[conduwuit_macros::implement(crate::Context, params = "<'_>")] - }; - - item.attrs.push(attr); - Ok(item.into_token_stream().into()) -} - -pub(super) fn command_dispatch(item: ItemEnum, _args: &[Meta]) -> Result { - let name = &item.ident; - let arm: Vec = item.variants.iter().map(dispatch_arm).try_collect()?; - let switch = quote! { - #[allow(clippy::large_stack_frames)] //TODO: fixme - pub(super) async fn process( - command: #name, - context: &crate::Context<'_> - ) -> Result { - use #name::*; - #[allow(non_snake_case)] - match command { - #( #arm )* - } - } - }; - - Ok([item.into_token_stream(), switch] - .into_iter() - .collect::() - .into()) -} - -fn dispatch_arm(v: &Variant) -> Result { - let name = &v.ident; - let target = camel_to_snake_string(&format!("{name}")); - let handler = Ident::new(&target, Span::call_site().into()); - let res = match &v.fields { - | Fields::Named(fields) => { - let field = fields.named.iter().filter_map(|f| f.ident.as_ref()); - let arg = field.clone(); - quote! { - #name { #( #field ),* } => { - Box::pin(context.#handler(#( #arg ),*)).await - }, - } - }, - | Fields::Unnamed(fields) => { - let Some(ref field) = fields.unnamed.first() else { - return Err(Error::new(Span::call_site().into(), "One unnamed field required")); - }; - quote! { - #name ( #field ) => { - Box::pin(#handler::process(#field, context)).await - } - } - }, - | Fields::Unit => { - quote! 
{ - #name => { - Box::pin(context.#handler()).await - }, - } - }, - }; - - Ok(res) -} diff --git a/src/macros/cargo.rs b/src/macros/cargo.rs deleted file mode 100644 index a452c672..00000000 --- a/src/macros/cargo.rs +++ /dev/null @@ -1,47 +0,0 @@ -use std::{fs::read_to_string, path::PathBuf}; - -use proc_macro::{Span, TokenStream}; -use quote::quote; -use syn::{Error, ItemConst, Meta}; - -use crate::{Result, utils}; - -pub(super) fn manifest(item: ItemConst, args: &[Meta]) -> Result { - let member = utils::get_named_string(args, "crate"); - let path = manifest_path(member.as_deref())?; - let manifest = read_to_string(&path).unwrap_or_default(); - let val = manifest.as_str(); - let name = item.ident; - let ret = quote! { - const #name: &'static str = #val; - }; - - Ok(ret.into()) -} - -#[allow(clippy::option_env_unwrap)] -fn manifest_path(member: Option<&str>) -> Result { - let Some(path) = option_env!("CARGO_MANIFEST_DIR") else { - return Err(Error::new( - Span::call_site().into(), - "missing CARGO_MANIFEST_DIR in environment", - )); - }; - - let mut path: PathBuf = path.into(); - - // conduwuit/src/macros/ -> conduwuit/src/ - path.pop(); - - if let Some(member) = member { - // conduwuit/$member/Cargo.toml - path.push(member); - } else { - // conduwuit/src/ -> conduwuit/ - path.pop(); - } - - path.push("Cargo.toml"); - - Ok(path) -} diff --git a/src/macros/config.rs b/src/macros/config.rs deleted file mode 100644 index 7b424325..00000000 --- a/src/macros/config.rs +++ /dev/null @@ -1,282 +0,0 @@ -use std::{collections::HashSet, fmt::Write as _, fs::OpenOptions, io::Write as _}; - -use proc_macro::TokenStream; -use proc_macro2::{Span, TokenStream as TokenStream2}; -use quote::{ToTokens, quote}; -use syn::{ - Error, Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, MetaList, - MetaNameValue, Type, TypePath, parse::Parser, punctuated::Punctuated, spanned::Spanned, -}; - -use crate::{ - Result, - utils::{get_simple_settings, is_cargo_build, 
is_cargo_test}, -}; - -const UNDOCUMENTED: &str = "# This item is undocumented. Please contribute documentation for it."; - -const HIDDEN: &[&str] = &["default", "display"]; - -#[allow(clippy::needless_pass_by_value)] -pub(super) fn example_generator(input: ItemStruct, args: &[Meta]) -> Result { - let write = is_cargo_build() && !is_cargo_test(); - let additional = generate_example(&input, args, write)?; - - Ok([input.to_token_stream(), additional] - .into_iter() - .collect::() - .into()) -} - -#[allow(clippy::needless_pass_by_value)] -#[allow(unused_variables)] -fn generate_example(input: &ItemStruct, args: &[Meta], write: bool) -> Result { - let settings = get_simple_settings(args); - - let section = settings.get("section").ok_or_else(|| { - Error::new(args[0].span(), "missing required 'section' attribute argument") - })?; - - let filename = settings.get("filename").ok_or_else(|| { - Error::new(args[0].span(), "missing required 'filename' attribute argument") - })?; - - let undocumented = settings - .get("undocumented") - .map_or(UNDOCUMENTED, String::as_str); - - let ignore: HashSet<&str> = settings - .get("ignore") - .map_or("", String::as_str) - .split(' ') - .collect(); - - let fopts = OpenOptions::new() - .write(true) - .create(section == "global") - .truncate(section == "global") - .append(section != "global") - .clone(); - - let mut file = write - .then(|| { - fopts.open(filename).map_err(|e| { - let msg = format!("Failed to open file for config generation: {e}"); - Error::new(Span::call_site(), msg) - }) - }) - .transpose()?; - - if let Some(file) = file.as_mut() { - if let Some(header) = settings.get("header") { - file.write_all(header.as_bytes()) - .expect("written to config file"); - } - - file.write_fmt(format_args!("\n[{section}]\n")) - .expect("written to config file"); - } - - let mut summary: Vec = Vec::new(); - if let Fields::Named(FieldsNamed { named, .. 
}) = &input.fields { - for field in named { - let Some(ident) = &field.ident else { - continue; - }; - - if ignore.contains(ident.to_string().as_str()) { - continue; - } - - let Some(type_name) = get_type_name(field) else { - continue; - }; - - let doc = get_doc_comment(field) - .unwrap_or_else(|| undocumented.into()) - .trim_end() - .to_owned(); - - let doc = if doc.ends_with('#') { - format!("{doc}\n") - } else { - format!("{doc}\n#\n") - }; - - let default = get_doc_comment_line(field, "default") - .or_else(|| get_default(field)) - .unwrap_or_default(); - - let default = if !default.is_empty() { - format!(" {default}") - } else { - default - }; - - if let Some(file) = file.as_mut() { - file.write_fmt(format_args!("\n{doc}")) - .expect("written to config file"); - - file.write_fmt(format_args!("#{ident} ={default}\n")) - .expect("written to config file"); - } - - let display = get_doc_comment_line(field, "display"); - let display_directive = |key| { - display - .as_ref() - .into_iter() - .flat_map(|display| display.split(' ')) - .any(|directive| directive == key) - }; - - if !display_directive("hidden") { - let value = if display_directive("sensitive") { - quote! { "***********" } - } else { - quote! { format_args!("{:?}", self.#ident) } - }; - - let name = ident.to_string(); - summary.push(quote! { - writeln!(out, "| {} | {} |", #name, #value)?; - }); - } - } - } - - if let Some(file) = file.as_mut() { - if let Some(footer) = settings.get("footer") { - file.write_all(footer.as_bytes()) - .expect("written to config file"); - } - } - - let struct_name = &input.ident; - let display = quote! { - impl std::fmt::Display for #struct_name { - fn fmt(&self, out: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(out, "| name | value |")?; - writeln!(out, "| :--- | :--- |")?; - #( #summary )* - Ok(()) - } - } - }; - - Ok(display) -} - -fn get_default(field: &Field) -> Option { - for attr in &field.attrs { - let Meta::List(MetaList { path, tokens, .. 
}) = &attr.meta else { - continue; - }; - - if path - .segments - .iter() - .next() - .is_none_or(|s| s.ident != "serde") - { - continue; - } - - let Some(arg) = Punctuated::::parse_terminated - .parse(tokens.clone().into()) - .ok()? - .iter() - .next() - .cloned() - else { - continue; - }; - - match arg { - | Meta::NameValue(MetaNameValue { - value: Expr::Lit(ExprLit { lit: Lit::Str(str), .. }), - .. - }) => { - match str.value().as_str() { - | "HashSet::new" | "Vec::new" | "RegexSet::empty" => Some("[]".to_owned()), - | "true_fn" => return Some("true".to_owned()), - | _ => return None, - }; - }, - | Meta::Path { .. } => return Some("false".to_owned()), - | _ => return None, - } - } - - None -} - -fn get_doc_comment(field: &Field) -> Option { - let comment = get_doc_comment_full(field)?; - - let out = comment - .lines() - .filter(|line| { - !HIDDEN.iter().any(|key| { - line.trim().starts_with(key) && line.trim().chars().nth(key.len()) == Some(':') - }) - }) - .fold(String::new(), |full, line| full + "#" + line + "\n"); - - (!out.is_empty()).then_some(out) -} - -fn get_doc_comment_line(field: &Field, label: &str) -> Option { - let comment = get_doc_comment_full(field)?; - - comment - .lines() - .map(str::trim) - .filter(|line| line.starts_with(label)) - .filter(|line| line.chars().nth(label.len()) == Some(':')) - .map(|line| { - line.split_once(':') - .map(|(_, v)| v) - .map(str::trim) - .map(ToOwned::to_owned) - }) - .next() - .flatten() -} - -fn get_doc_comment_full(field: &Field) -> Option { - let mut out = String::new(); - for attr in &field.attrs { - let Meta::NameValue(MetaNameValue { path, value, .. }) = &attr.meta else { - continue; - }; - - if path.segments.iter().next().is_none_or(|s| s.ident != "doc") { - continue; - } - - let Expr::Lit(ExprLit { lit, .. 
}) = &value else { - continue; - }; - - let Lit::Str(token) = &lit else { - continue; - }; - - let value = token.value(); - writeln!(&mut out, "{value}").expect("wrote to output string buffer"); - } - - (!out.is_empty()).then_some(out) -} - -fn get_type_name(field: &Field) -> Option { - let Type::Path(TypePath { path, .. }) = &field.ty else { - return None; - }; - - path.segments - .iter() - .next() - .map(|segment| segment.ident.to_string()) -} diff --git a/src/macros/debug.rs b/src/macros/debug.rs deleted file mode 100644 index e83fd44e..00000000 --- a/src/macros/debug.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::cmp; - -use proc_macro::TokenStream; -use quote::ToTokens; -use syn::{Item, Meta}; - -use crate::Result; - -pub(super) fn recursion_depth(item: Item, _args: &[Meta]) -> Result { - let mut best: usize = 0; - let mut count: usize = 0; - // think you'd find a fancy recursive ast visitor? think again - let tree = format!("{item:#?}"); - for line in tree.lines() { - let trim = line.trim_start_matches(' '); - let diff = line.len().saturating_sub(trim.len()); - let level = diff / 4; - best = cmp::max(level, best); - count = count.saturating_add(1); - } - - println!("--- Recursion Diagnostic ---"); - println!("DEPTH: {best}"); - println!("LENGTH: {count}"); - - Ok(item.into_token_stream().into()) -} diff --git a/src/macros/implement.rs b/src/macros/implement.rs deleted file mode 100644 index 7acc12d2..00000000 --- a/src/macros/implement.rs +++ /dev/null @@ -1,35 +0,0 @@ -use proc_macro::{Span, TokenStream}; -use quote::quote; -use syn::{Error, ItemFn, Meta, Path}; -use utils::get_named_generics; - -use crate::{Result, utils}; - -pub(super) fn implement(item: ItemFn, args: &[Meta]) -> Result { - let generics = get_named_generics(args, "generics")?; - let receiver = get_receiver(args)?; - let params = get_named_generics(args, "params")?; - let input = item; - let out = quote! 
{ - impl #generics #receiver #params { - #input - } - }; - - Ok(out.into()) -} - -fn get_receiver(args: &[Meta]) -> Result { - let receiver = &args.first().ok_or_else(|| { - Error::new(Span::call_site().into(), "Missing required argument to receiver") - })?; - - let Meta::Path(receiver) = receiver else { - return Err(Error::new( - Span::call_site().into(), - "First argument is not path to receiver", - )); - }; - - Ok(receiver.clone()) -} diff --git a/src/macros/mod.rs b/src/macros/mod.rs deleted file mode 100644 index 31a797fe..00000000 --- a/src/macros/mod.rs +++ /dev/null @@ -1,68 +0,0 @@ -mod admin; -mod cargo; -mod config; -mod debug; -mod implement; -mod refutable; -mod rustc; -mod utils; - -use proc_macro::TokenStream; -use syn::{ - Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, - parse::{Parse, Parser}, - parse_macro_input, -}; - -pub(crate) type Result = std::result::Result; - -#[proc_macro_attribute] -pub fn admin_command(args: TokenStream, input: TokenStream) -> TokenStream { - attribute_macro::(args, input, admin::command) -} - -#[proc_macro_attribute] -pub fn admin_command_dispatch(args: TokenStream, input: TokenStream) -> TokenStream { - attribute_macro::(args, input, admin::command_dispatch) -} - -#[proc_macro_attribute] -pub fn cargo_manifest(args: TokenStream, input: TokenStream) -> TokenStream { - attribute_macro::(args, input, cargo::manifest) -} - -#[proc_macro_attribute] -pub fn recursion_depth(args: TokenStream, input: TokenStream) -> TokenStream { - attribute_macro::(args, input, debug::recursion_depth) -} - -#[proc_macro] -pub fn rustc_flags_capture(args: TokenStream) -> TokenStream { rustc::flags_capture(args) } - -#[proc_macro_attribute] -pub fn refutable(args: TokenStream, input: TokenStream) -> TokenStream { - attribute_macro::(args, input, refutable::refutable) -} - -#[proc_macro_attribute] -pub fn implement(args: TokenStream, input: TokenStream) -> TokenStream { - attribute_macro::(args, input, implement::implement) -} - 
-#[proc_macro_attribute] -pub fn config_example_generator(args: TokenStream, input: TokenStream) -> TokenStream { - attribute_macro::(args, input, config::example_generator) -} - -fn attribute_macro(args: TokenStream, input: TokenStream, func: F) -> TokenStream -where - F: Fn(I, &[Meta]) -> Result, - I: Parse, -{ - let item = parse_macro_input!(input as I); - syn::punctuated::Punctuated::::parse_terminated - .parse(args) - .map(|args| args.iter().cloned().collect::>()) - .and_then(|ref args| func(item, args)) - .unwrap_or_else(|e| e.to_compile_error().into()) -} diff --git a/src/macros/refutable.rs b/src/macros/refutable.rs deleted file mode 100644 index acfc4cd5..00000000 --- a/src/macros/refutable.rs +++ /dev/null @@ -1,45 +0,0 @@ -use proc_macro::{Span, TokenStream}; -use quote::{ToTokens, quote}; -use syn::{FnArg::Typed, Ident, ItemFn, Meta, Pat, PatIdent, PatType, Stmt}; - -use crate::Result; - -pub(super) fn refutable(mut item: ItemFn, _args: &[Meta]) -> Result { - let inputs = item.sig.inputs.clone(); - let stmt = &mut item.block.stmts; - let sig = &mut item.sig; - for (i, input) in inputs.iter().enumerate() { - let Typed(PatType { pat, .. }) = input else { - continue; - }; - - let Pat::Struct(ref pat) = **pat else { - continue; - }; - - let variant = &pat.path; - let fields = &pat.fields; - - let Some(Typed(PatType { pat, .. })) = sig.inputs.get_mut(i) else { - continue; - }; - - let name = format!("_args_{i}"); - *pat = Box::new(Pat::Ident(PatIdent { - ident: Ident::new(&name, Span::call_site().into()), - attrs: Vec::new(), - by_ref: None, - mutability: None, - subpat: None, - })); - - let field = fields.iter(); - let refute = quote! { - let #variant { #( #field ),*, .. 
} = #name else { panic!("incorrect variant passed to function argument {i}"); }; - }; - - stmt.insert(0, syn::parse2::(refute)?); - } - - Ok(item.into_token_stream().into()) -} diff --git a/src/macros/rustc.rs b/src/macros/rustc.rs deleted file mode 100644 index 1220c8d4..00000000 --- a/src/macros/rustc.rs +++ /dev/null @@ -1,29 +0,0 @@ -use proc_macro::TokenStream; -use quote::quote; - -pub(super) fn flags_capture(args: TokenStream) -> TokenStream { - let cargo_crate_name = std::env::var("CARGO_CRATE_NAME"); - let crate_name = match cargo_crate_name.as_ref() { - | Err(_) => return args, - | Ok(crate_name) => crate_name.trim_start_matches("conduwuit_"), - }; - - let flag = std::env::args().collect::>(); - let flag_len = flag.len(); - let ret = quote! { - pub static RUSTC_FLAGS: [&str; #flag_len] = [#( #flag ),*]; - - #[conduwuit_core::ctor] - fn _set_rustc_flags() { - conduwuit_core::info::rustc::FLAGS.lock().expect("locked").insert(#crate_name, &RUSTC_FLAGS); - } - - // static strings have to be yanked on module unload - #[conduwuit_core::dtor] - fn _unset_rustc_flags() { - conduwuit_core::info::rustc::FLAGS.lock().expect("locked").remove(#crate_name); - } - }; - - ret.into() -} diff --git a/src/macros/utils.rs b/src/macros/utils.rs deleted file mode 100644 index a45e5ecc..00000000 --- a/src/macros/utils.rs +++ /dev/null @@ -1,87 +0,0 @@ -use std::collections::HashMap; - -use syn::{Expr, ExprLit, Generics, Lit, Meta, MetaNameValue, parse_str}; - -use crate::Result; - -pub(crate) fn get_simple_settings(args: &[Meta]) -> HashMap { - args.iter().fold(HashMap::new(), |mut map, arg| { - let Meta::NameValue(MetaNameValue { path, value, .. }) = arg else { - return map; - }; - - let Expr::Lit(ExprLit { lit: Lit::Str(str), .. }, ..) 
= value else { - return map; - }; - - if let Some(key) = path.segments.iter().next().map(|s| s.ident.clone()) { - map.insert(key.to_string(), str.value()); - } - - map - }) -} - -pub(crate) fn is_cargo_build() -> bool { - legacy_is_cargo_build() - || std::env::args() - .skip_while(|flag| !flag.starts_with("--emit")) - .nth(1) - .iter() - .flat_map(|flag| flag.split(',')) - .any(|elem| elem == "link") -} - -pub(crate) fn legacy_is_cargo_build() -> bool { - std::env::args() - .find(|flag| flag.starts_with("--emit")) - .as_ref() - .and_then(|flag| flag.split_once('=')) - .map(|val| val.1.split(',')) - .and_then(|mut vals| vals.find(|elem| *elem == "link")) - .is_some() -} - -pub(crate) fn is_cargo_test() -> bool { std::env::args().any(|flag| flag == "--test") } - -pub(crate) fn get_named_generics(args: &[Meta], name: &str) -> Result { - const DEFAULT: &str = "<>"; - - parse_str::(&get_named_string(args, name).unwrap_or_else(|| DEFAULT.to_owned())) -} - -pub(crate) fn get_named_string(args: &[Meta], name: &str) -> Option { - args.iter().find_map(|arg| { - let value = arg.require_name_value().ok()?; - let Expr::Lit(ref lit) = value.value else { - return None; - }; - let Lit::Str(ref str) = lit.lit else { - return None; - }; - value.path.is_ident(name).then_some(str.value()) - }) -} - -#[must_use] -pub(crate) fn camel_to_snake_string(s: &str) -> String { - let mut output = String::with_capacity( - s.chars() - .fold(s.len(), |a, ch| a.saturating_add(usize::from(ch.is_ascii_uppercase()))), - ); - - let mut state = false; - s.chars().for_each(|ch| { - let m = ch.is_ascii_uppercase(); - let s = exchange(&mut state, !m); - if m && s { - output.push('_'); - } - output.push(ch.to_ascii_lowercase()); - }); - - output -} - -#[inline] -pub(crate) fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, source) } diff --git a/src/main.rs b/src/main.rs new file mode 100644 index 00000000..08af119c --- /dev/null +++ b/src/main.rs @@ -0,0 +1,604 @@ +#[cfg(unix)] +use 
std::fs::Permissions; // not unix specific, just only for UNIX sockets stuff and *nix container checks +#[cfg(unix)] +use std::os::unix::fs::PermissionsExt as _; /* not unix specific, just only for UNIX sockets stuff and *nix + * container checks */ +use std::{io, net::SocketAddr, sync::atomic, time::Duration}; + +use axum::{ + extract::{DefaultBodyLimit, MatchedPath}, + response::IntoResponse, + Router, +}; +use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle}; +#[cfg(feature = "axum_dual_protocol")] +use axum_server_dual_protocol::ServerExt; +pub use conduit::*; // Re-export everything from the library crate +use http::{ + header::{self, HeaderName}, + Method, StatusCode, +}; +#[cfg(unix)] +use hyperlocal::SocketIncoming; +use ruma::api::client::{ + error::{Error as RumaError, ErrorBody, ErrorKind}, + uiaa::UiaaResponse, +}; +use tokio::{ + signal, + sync::oneshot::{self, Sender}, + task::JoinSet, +}; +use tower::ServiceBuilder; +use tower_http::{ + cors::{self, CorsLayer}, + trace::{DefaultOnFailure, TraceLayer}, + ServiceBuilderExt as _, +}; +use tracing::{debug, error, info, warn, Level}; +use tracing_subscriber::{prelude::*, reload, EnvFilter, Registry}; + +mod routes; + +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc", not(feature = "hardened_malloc")))] +#[global_allocator] +static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; + +#[cfg(all( + not(target_env = "msvc"), + not(target_os = "macos"), + feature = "hardened_malloc", + target_os = "linux", + not(feature = "jemalloc") +))] +#[global_allocator] +static GLOBAL: hardened_malloc_rs::HardenedMalloc = hardened_malloc_rs::HardenedMalloc; + +struct Server { + config: Config, + + runtime: tokio::runtime::Runtime, + + tracing_reload_handle: reload::Handle, + + #[cfg(feature = "sentry_telemetry")] + _sentry_guard: Option, +} + +fn main() -> Result<(), Error> { + let args = clap::parse(); + let conduwuit: Server = init(args)?; + + conduwuit + 
.runtime + .block_on(async { async_main(&conduwuit).await }) +} + +async fn async_main(server: &Server) -> Result<(), Error> { + if let Err(error) = start(server).await { + error!("Critical error starting server: {error}"); + return Err(Error::Error(format!("{error}"))); + } + + if let Err(error) = run(server).await { + error!("Critical error running server: {error}"); + return Err(Error::Error(format!("{error}"))); + }; + + if let Err(error) = stop(server).await { + error!("Critical error stopping server: {error}"); + return Err(Error::Error(format!("{error}"))); + } + + Ok(()) +} + +async fn run(server: &Server) -> io::Result<()> { + let app = build(server).await?; + let (tx, rx) = oneshot::channel::<()>(); + let handle = ServerHandle::new(); + tokio::spawn(shutdown(handle.clone(), tx)); + + #[cfg(unix)] + if server.config.unix_socket_path.is_some() { + return run_unix_socket_server(server, app, rx).await; + } + + let addrs = server.config.get_bind_addrs(); + if server.config.tls.is_some() { + return run_tls_server(server, app, handle, addrs).await; + } + + let mut join_set = JoinSet::new(); + for addr in &addrs { + join_set.spawn(bind(*addr).handle(handle.clone()).serve(app.clone())); + } + + #[allow(clippy::let_underscore_untyped)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "systemd")] + let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); + + info!("Listening on {:?}", addrs); + join_set.join_next().await; + + Ok(()) +} + +async fn run_tls_server( + server: &Server, app: axum::routing::IntoMakeService, handle: ServerHandle, addrs: Vec, +) -> io::Result<()> { + let tls = server.config.tls.as_ref().unwrap(); + + debug!( + "Using direct TLS. Certificate path {} and certificate private key path {}", + &tls.certs, &tls.key + ); + info!( + "Note: It is strongly recommended that you use a reverse proxy instead of running conduwuit directly with TLS." 
+ ); + let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?; + + if cfg!(feature = "axum_dual_protocol") { + info!( + "conduwuit was built with axum_dual_protocol feature to listen on both HTTP and HTTPS. This will only \ + take affect if `dual_protocol` is enabled in `[global.tls]`" + ); + } + + let mut join_set = JoinSet::new(); + + if cfg!(feature = "axum_dual_protocol") && tls.dual_protocol { + #[cfg(feature = "axum_dual_protocol")] + for addr in &addrs { + join_set.spawn( + axum_server_dual_protocol::bind_dual_protocol(*addr, conf.clone()) + .set_upgrade(false) + .handle(handle.clone()) + .serve(app.clone()), + ); + } + } else { + for addr in &addrs { + join_set.spawn( + bind_rustls(*addr, conf.clone()) + .handle(handle.clone()) + .serve(app.clone()), + ); + } + } + + #[allow(clippy::let_underscore_untyped)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "systemd")] + let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); + + if cfg!(feature = "axum_dual_protocol") && tls.dual_protocol { + warn!( + "Listening on {:?} with TLS certificate {} and supporting plain text (HTTP) connections too (insecure!)", + addrs, &tls.certs + ); + } else { + info!("Listening on {:?} with TLS certificate {}", addrs, &tls.certs); + } + + join_set.join_next().await; + + Ok(()) +} + +#[cfg(unix)] +async fn run_unix_socket_server( + server: &Server, app: axum::routing::IntoMakeService, rx: oneshot::Receiver<()>, +) -> io::Result<()> { + let path = server.config.unix_socket_path.as_ref().unwrap(); + + if path.exists() { + warn!( + "UNIX socket path {:#?} already exists (unclean shutdown?), attempting to remove it.", + path.display() + ); + tokio::fs::remove_file(&path).await?; + } + + tokio::fs::create_dir_all(path.parent().unwrap()).await?; + + let socket_perms = server.config.unix_socket_perms.to_string(); + let octal_perms = u32::from_str_radix(&socket_perms, 8).unwrap(); + + let listener = 
tokio::net::UnixListener::bind(path.clone())?; + tokio::fs::set_permissions(path, Permissions::from_mode(octal_perms)) + .await + .unwrap(); + let socket = SocketIncoming::from_listener(listener); + + #[allow(clippy::let_underscore_untyped)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "systemd")] + let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); + info!("Listening at {:?}", path); + let server = hyper::Server::builder(socket).serve(app); + let graceful = server.with_graceful_shutdown(async { + rx.await.ok(); + }); + + if let Err(e) = graceful.await { + error!("Server error: {:?}", e); + } + + Ok(()) +} + +async fn shutdown(handle: ServerHandle, tx: Sender<()>) -> Result<()> { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install SIGTERM handler") + .recv() + .await; + }; + + let sig: &str; + #[cfg(unix)] + tokio::select! { + () = ctrl_c => { sig = "Ctrl+C"; }, + () = terminate => { sig = "SIGTERM"; }, + } + #[cfg(not(unix))] + tokio::select! 
{ + _ = ctrl_c => { sig = "Ctrl+C"; }, + } + + warn!("Received {}, shutting down...", sig); + handle.graceful_shutdown(Some(Duration::from_secs(180))); + services().globals.shutdown(); + + #[allow(clippy::let_underscore_untyped)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "systemd")] + let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]); + + tx.send(()).expect( + "failed sending shutdown transaction to oneshot channel (this is unlikely a conduwuit bug and more so your \ + system may not be in an okay/ideal state.)", + ); + + Ok(()) +} + +async fn stop(_server: &Server) -> io::Result<()> { + info!("Shutdown complete."); + + Ok(()) +} + +/// Async initializations +async fn start(server: &Server) -> Result<(), Error> { + KeyValueDatabase::load_or_create(server.config.clone(), server.tracing_reload_handle.clone()).await?; + + Ok(()) +} + +async fn build(server: &Server) -> io::Result> { + let base_middlewares = ServiceBuilder::new(); + #[cfg(feature = "sentry_telemetry")] + let base_middlewares = base_middlewares.layer(sentry_tower::NewSentryLayer::>::new_from_top()); + + let x_forwarded_for = HeaderName::from_static("x-forwarded-for"); + let middlewares = base_middlewares + .sensitive_headers([header::AUTHORIZATION]) + .sensitive_request_headers([x_forwarded_for].into()) + .layer(axum::middleware::from_fn(request_spawn)) + .layer( + TraceLayer::new_for_http() + .make_span_with(tracing_span::<_>) + .on_failure(DefaultOnFailure::new().level(Level::INFO)), + ) + .layer(axum::middleware::from_fn(request_handler)) + .layer(cors_layer(server)) + .layer(DefaultBodyLimit::max( + server + .config + .max_request_size + .try_into() + .expect("failed to convert max request size"), + )); + + #[cfg(any(feature = "zstd_compression", feature = "gzip_compression", feature = "brotli_compression"))] + { + Ok(routes::routes(&server.config) + .layer(compression_layer(server)) + .layer(middlewares) + .into_make_service()) + } + 
#[cfg(not(any(feature = "zstd_compression", feature = "gzip_compression", feature = "brotli_compression")))] + { + Ok(routes::routes().layer(middlewares).into_make_service()) + } +} + +async fn request_spawn( + req: http::Request, next: axum::middleware::Next, +) -> Result { + if services().globals.shutdown.load(atomic::Ordering::Relaxed) { + return Err(StatusCode::SERVICE_UNAVAILABLE); + } + tokio::spawn(next.run(req)) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) +} + +async fn request_handler( + req: http::Request, next: axum::middleware::Next, +) -> Result { + let method = req.method().clone(); + let uri = req.uri().clone(); + let inner = next.run(req).await; + if inner.status() == StatusCode::METHOD_NOT_ALLOWED { + if uri.path().contains("_matrix/") { + warn!("Method not allowed: {method} {uri}"); + } else { + info!("Method not allowed: {method} {uri}"); + } + return Ok(RumaResponse(UiaaResponse::MatrixError(RumaError { + body: ErrorBody::Standard { + kind: ErrorKind::Unrecognized, + message: "M_UNRECOGNIZED: Method not allowed for endpoint".to_owned(), + }, + status_code: StatusCode::METHOD_NOT_ALLOWED, + })) + .into_response()); + } + + Ok(inner) +} + +fn cors_layer(_server: &Server) -> CorsLayer { + let methods = [ + Method::GET, + Method::HEAD, + Method::POST, + Method::PUT, + Method::DELETE, + Method::OPTIONS, + ]; + + let headers = [ + header::ORIGIN, + HeaderName::from_static("x-requested-with"), + header::CONTENT_TYPE, + header::ACCEPT, + header::AUTHORIZATION, + ]; + + CorsLayer::new() + .allow_origin(cors::Any) + .allow_methods(methods) + .allow_headers(headers) + .max_age(Duration::from_secs(86400)) +} + +#[cfg(any(feature = "zstd_compression", feature = "gzip_compression", feature = "brotli_compression"))] +fn compression_layer(server: &Server) -> tower_http::compression::CompressionLayer { + let mut compression_layer = tower_http::compression::CompressionLayer::new(); + + #[cfg(feature = "zstd_compression")] + { + if 
server.config.zstd_compression { + compression_layer = compression_layer.zstd(true); + } else { + compression_layer = compression_layer.no_zstd(); + }; + }; + + #[cfg(feature = "gzip_compression")] + { + if server.config.gzip_compression { + compression_layer = compression_layer.gzip(true); + } else { + compression_layer = compression_layer.no_gzip(); + }; + }; + + #[cfg(feature = "brotli_compression")] + { + if server.config.brotli_compression { + compression_layer = compression_layer.br(true); + } else { + compression_layer = compression_layer.no_br(); + }; + }; + + compression_layer +} + +fn tracing_span(request: &http::Request) -> tracing::Span { + let path = if let Some(path) = request.extensions().get::() { + path.as_str() + } else { + request.uri().path() + }; + + tracing::info_span!("handle", %path) +} + +/// Non-async initializations +fn init(args: clap::Args) -> Result { + let config = Config::new(args.config)?; + + #[cfg(feature = "sentry_telemetry")] + let sentry_guard = if config.sentry { + Some(init_sentry(&config)) + } else { + None + }; + + let tracing_reload_handle; + + #[cfg(feature = "perf_measurements")] + { + tracing_reload_handle = if config.allow_jaeger { + init_tracing_jaeger(&config) + } else if config.tracing_flame { + #[cfg(feature = "perf_measurements")] + init_tracing_flame(&config) + } else { + init_tracing_sub(&config) + }; + }; + + #[cfg(not(feature = "perf_measurements"))] + { + tracing_reload_handle = init_tracing_sub(&config); + }; + + info!( + server_name = ?config.server_name, + database_path = ?config.database_path, + log_levels = ?config.log, + "{}", + env!("CARGO_PKG_VERSION"), + ); + + #[cfg(unix)] + maximize_fd_limit().expect("Unable to increase maximum soft and hard file descriptor limit"); + + Ok(Server { + config, + + runtime: tokio::runtime::Builder::new_multi_thread() + .enable_io() + .enable_time() + .thread_name("conduwuit:worker") + .worker_threads(num_cpus::get_physical()) + .build() + .unwrap(), + + 
tracing_reload_handle, + + #[cfg(feature = "sentry_telemetry")] + _sentry_guard: sentry_guard, + }) +} + +#[cfg(feature = "sentry_telemetry")] +fn init_sentry(config: &Config) -> sentry::ClientInitGuard { + sentry::init(( + "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536", + sentry::ClientOptions { + release: sentry::release_name!(), + traces_sample_rate: config.sentry_traces_sample_rate, + server_name: if config.sentry_send_server_name { + Some(config.server_name.to_string().into()) + } else { + None + }, + ..Default::default() + }, + )) +} + +fn init_tracing_sub(config: &Config) -> reload::Handle { + let registry = Registry::default(); + let fmt_layer = tracing_subscriber::fmt::Layer::new(); + let filter_layer = match EnvFilter::try_new(&config.log) { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your config is invalid. The following error occured while parsing it: {e}"); + EnvFilter::try_new("warn").unwrap() + }, + }; + + let (reload_filter, reload_handle) = reload::Layer::new(filter_layer); + + #[cfg(feature = "sentry_telemetry")] + let sentry_layer = sentry_tracing::layer(); + + let subscriber; + + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "sentry_telemetry")] + { + subscriber = registry + .with(reload_filter) + .with(fmt_layer) + .with(sentry_layer); + }; + + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(not(feature = "sentry_telemetry"))] + { + subscriber = registry.with(reload_filter).with(fmt_layer); + }; + + tracing::subscriber::set_global_default(subscriber).unwrap(); + + reload_handle +} + +#[cfg(feature = "perf_measurements")] +fn init_tracing_jaeger(config: &Config) -> reload::Handle { + opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); + let tracer = opentelemetry_jaeger::new_agent_pipeline() + 
.with_auto_split_batch(true) + .with_service_name("conduwuit") + .install_batch(opentelemetry_sdk::runtime::Tokio) + .unwrap(); + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + + let filter_layer = match EnvFilter::try_new(&config.log) { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your log config is invalid. The following error occurred: {e}"); + EnvFilter::try_new("warn").unwrap() + }, + }; + + let (reload_filter, reload_handle) = reload::Layer::new(filter_layer); + + let subscriber = Registry::default().with(reload_filter).with(telemetry); + + tracing::subscriber::set_global_default(subscriber).unwrap(); + + reload_handle +} + +#[cfg(feature = "perf_measurements")] +fn init_tracing_flame(_config: &Config) -> reload::Handle { + let registry = Registry::default(); + let (flame_layer, _guard) = tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap(); + let flame_layer = flame_layer.with_empty_samples(false); + + let filter_layer = EnvFilter::new("trace,h2=off"); + + let (reload_filter, reload_handle) = reload::Layer::new(filter_layer); + + let subscriber = registry.with(reload_filter).with(flame_layer); + + tracing::subscriber::set_global_default(subscriber).unwrap(); + + reload_handle +} + +// This is needed for opening lots of file descriptors, which tends to +// happen more often when using RocksDB and making lots of federation +// connections at startup. The soft limit is usually 1024, and the hard +// limit is usually 512000; I've personally seen it hit >2000. 
+// +// * https://www.freedesktop.org/software/systemd/man/systemd.exec.html#id-1.12.2.1.17.6 +// * https://github.com/systemd/systemd/commit/0abf94923b4a95a7d89bc526efc84e7ca2b71741 +#[cfg(unix)] +fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { + use nix::sys::resource::{getrlimit, setrlimit, Resource::RLIMIT_NOFILE as NOFILE}; + + let (soft_limit, hard_limit) = getrlimit(NOFILE)?; + if soft_limit < hard_limit { + setrlimit(NOFILE, hard_limit, hard_limit)?; + assert_eq!((hard_limit, hard_limit), getrlimit(NOFILE)?, "getrlimit != setrlimit"); + debug!(to = hard_limit, from = soft_limit, "Raised RLIMIT_NOFILE",); + } + + Ok(()) +} diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml deleted file mode 100644 index 0c5e2b6f..00000000 --- a/src/main/Cargo.toml +++ /dev/null @@ -1,196 +0,0 @@ -[package] -name = "conduwuit" -default-run = "conduwuit" -authors.workspace = true -categories.workspace = true -description.workspace = true -edition.workspace = true -homepage.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -rust-version.workspace = true -version.workspace = true -metadata.crane.workspace = true - -[package.metadata.deb] -name = "conduwuit" -maintainer = "strawberry " -copyright = "2024, strawberry " -license-file = ["../../LICENSE", "3"] -depends = "$auto, ca-certificates" -extended-description = """\ -a cool hard fork of Conduit, a Matrix homeserver written in Rust""" -section = "net" -priority = "optional" -conf-files = ["/etc/conduwuit/conduwuit.toml"] -maintainer-scripts = "../../debian/" -systemd-units = { unit-name = "conduwuit", start = false } -assets = [ - ["../../debian/README.md", "usr/share/doc/conduwuit/README.Debian", "644"], - ["../../README.md", "usr/share/doc/conduwuit/", "644"], - ["../../target/release/conduwuit", "usr/sbin/conduwuit", "755"], - ["../../conduwuit-example.toml", "etc/conduwuit/conduwuit.toml", "640"], -] - -[features] -default = [ - 
"blurhashing", - "brotli_compression", - "element_hacks", - "gzip_compression", - "io_uring", - "jemalloc", - "jemalloc_conf", - "media_thumbnail", - "release_max_log_level", - "systemd", - "url_preview", - "zstd_compression", -] - -blurhashing = [ - "conduwuit-service/blurhashing", -] -brotli_compression = [ - "conduwuit-api/brotli_compression", - "conduwuit-core/brotli_compression", - "conduwuit-router/brotli_compression", - "conduwuit-service/brotli_compression", -] -console = [ - "conduwuit-service/console", -] -direct_tls = [ - "conduwuit-router/direct_tls" -] -element_hacks = [ - "conduwuit-api/element_hacks", - "conduwuit-service/element_hacks", -] -gzip_compression = [ - "conduwuit-api/gzip_compression", - "conduwuit-core/gzip_compression", - "conduwuit-router/gzip_compression", - "conduwuit-service/gzip_compression", -] -hardened_malloc = [ - "conduwuit-core/hardened_malloc", -] -io_uring = [ - "conduwuit-database/io_uring", -] -jemalloc = [ - "conduwuit-core/jemalloc", - "conduwuit-database/jemalloc", -] -jemalloc_prof = [ - "conduwuit-core/jemalloc_prof", -] -jemalloc_stats = [ - "conduwuit-core/jemalloc_stats", -] -jemalloc_conf = [ - "conduwuit-core/jemalloc_conf", -] -media_thumbnail = [ - "conduwuit-service/media_thumbnail", -] -perf_measurements = [ - "dep:opentelemetry", - "dep:tracing-flame", - "dep:tracing-opentelemetry", - "dep:opentelemetry_sdk", - "dep:opentelemetry-jaeger", - "conduwuit-core/perf_measurements", - "conduwuit-core/sentry_telemetry", -] -# increases performance, reduces build times, and reduces binary size by not compiling or -# genreating code for log level filters that users will generally not use (debug and trace) -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", - "conduwuit-admin/release_max_log_level", - "conduwuit-api/release_max_log_level", - "conduwuit-core/release_max_log_level", - 
"conduwuit-database/release_max_log_level", - "conduwuit-router/release_max_log_level", - "conduwuit-service/release_max_log_level", -] -sentry_telemetry = [ - "dep:sentry", - "dep:sentry-tracing", - "dep:sentry-tower", - "conduwuit-core/sentry_telemetry", - "conduwuit-router/sentry_telemetry", -] -systemd = [ - "conduwuit-router/systemd", -] -# enable the tokio_console server ncompatible with release_max_log_level -tokio_console = [ - "dep:console-subscriber", - "tokio/tracing", -] -url_preview = [ - "conduwuit-service/url_preview", -] -zstd_compression = [ - "conduwuit-api/zstd_compression", - "conduwuit-core/zstd_compression", - "conduwuit-database/zstd_compression", - "conduwuit-router/zstd_compression", - "conduwuit-service/zstd_compression", -] -conduwuit_mods = [ - "conduwuit-core/conduwuit_mods", -] - -[dependencies] -conduwuit-admin.workspace = true -conduwuit-api.workspace = true -conduwuit-core.workspace = true -conduwuit-database.workspace = true -conduwuit-router.workspace = true -conduwuit-service.workspace = true - -clap.workspace = true -console-subscriber.optional = true -console-subscriber.workspace = true -const-str.workspace = true -log.workspace = true -opentelemetry-jaeger.optional = true -opentelemetry-jaeger.workspace = true -opentelemetry.optional = true -opentelemetry.workspace = true -opentelemetry_sdk.optional = true -opentelemetry_sdk.workspace = true -sentry-tower.optional = true -sentry-tower.workspace = true -sentry-tracing.optional = true -sentry-tracing.workspace = true -sentry.optional = true -sentry.workspace = true -tokio-metrics.optional = true -tokio-metrics.workspace = true -tokio.workspace = true -tracing-flame.optional = true -tracing-flame.workspace = true -tracing-opentelemetry.optional = true -tracing-opentelemetry.workspace = true -tracing-subscriber.workspace = true -tracing.workspace = true - -[target.'cfg(all(not(target_env = "msvc"), target_os = "linux"))'.dependencies] -hardened_malloc-rs.workspace = true 
-hardened_malloc-rs.optional = true - -[lints] -workspace = true - -[[bin]] -name = "conduwuit" -path = "main.rs" diff --git a/src/main/clap.rs b/src/main/clap.rs deleted file mode 100644 index 707a1c76..00000000 --- a/src/main/clap.rs +++ /dev/null @@ -1,180 +0,0 @@ -//! Integration with `clap` - -use std::path::PathBuf; - -use clap::{ArgAction, Parser}; -use conduwuit_core::{ - Err, Result, - config::{Figment, FigmentValue}, - err, toml, - utils::available_parallelism, -}; - -/// Commandline arguments -#[derive(Parser, Debug)] -#[clap( - about, - long_about = None, - name = "conduwuit", - version = conduwuit_core::version(), -)] -pub(crate) struct Args { - #[arg(short, long)] - /// Path to the config TOML file (optional) - pub(crate) config: Option>, - - /// Override a configuration variable using TOML 'key=value' syntax - #[arg(long, short('O'))] - pub(crate) option: Vec, - - /// Run in a stricter read-only --maintenance mode. - #[arg(long)] - pub(crate) read_only: bool, - - /// Run in maintenance mode while refusing connections. - #[arg(long)] - pub(crate) maintenance: bool, - - #[cfg(feature = "console")] - /// Activate admin command console automatically after startup. - #[arg(long, num_args(0))] - pub(crate) console: bool, - - /// Execute console command automatically after startup. - #[arg(long)] - pub(crate) execute: Vec, - - /// Set functional testing modes if available. Ex '--test=smoke' - #[arg(long, hide(true))] - pub(crate) test: Vec, - - /// Override the tokio worker_thread count. - #[arg( - long, - hide(true), - env = "TOKIO_WORKER_THREADS", - default_value = available_parallelism().to_string(), - )] - pub(crate) worker_threads: usize, - - /// Override the tokio global_queue_interval. - #[arg(long, hide(true), env = "TOKIO_GLOBAL_QUEUE_INTERVAL", default_value = "192")] - pub(crate) global_event_interval: u32, - - /// Override the tokio event_interval. 
- #[arg(long, hide(true), env = "TOKIO_EVENT_INTERVAL", default_value = "512")] - pub(crate) kernel_event_interval: u32, - - /// Override the tokio max_io_events_per_tick. - #[arg(long, hide(true), env = "TOKIO_MAX_IO_EVENTS_PER_TICK", default_value = "512")] - pub(crate) kernel_events_per_tick: usize, - - /// Set the histogram bucket size, in microseconds (tokio_unstable). Default - /// is 25 microseconds. If the values of the histogram don't approach zero - /// with the exception of the last bucket, try increasing this value to e.g. - /// 50 or 100. Inversely, decrease to 10 etc if the histogram lacks - /// resolution. - #[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_INTERVAL", default_value = "25")] - pub(crate) worker_histogram_interval: u64, - - /// Set the histogram bucket count (tokio_unstable). Default is 20. - #[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS", default_value = "20")] - pub(crate) worker_histogram_buckets: usize, - - /// Toggles worker affinity feature. - #[arg( - long, - hide(true), - env = "CONDUWUIT_RUNTIME_WORKER_AFFINITY", - action = ArgAction::Set, - num_args = 0..=1, - require_equals(false), - default_value = "true", - default_missing_value = "true", - )] - pub(crate) worker_affinity: bool, - - /// Toggles feature to promote memory reclamation by the operating system - /// when tokio worker runs out of work. - #[arg( - long, - hide(true), - env = "CONDUWUIT_RUNTIME_GC_ON_PARK", - action = ArgAction::Set, - num_args = 0..=1, - require_equals(false), - )] - pub(crate) gc_on_park: Option, - - /// Toggles muzzy decay for jemalloc arenas associated with a tokio - /// worker (when worker-affinity is enabled). Setting to false releases - /// memory to the operating system using MADV_FREE without MADV_DONTNEED. - /// Setting to false increases performance by reducing pagefaults, but - /// resident memory usage appears high until there is memory pressure. 
The - /// default is true unless the system has four or more cores. - #[arg( - long, - hide(true), - env = "CONDUWUIT_RUNTIME_GC_MUZZY", - action = ArgAction::Set, - num_args = 0..=1, - require_equals(false), - )] - pub(crate) gc_muzzy: Option, -} - -/// Parse commandline arguments into structured data -#[must_use] -pub(super) fn parse() -> Args { Args::parse() } - -/// Synthesize any command line options with configuration file options. -pub(crate) fn update(mut config: Figment, args: &Args) -> Result { - if args.read_only { - config = config.join(("rocksdb_read_only", true)); - } - - if args.maintenance || args.read_only { - config = config.join(("startup_netburst", false)); - config = config.join(("listening", false)); - } - - #[cfg(feature = "console")] - // Indicate the admin console should be spawned automatically if the - // configuration file hasn't already. - if args.console { - config = config.join(("admin_console_automatic", true)); - } - - // Execute commands after any commands listed in configuration file - config = config.adjoin(("admin_execute", &args.execute)); - - // Update config with names of any functional-tests - config = config.adjoin(("test", &args.test)); - - // All other individual overrides can go last in case we have options which - // set multiple conf items at once and the user still needs granular overrides. - for option in &args.option { - let (key, val) = option - .split_once('=') - .ok_or_else(|| err!("Missing '=' in -O/--option: {option:?}"))?; - - if key.is_empty() { - return Err!("Missing key= in -O/--option: {option:?}"); - } - - if val.is_empty() { - return Err!("Missing =val in -O/--option: {option:?}"); - } - - // The value has to pass for what would appear as a line in the TOML file. 
- let val = toml::from_str::(option)?; - let FigmentValue::Dict(_, val) = val else { - panic!("Unexpected Figment Value: {val:#?}"); - }; - - // Figment::merge() overrides existing - config = config.merge((key, val[key].clone())); - } - - Ok(config) -} diff --git a/src/main/logging.rs b/src/main/logging.rs deleted file mode 100644 index eeeda127..00000000 --- a/src/main/logging.rs +++ /dev/null @@ -1,147 +0,0 @@ -use std::sync::Arc; - -use conduwuit_core::{ - Result, - config::Config, - debug_warn, err, - log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span}, - result::UnwrapOrErr, -}; -use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload}; - -#[cfg(feature = "perf_measurements")] -pub(crate) type TracingFlameGuard = - Option>>; -#[cfg(not(feature = "perf_measurements"))] -pub(crate) type TracingFlameGuard = (); - -#[allow(clippy::redundant_clone)] -pub(crate) fn init( - config: &Config, -) -> Result<(LogLevelReloadHandles, TracingFlameGuard, Arc)> { - let reload_handles = LogLevelReloadHandles::default(); - - let console_span_events = fmt_span::from_str(&config.log_span_events).unwrap_or_err(); - let console_filter = EnvFilter::builder() - .with_regex(config.log_filter_regex) - .parse(&config.log) - .map_err(|e| err!(Config("log", "{e}.")))?; - let console_layer = fmt::Layer::new() - .with_span_events(console_span_events) - .event_format(ConsoleFormat::new(config)) - .fmt_fields(ConsoleFormat::new(config)) - .with_writer(ConsoleWriter::new(config)); - - let (console_reload_filter, console_reload_handle) = - reload::Layer::new(console_filter.clone()); - reload_handles.add("console", Box::new(console_reload_handle)); - - let cap_state = Arc::new(capture::State::new()); - let cap_layer = capture::Layer::new(&cap_state); - - let subscriber = Registry::default() - .with(console_layer.with_filter(console_reload_filter)) - .with(cap_layer); - - #[cfg(feature = "sentry_telemetry")] - let subscriber = { - let 
sentry_filter = EnvFilter::try_new(&config.sentry_filter) - .map_err(|e| err!(Config("sentry_filter", "{e}.")))?; - let sentry_layer = sentry_tracing::layer(); - let (sentry_reload_filter, sentry_reload_handle) = reload::Layer::new(sentry_filter); - reload_handles.add("sentry", Box::new(sentry_reload_handle)); - subscriber.with(sentry_layer.with_filter(sentry_reload_filter)) - }; - - #[cfg(feature = "perf_measurements")] - let (subscriber, flame_guard) = { - let (flame_layer, flame_guard) = if config.tracing_flame { - let flame_filter = EnvFilter::try_new(&config.tracing_flame_filter) - .map_err(|e| err!(Config("tracing_flame_filter", "{e}.")))?; - let (flame_layer, flame_guard) = - tracing_flame::FlameLayer::with_file(&config.tracing_flame_output_path) - .map_err(|e| err!(Config("tracing_flame_output_path", "{e}.")))?; - let flame_layer = flame_layer - .with_empty_samples(false) - .with_filter(flame_filter); - (Some(flame_layer), Some(flame_guard)) - } else { - (None, None) - }; - - let jaeger_filter = EnvFilter::try_new(&config.jaeger_filter) - .map_err(|e| err!(Config("jaeger_filter", "{e}.")))?; - let jaeger_layer = config.allow_jaeger.then(|| { - opentelemetry::global::set_text_map_propagator( - opentelemetry_jaeger::Propagator::new(), - ); - let tracer = opentelemetry_jaeger::new_agent_pipeline() - .with_auto_split_batch(true) - .with_service_name("conduwuit") - .install_batch(opentelemetry_sdk::runtime::Tokio) - .expect("jaeger agent pipeline"); - let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - let (jaeger_reload_filter, jaeger_reload_handle) = - reload::Layer::new(jaeger_filter.clone()); - reload_handles.add("jaeger", Box::new(jaeger_reload_handle)); - Some(telemetry.with_filter(jaeger_reload_filter)) - }); - - let subscriber = subscriber.with(flame_layer).with(jaeger_layer); - (subscriber, flame_guard) - }; - - #[cfg(not(feature = "perf_measurements"))] - #[cfg_attr(not(feature = "perf_measurements"), allow(clippy::let_unit_value))] - 
let flame_guard = (); - - let ret = (reload_handles, flame_guard, cap_state); - - // Enable the tokio console. This is slightly kludgy because we're judggling - // compile-time and runtime conditions to elide it, each of those changing the - // subscriber's type. - let (console_enabled, console_disabled_reason) = tokio_console_enabled(config); - #[cfg(all(feature = "tokio_console", tokio_unstable))] - if console_enabled { - let console_layer = console_subscriber::ConsoleLayer::builder() - .with_default_env() - .spawn(); - - set_global_default(subscriber.with(console_layer)); - return Ok(ret); - } - - set_global_default(subscriber); - - // If there's a reason the tokio console was disabled when it might be desired - // we output that here after initializing logging - if !console_enabled && !console_disabled_reason.is_empty() { - debug_warn!("{console_disabled_reason}"); - } - - Ok(ret) -} - -fn tokio_console_enabled(config: &Config) -> (bool, &'static str) { - if !cfg!(all(feature = "tokio_console", tokio_unstable)) { - return (false, ""); - } - - if cfg!(feature = "release_max_log_level") && !cfg!(debug_assertions) { - return ( - false, - "'tokio_console' feature and 'release_max_log_level' feature are incompatible.", - ); - } - - if !config.tokio_console { - return (false, "tokio console is available but disabled by the configuration."); - } - - (true, "") -} - -fn set_global_default(subscriber: S) { - tracing::subscriber::set_global_default(subscriber) - .expect("the global default tracing subscriber failed to be initialized"); -} diff --git a/src/main/main.rs b/src/main/main.rs deleted file mode 100644 index 1a9d3fe4..00000000 --- a/src/main/main.rs +++ /dev/null @@ -1,120 +0,0 @@ -#![type_length_limit = "49152"] //TODO: reduce me - -pub(crate) mod clap; -mod logging; -mod mods; -mod restart; -mod runtime; -mod sentry; -mod server; -mod signal; - -use std::sync::{Arc, atomic::Ordering}; - -use conduwuit_core::{Error, Result, debug_info, error, 
rustc_flags_capture}; -use server::Server; - -rustc_flags_capture! {} - -fn main() -> Result { - let args = clap::parse(); - let runtime = runtime::new(&args)?; - let server = Server::new(&args, Some(runtime.handle()))?; - - runtime.spawn(signal::signal(server.clone())); - runtime.block_on(async_main(&server))?; - runtime::shutdown(&server, runtime); - - #[cfg(unix)] - if server.server.restarting.load(Ordering::Acquire) { - restart::restart(); - } - - debug_info!("Exit"); - Ok(()) -} - -/// Operate the server normally in release-mode static builds. This will start, -/// run and stop the server within the asynchronous runtime. -#[cfg(any(not(conduwuit_mods), not(feature = "conduwuit_mods")))] -#[tracing::instrument( - name = "main", - parent = None, - skip_all -)] -async fn async_main(server: &Arc) -> Result<(), Error> { - extern crate conduwuit_router as router; - - match router::start(&server.server).await { - | Ok(services) => server.services.lock().await.insert(services), - | Err(error) => { - error!("Critical error starting server: {error}"); - return Err(error); - }, - }; - - if let Err(error) = router::run( - server - .services - .lock() - .await - .as_ref() - .expect("services initialized"), - ) - .await - { - error!("Critical error running server: {error}"); - return Err(error); - } - - if let Err(error) = router::stop( - server - .services - .lock() - .await - .take() - .expect("services initialied"), - ) - .await - { - error!("Critical error stopping server: {error}"); - return Err(error); - } - - debug_info!("Exit runtime"); - Ok(()) -} - -/// Operate the server in developer-mode dynamic builds. This will start, run, -/// and hot-reload portions of the server as-needed before returning for an -/// actual shutdown. This is not available in release-mode or static builds. 
-#[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] -async fn async_main(server: &Arc) -> Result<(), Error> { - let mut starts = true; - let mut reloads = true; - while reloads { - if let Err(error) = mods::open(server).await { - error!("Loading router: {error}"); - return Err(error); - } - - let result = mods::run(server, starts).await; - if let Ok(result) = result { - (starts, reloads) = result; - } - - let force = !reloads || result.is_err(); - if let Err(error) = mods::close(server, force).await { - error!("Unloading router: {error}"); - return Err(error); - } - - if let Err(error) = result { - error!("{error}"); - return Err(error); - } - } - - debug_info!("Exit runtime"); - Ok(()) -} diff --git a/src/main/mods.rs b/src/main/mods.rs deleted file mode 100644 index d585a381..00000000 --- a/src/main/mods.rs +++ /dev/null @@ -1,156 +0,0 @@ -#![cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] - -#[unsafe(no_link)] -extern crate conduwuit_service; - -use std::{ - future::Future, - pin::Pin, - sync::{Arc, atomic::Ordering}, -}; - -use conduwuit_core::{Error, Result, debug, error, mods}; -use conduwuit_service::Services; - -use crate::Server; - -type StartFuncResult = Pin>> + Send>>; -type StartFuncProto = fn(&Arc) -> StartFuncResult; - -type RunFuncResult = Pin> + Send>>; -type RunFuncProto = fn(&Arc) -> RunFuncResult; - -type StopFuncResult = Pin> + Send>>; -type StopFuncProto = fn(Arc) -> StopFuncResult; - -const RESTART_THRESH: &str = "conduwuit_service"; -const MODULE_NAMES: &[&str] = &[ - //"conduwuit_core", - "conduwuit_database", - "conduwuit_service", - "conduwuit_api", - "conduwuit_admin", - "conduwuit_router", -]; - -#[cfg(panic_trap)] -conduwuit_core::mod_init! 
{{ - conduwuit_core::debug::set_panic_trap(); -}} - -pub(crate) async fn run(server: &Arc, starts: bool) -> Result<(bool, bool), Error> { - let main_lock = server.mods.read().await; - let main_mod = (*main_lock).last().expect("main module loaded"); - if starts { - let start = main_mod.get::("start")?; - match start(&server.server).await { - | Ok(services) => server.services.lock().await.insert(services), - | Err(error) => { - error!("Starting server: {error}"); - return Err(error); - }, - }; - } - server.server.stopping.store(false, Ordering::Release); - let run = main_mod.get::("run")?; - if let Err(error) = run(server - .services - .lock() - .await - .as_ref() - .expect("services initialized")) - .await - { - error!("Running server: {error}"); - return Err(error); - } - let reloads = server.server.reloading.swap(false, Ordering::AcqRel); - let stops = !reloads || stale(server).await? <= restart_thresh(); - let starts = reloads && stops; - if stops { - let stop = main_mod.get::("stop")?; - if let Err(error) = stop( - server - .services - .lock() - .await - .take() - .expect("services initialized"), - ) - .await - { - error!("Stopping server: {error}"); - return Err(error); - } - } - - Ok((starts, reloads)) -} - -pub(crate) async fn open(server: &Arc) -> Result { - let mut mods_lock = server.mods.write().await; - let mods: &mut Vec = &mut mods_lock; - debug!( - available = %available(), - loaded = %mods.len(), - "Loading modules", - ); - - for (i, name) in MODULE_NAMES.iter().enumerate() { - if mods.get(i).is_none() { - mods.push(mods::Module::from_name(name)?); - } - } - - Ok(mods.len()) -} - -pub(crate) async fn close(server: &Arc, force: bool) -> Result { - let stale = stale_count(server).await; - let mut mods_lock = server.mods.write().await; - let mods: &mut Vec = &mut mods_lock; - debug!( - available = %available(), - loaded = %mods.len(), - stale = %stale, - force, - "Unloading modules", - ); - - while mods.last().is_some() { - let module = 
&mods.last().expect("module"); - if force || module.deleted()? { - mods.pop(); - } else { - break; - } - } - - Ok(mods.len()) -} - -async fn stale_count(server: &Arc) -> usize { - let watermark = stale(server).await.unwrap_or(available()); - available().saturating_sub(watermark) -} - -async fn stale(server: &Arc) -> Result { - let mods_lock = server.mods.read().await; - let mods: &Vec = &mods_lock; - for (i, module) in mods.iter().enumerate() { - if module.deleted()? { - return Ok(i); - } - } - - Ok(mods.len()) -} - -fn restart_thresh() -> usize { - MODULE_NAMES - .iter() - .position(|&name| name.ends_with(RESTART_THRESH)) - .unwrap_or(MODULE_NAMES.len()) -} - -const fn available() -> usize { MODULE_NAMES.len() } diff --git a/src/main/restart.rs b/src/main/restart.rs deleted file mode 100644 index b9d1dc94..00000000 --- a/src/main/restart.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![cfg(unix)] - -use std::{env, os::unix::process::CommandExt, process::Command}; - -use conduwuit_core::{debug, info, utils}; - -#[cold] -pub(super) fn restart() -> ! { - // SAFETY: We have allowed an override for the case where the current_exe() has - // been replaced or removed. By default the server will fail to restart if the - // binary has been replaced (i.e. by cargo); this is for security purposes. - // Command::exec() used to panic in that case. - // - // We can (and do) prevent that panic by checking the result of current_exe() - // prior to committing to restart, returning an error to the user without any - // unexpected shutdown. In a nutshell that is the execuse for this unsafety. - // Nevertheless, we still want a way to override the restart preventation (i.e. - // admin server restart --force). 
- let exe = unsafe { utils::sys::current_exe().expect("program path must be available") }; - let envs = env::vars(); - let args = env::args().skip(1); - debug!(?exe, ?args, ?envs, "Restart"); - - info!("Restart"); - - let error = Command::new(exe).args(args).envs(envs).exec(); - panic!("{error:?}"); -} diff --git a/src/main/runtime.rs b/src/main/runtime.rs deleted file mode 100644 index 1c58ea81..00000000 --- a/src/main/runtime.rs +++ /dev/null @@ -1,278 +0,0 @@ -use std::{ - iter::once, - sync::{ - Arc, OnceLock, - atomic::{AtomicUsize, Ordering}, - }, - thread, - time::Duration, -}; - -#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -use conduwuit_core::result::LogDebugErr; -use conduwuit_core::{ - Result, debug, is_true, - utils::sys::compute::{nth_core_available, set_affinity}, -}; -use tokio::runtime::Builder; - -use crate::{clap::Args, server::Server}; - -const WORKER_NAME: &str = "conduwuit:worker"; -const WORKER_MIN: usize = 2; -const WORKER_KEEPALIVE: u64 = 36; -const MAX_BLOCKING_THREADS: usize = 1024; -const SHUTDOWN_TIMEOUT: Duration = Duration::from_millis(10000); -#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -const DISABLE_MUZZY_THRESHOLD: usize = 4; - -static WORKER_AFFINITY: OnceLock = OnceLock::new(); -static GC_ON_PARK: OnceLock> = OnceLock::new(); -static GC_MUZZY: OnceLock> = OnceLock::new(); - -pub(super) fn new(args: &Args) -> Result { - WORKER_AFFINITY - .set(args.worker_affinity) - .expect("set WORKER_AFFINITY from program argument"); - - GC_ON_PARK - .set(args.gc_on_park) - .expect("set GC_ON_PARK from program argument"); - - GC_MUZZY - .set(args.gc_muzzy) - .expect("set GC_MUZZY from program argument"); - - let mut builder = Builder::new_multi_thread(); - builder - .enable_io() - .enable_time() - .thread_name(WORKER_NAME) - .worker_threads(args.worker_threads.max(WORKER_MIN)) - .max_blocking_threads(MAX_BLOCKING_THREADS) - .thread_keep_alive(Duration::from_secs(WORKER_KEEPALIVE)) - 
.global_queue_interval(args.global_event_interval) - .event_interval(args.kernel_event_interval) - .max_io_events_per_tick(args.kernel_events_per_tick) - .on_thread_start(thread_start) - .on_thread_stop(thread_stop) - .on_thread_unpark(thread_unpark) - .on_thread_park(thread_park); - - #[cfg(tokio_unstable)] - builder - .on_task_spawn(task_spawn) - .on_before_task_poll(task_enter) - .on_after_task_poll(task_leave) - .on_task_terminate(task_terminate); - - #[cfg(tokio_unstable)] - enable_histogram(&mut builder, args); - - builder.build().map_err(Into::into) -} - -#[cfg(tokio_unstable)] -fn enable_histogram(builder: &mut Builder, args: &Args) { - use tokio::runtime::HistogramConfiguration; - - let buckets = args.worker_histogram_buckets; - let interval = Duration::from_micros(args.worker_histogram_interval); - let linear = HistogramConfiguration::linear(interval, buckets); - builder - .enable_metrics_poll_time_histogram() - .metrics_poll_time_histogram_configuration(linear); -} - -#[cfg(tokio_unstable)] -#[tracing::instrument(name = "stop", level = "info", skip_all)] -pub(super) fn shutdown(server: &Arc, runtime: tokio::runtime::Runtime) { - use conduwuit_core::event; - use tracing::Level; - - // The final metrics output is promoted to INFO when tokio_unstable is active in - // a release/bench mode and DEBUG is likely optimized out - const LEVEL: Level = if cfg!(debug_assertions) { - Level::DEBUG - } else { - Level::INFO - }; - - debug!( - timeout = ?SHUTDOWN_TIMEOUT, - "Waiting for runtime..." - ); - - runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); - let runtime_metrics = server.server.metrics.runtime_interval().unwrap_or_default(); - - event!(LEVEL, ?runtime_metrics, "Final runtime metrics"); -} - -#[cfg(not(tokio_unstable))] -#[tracing::instrument(name = "stop", level = "info", skip_all)] -pub(super) fn shutdown(_server: &Arc, runtime: tokio::runtime::Runtime) { - debug!( - timeout = ?SHUTDOWN_TIMEOUT, - "Waiting for runtime..." 
- ); - - runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); -} - -#[tracing::instrument( - name = "fork", - level = "debug", - skip_all, - fields( - id = ?thread::current().id(), - name = %thread::current().name().unwrap_or("None"), - ), -)] -fn thread_start() { - debug_assert_eq!( - Some(WORKER_NAME), - thread::current().name(), - "tokio worker name mismatch at thread start" - ); - - if WORKER_AFFINITY.get().is_some_and(is_true!()) { - set_worker_affinity(); - } -} - -fn set_worker_affinity() { - static CORES_OCCUPIED: AtomicUsize = AtomicUsize::new(0); - - let handle = tokio::runtime::Handle::current(); - let num_workers = handle.metrics().num_workers(); - let i = CORES_OCCUPIED.fetch_add(1, Ordering::Relaxed); - if i >= num_workers { - return; - } - - let Some(id) = nth_core_available(i) else { - return; - }; - - set_affinity(once(id)); - set_worker_mallctl(id); -} - -#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -fn set_worker_mallctl(id: usize) { - use conduwuit_core::alloc::je::{ - is_affine_arena, - this_thread::{set_arena, set_muzzy_decay}, - }; - - if is_affine_arena() { - set_arena(id).log_debug_err().ok(); - } - - let muzzy_option = GC_MUZZY - .get() - .expect("GC_MUZZY initialized by runtime::new()"); - - let muzzy_auto_disable = - conduwuit_core::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; - if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { - set_muzzy_decay(-1).log_debug_err().ok(); - } -} - -#[cfg(any(not(feature = "jemalloc"), target_env = "msvc"))] -fn set_worker_mallctl(_: usize) {} - -#[tracing::instrument( - name = "join", - level = "debug", - skip_all, - fields( - id = ?thread::current().id(), - name = %thread::current().name().unwrap_or("None"), - ), -)] -fn thread_stop() {} - -#[tracing::instrument( - name = "work", - level = "trace", - skip_all, - fields( - id = ?thread::current().id(), - name = %thread::current().name().unwrap_or("None"), - ), -)] -fn thread_unpark() {} - -#[tracing::instrument( - 
name = "park", - level = "trace", - skip_all, - fields( - id = ?thread::current().id(), - name = %thread::current().name().unwrap_or("None"), - ), -)] -fn thread_park() { - match GC_ON_PARK - .get() - .as_ref() - .expect("GC_ON_PARK initialized by runtime::new()") - { - | Some(true) | None if cfg!(feature = "jemalloc_conf") => gc_on_park(), - | _ => (), - } -} - -fn gc_on_park() { - #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] - conduwuit_core::alloc::je::this_thread::decay() - .log_debug_err() - .ok(); -} - -#[cfg(tokio_unstable)] -#[tracing::instrument( - name = "spawn", - level = "trace", - skip_all, - fields( - id = %meta.id(), - ), -)] -fn task_spawn(meta: &tokio::runtime::TaskMeta<'_>) {} - -#[cfg(tokio_unstable)] -#[tracing::instrument( - name = "finish", - level = "trace", - skip_all, - fields( - id = %meta.id() - ), -)] -fn task_terminate(meta: &tokio::runtime::TaskMeta<'_>) {} - -#[cfg(tokio_unstable)] -#[tracing::instrument( - name = "enter", - level = "trace", - skip_all, - fields( - id = %meta.id() - ), -)] -fn task_enter(meta: &tokio::runtime::TaskMeta<'_>) {} - -#[cfg(tokio_unstable)] -#[tracing::instrument( - name = "leave", - level = "trace", - skip_all, - fields( - id = %meta.id() - ), -)] -fn task_leave(meta: &tokio::runtime::TaskMeta<'_>) {} diff --git a/src/main/sentry.rs b/src/main/sentry.rs deleted file mode 100644 index 68f12eb7..00000000 --- a/src/main/sentry.rs +++ /dev/null @@ -1,91 +0,0 @@ -#![cfg(feature = "sentry_telemetry")] - -use std::{ - str::FromStr, - sync::{Arc, OnceLock}, -}; - -use conduwuit_core::{config::Config, debug, trace}; -use sentry::{ - Breadcrumb, ClientOptions, Level, - types::{ - Dsn, - protocol::v7::{Context, Event}, - }, -}; - -static SEND_PANIC: OnceLock = OnceLock::new(); -static SEND_ERROR: OnceLock = OnceLock::new(); - -pub(crate) fn init(config: &Config) -> Option { - config.sentry.then(|| sentry::init(options(config))) -} - -fn options(config: &Config) -> ClientOptions { - SEND_PANIC - 
.set(config.sentry_send_panic) - .expect("SEND_PANIC was not previously set"); - SEND_ERROR - .set(config.sentry_send_error) - .expect("SEND_ERROR was not previously set"); - - let dsn = config - .sentry_endpoint - .as_ref() - .expect("init_sentry should only be called if sentry is enabled and this is not None") - .as_str(); - - ClientOptions { - dsn: Some(Dsn::from_str(dsn).expect("sentry_endpoint must be a valid URL")), - server_name: config - .sentry_send_server_name - .then(|| config.server_name.to_string().into()), - traces_sample_rate: config.sentry_traces_sample_rate, - debug: cfg!(debug_assertions), - release: sentry::release_name!(), - user_agent: conduwuit_core::version::user_agent().into(), - attach_stacktrace: config.sentry_attach_stacktrace, - before_send: Some(Arc::new(before_send)), - before_breadcrumb: Some(Arc::new(before_breadcrumb)), - ..Default::default() - } -} - -fn before_send(event: Event<'static>) -> Option> { - if event.exception.iter().any(|e| e.ty == "panic") && !SEND_PANIC.get().unwrap_or(&true) { - return None; - } - - if event.level == Level::Error { - if !SEND_ERROR.get().unwrap_or(&true) { - return None; - } - - if cfg!(debug_assertions) { - return None; - } - - //NOTE: we can enable this to specify error!(sentry = true, ...) 
- if let Some(Context::Other(context)) = event.contexts.get("Rust Tracing Fields") { - if !context.contains_key("sentry") { - //return None; - } - } - } - - if event.level == Level::Fatal { - trace!("{event:#?}"); - } - - debug!("Sending sentry event: {event:?}"); - Some(event) -} - -fn before_breadcrumb(crumb: Breadcrumb) -> Option { - if crumb.ty == "log" && crumb.level == Level::Debug { - return None; - } - - trace!("Sentry breadcrumb: {crumb:?}"); - Some(crumb) -} diff --git a/src/main/server.rs b/src/main/server.rs deleted file mode 100644 index 8f697ca4..00000000 --- a/src/main/server.rs +++ /dev/null @@ -1,89 +0,0 @@ -use std::{path::PathBuf, sync::Arc}; - -use conduwuit_core::{ - Error, Result, - config::Config, - info, - log::Log, - utils::{stream, sys}, -}; -use tokio::{runtime, sync::Mutex}; - -use crate::{clap::Args, logging::TracingFlameGuard}; - -/// Server runtime state; complete -pub(crate) struct Server { - /// Server runtime state; public portion - pub(crate) server: Arc, - - pub(crate) services: Mutex>>, - - _tracing_flame_guard: TracingFlameGuard, - - #[cfg(feature = "sentry_telemetry")] - _sentry_guard: Option<::sentry::ClientInitGuard>, - - #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] - // Module instances; TODO: move to mods::loaded mgmt vector - pub(crate) mods: tokio::sync::RwLock>, -} - -impl Server { - pub(crate) fn new( - args: &Args, - runtime: Option<&runtime::Handle>, - ) -> Result, Error> { - let _runtime_guard = runtime.map(runtime::Handle::enter); - - let config_paths = args - .config - .as_deref() - .into_iter() - .flat_map(<[_]>::iter) - .map(PathBuf::as_path); - - let config = Config::load(config_paths) - .and_then(|raw| crate::clap::update(raw, args)) - .and_then(|raw| Config::new(&raw))?; - - let (tracing_reload_handle, tracing_flame_guard, capture) = - crate::logging::init(&config)?; - - config.check()?; - - #[cfg(feature = "sentry_telemetry")] - let sentry_guard = crate::sentry::init(&config); - - #[cfg(unix)] - 
sys::maximize_fd_limit() - .expect("Unable to increase maximum soft and hard file descriptor limit"); - - let (_old_width, _new_width) = stream::set_width(config.stream_width_default); - let (_old_amp, _new_amp) = stream::set_amplification(config.stream_amplification); - - info!( - server_name = %config.server_name, - database_path = ?config.database_path, - log_levels = %config.log, - "{}", - conduwuit_core::version(), - ); - - Ok(Arc::new(Self { - server: Arc::new(conduwuit_core::Server::new(config, runtime.cloned(), Log { - reload: tracing_reload_handle, - capture, - })), - - services: None.into(), - - _tracing_flame_guard: tracing_flame_guard, - - #[cfg(feature = "sentry_telemetry")] - _sentry_guard: sentry_guard, - - #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] - mods: tokio::sync::RwLock::new(Vec::new()), - })) - } -} diff --git a/src/main/signal.rs b/src/main/signal.rs deleted file mode 100644 index a5d07774..00000000 --- a/src/main/signal.rs +++ /dev/null @@ -1,60 +0,0 @@ -use std::sync::Arc; - -use conduwuit_core::{debug_error, trace, warn}; -use tokio::signal; - -use super::server::Server; - -#[cfg(unix)] -#[tracing::instrument(skip_all)] -pub(super) async fn signal(server: Arc) { - use signal::unix; - use unix::SignalKind; - - const CONSOLE: bool = cfg!(feature = "console"); - const RELOADING: bool = cfg!(all(conduwuit_mods, feature = "conduwuit_mods", not(CONSOLE))); - - let mut quit = unix::signal(SignalKind::quit()).expect("SIGQUIT handler"); - let mut term = unix::signal(SignalKind::terminate()).expect("SIGTERM handler"); - let mut usr1 = unix::signal(SignalKind::user_defined1()).expect("SIGUSR1 handler"); - let mut usr2 = unix::signal(SignalKind::user_defined2()).expect("SIGUSR2 handler"); - loop { - trace!("Installed signal handlers"); - let sig: &'static str; - tokio::select! 
{ - _ = signal::ctrl_c() => { sig = "SIGINT"; }, - _ = quit.recv() => { sig = "SIGQUIT"; }, - _ = term.recv() => { sig = "SIGTERM"; }, - _ = usr1.recv() => { sig = "SIGUSR1"; }, - _ = usr2.recv() => { sig = "SIGUSR2"; }, - } - - warn!("Received {sig}"); - let result = if RELOADING && sig == "SIGINT" { - server.server.reload() - } else if matches!(sig, "SIGQUIT" | "SIGTERM") || (!CONSOLE && sig == "SIGINT") { - server.server.shutdown() - } else { - server.server.signal(sig) - }; - - if let Err(e) = result { - debug_error!(?sig, "signal: {e}"); - } - } -} - -#[cfg(not(unix))] -#[tracing::instrument(skip_all)] -pub(super) async fn signal(server: Arc) { - loop { - tokio::select! { - _ = signal::ctrl_c() => { - warn!("Received Ctrl+C"); - if let Err(e) = server.server.signal.send("SIGINT") { - debug_error!("signal channel: {e}"); - } - }, - } - } -} diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml deleted file mode 100644 index e4ddcb9b..00000000 --- a/src/router/Cargo.toml +++ /dev/null @@ -1,133 +0,0 @@ -[package] -name = "conduwuit_router" -categories.workspace = true -description.workspace = true -edition.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version.workspace = true - -[lib] -path = "mod.rs" -crate-type = [ - "rlib", -# "dylib", -] - -[features] -brotli_compression = [ - "conduwuit-admin/brotli_compression", - "conduwuit-api/brotli_compression", - "conduwuit-core/brotli_compression", - "conduwuit-service/brotli_compression", - "tower-http/compression-br", -] -direct_tls = [ - "axum-server/tls-rustls", - "dep:rustls", - "dep:axum-server-dual-protocol", -] -gzip_compression = [ - "conduwuit-admin/gzip_compression", - "conduwuit-api/gzip_compression", - "conduwuit-core/gzip_compression", - "conduwuit-service/gzip_compression", - "tower-http/compression-gzip", -] -io_uring = [ - "conduwuit-admin/io_uring", - "conduwuit-api/io_uring", - "conduwuit-service/io_uring", - 
"conduwuit-api/io_uring", -] -jemalloc = [ - "conduwuit-admin/jemalloc", - "conduwuit-api/jemalloc", - "conduwuit-core/jemalloc", - "conduwuit-service/jemalloc", -] -jemalloc_conf = [ - "conduwuit-admin/jemalloc_conf", - "conduwuit-api/jemalloc_conf", - "conduwuit-core/jemalloc_conf", - "conduwuit-service/jemalloc_conf", -] -jemalloc_prof = [ - "conduwuit-admin/jemalloc_prof", - "conduwuit-api/jemalloc_prof", - "conduwuit-core/jemalloc_prof", - "conduwuit-service/jemalloc_prof", -] -jemalloc_stats = [ - "conduwuit-admin/jemalloc_stats", - "conduwuit-api/jemalloc_stats", - "conduwuit-core/jemalloc_stats", - "conduwuit-service/jemalloc_stats", -] -release_max_log_level = [ - "conduwuit-admin/release_max_log_level", - "conduwuit-api/release_max_log_level", - "conduwuit-core/release_max_log_level", - "conduwuit-service/release_max_log_level", - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", -] -sentry_telemetry = [ - "conduwuit-core/sentry_telemetry", - "dep:sentry", - "dep:sentry-tracing", - "dep:sentry-tower", -] -systemd = [ - "dep:sd-notify", -] -zstd_compression = [ - "conduwuit-api/zstd_compression", - "conduwuit-core/zstd_compression", - "conduwuit-service/zstd_compression", - "tower-http/compression-zstd", -] - -[dependencies] -axum-client-ip.workspace = true -axum-server-dual-protocol.workspace = true -axum-server-dual-protocol.optional = true -axum-server.workspace = true -axum.workspace = true -bytes.workspace = true -conduwuit-admin.workspace = true -conduwuit-api.workspace = true -conduwuit-core.workspace = true -conduwuit-service.workspace = true -const-str.workspace = true -futures.workspace = true -http.workspace = true -http-body-util.workspace = true -hyper.workspace = true -hyper-util.workspace = true -log.workspace = true -ruma.workspace = true -rustls.workspace = true -rustls.optional = true -sentry.optional = true -sentry.workspace = true -sentry-tower.optional = true 
-sentry-tower.workspace = true -sentry-tracing.optional = true -sentry-tracing.workspace = true -serde_json.workspace = true -tokio.workspace = true -tower.workspace = true -tower-http.workspace = true -tracing.workspace = true - -[target.'cfg(all(unix, target_os = "linux"))'.dependencies] -sd-notify.workspace = true -sd-notify.optional = true - -[lints] -workspace = true diff --git a/src/router/layers.rs b/src/router/layers.rs deleted file mode 100644 index 6920555d..00000000 --- a/src/router/layers.rs +++ /dev/null @@ -1,232 +0,0 @@ -use std::{any::Any, sync::Arc, time::Duration}; - -use axum::{ - Router, - extract::{DefaultBodyLimit, MatchedPath}, -}; -use axum_client_ip::SecureClientIpSource; -use conduwuit::{Result, Server, debug, error}; -use conduwuit_api::router::state::Guard; -use conduwuit_service::Services; -use http::{ - HeaderValue, Method, StatusCode, - header::{self, HeaderName}, -}; -use tower::ServiceBuilder; -use tower_http::{ - catch_panic::CatchPanicLayer, - cors::{self, CorsLayer}, - sensitive_headers::SetSensitiveHeadersLayer, - set_header::SetResponseHeaderLayer, - timeout::{RequestBodyTimeoutLayer, ResponseBodyTimeoutLayer, TimeoutLayer}, - trace::{DefaultOnFailure, DefaultOnRequest, DefaultOnResponse, TraceLayer}, -}; -use tracing::Level; - -use crate::{request, router}; - -const CONDUWUIT_CSP: &[&str; 5] = &[ - "default-src 'none'", - "frame-ancestors 'none'", - "form-action 'none'", - "base-uri 'none'", - "sandbox", -]; - -const CONDUWUIT_PERMISSIONS_POLICY: &[&str; 2] = &["interest-cohort=()", "browsing-topics=()"]; - -pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { - let server = &services.server; - let layers = ServiceBuilder::new(); - - #[cfg(feature = "sentry_telemetry")] - let layers = layers.layer(sentry_tower::NewSentryLayer::>::new_from_top()); - - #[cfg(any( - feature = "zstd_compression", - feature = "gzip_compression", - feature = "brotli_compression" - ))] - let layers = 
layers.layer(compression_layer(server)); - - let services_ = services.clone(); - let layers = layers - .layer(SetSensitiveHeadersLayer::new([header::AUTHORIZATION])) - .layer( - TraceLayer::new_for_http() - .make_span_with(tracing_span::<_>) - .on_failure(DefaultOnFailure::new().level(Level::ERROR)) - .on_request(DefaultOnRequest::new().level(Level::TRACE)) - .on_response(DefaultOnResponse::new().level(Level::DEBUG)), - ) - .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::handle)) - .layer(SecureClientIpSource::ConnectInfo.into_extension()) - .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs( - server.config.client_response_timeout, - ))) - .layer(RequestBodyTimeoutLayer::new(Duration::from_secs( - server.config.client_receive_timeout, - ))) - .layer(TimeoutLayer::new(Duration::from_secs(server.config.client_request_timeout))) - .layer(SetResponseHeaderLayer::if_not_present( - HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster - HeaderValue::from_static("?1"), - )) - .layer(SetResponseHeaderLayer::if_not_present( - header::X_CONTENT_TYPE_OPTIONS, - HeaderValue::from_static("nosniff"), - )) - .layer(SetResponseHeaderLayer::if_not_present( - header::X_XSS_PROTECTION, - HeaderValue::from_static("0"), - )) - .layer(SetResponseHeaderLayer::if_not_present( - header::X_FRAME_OPTIONS, - HeaderValue::from_static("DENY"), - )) - .layer(SetResponseHeaderLayer::if_not_present( - HeaderName::from_static("permissions-policy"), - HeaderValue::from_str(&CONDUWUIT_PERMISSIONS_POLICY.join(","))?, - )) - .layer(SetResponseHeaderLayer::if_not_present( - header::CONTENT_SECURITY_POLICY, - HeaderValue::from_str(&CONDUWUIT_CSP.join(";"))?, - )) - .layer(cors_layer(server)) - .layer(body_limit_layer(server)) - .layer(CatchPanicLayer::custom(move |panic| catch_panic(panic, services_.clone()))); - - let (router, guard) = router::build(services); - Ok((router.layer(layers), 
guard)) -} - -#[cfg(any( - feature = "zstd_compression", - feature = "gzip_compression", - feature = "brotli_compression" -))] -fn compression_layer(server: &Server) -> tower_http::compression::CompressionLayer { - let mut compression_layer = tower_http::compression::CompressionLayer::new(); - - #[cfg(feature = "zstd_compression")] - { - compression_layer = if server.config.zstd_compression { - compression_layer.zstd(true) - } else { - compression_layer.no_zstd() - }; - }; - - #[cfg(feature = "gzip_compression")] - { - compression_layer = if server.config.gzip_compression { - compression_layer.gzip(true) - } else { - compression_layer.no_gzip() - }; - }; - - #[cfg(feature = "brotli_compression")] - { - compression_layer = if server.config.brotli_compression { - compression_layer.br(true) - } else { - compression_layer.no_br() - }; - }; - - compression_layer -} - -fn cors_layer(_server: &Server) -> CorsLayer { - const METHODS: [Method; 7] = [ - Method::GET, - Method::HEAD, - Method::PATCH, - Method::POST, - Method::PUT, - Method::DELETE, - Method::OPTIONS, - ]; - - let headers: [HeaderName; 5] = [ - header::ORIGIN, - HeaderName::from_lowercase(b"x-requested-with").unwrap(), - header::CONTENT_TYPE, - header::ACCEPT, - header::AUTHORIZATION, - ]; - - CorsLayer::new() - .allow_origin(cors::Any) - .allow_methods(METHODS) - .allow_headers(headers) - .max_age(Duration::from_secs(86400)) -} - -fn body_limit_layer(server: &Server) -> DefaultBodyLimit { - DefaultBodyLimit::max(server.config.max_request_size) -} - -#[tracing::instrument(name = "panic", level = "error", skip_all)] -#[allow(clippy::needless_pass_by_value)] -fn catch_panic( - err: Box, - services: Arc, -) -> http::Response> { - services - .server - .metrics - .requests_panic - .fetch_add(1, std::sync::atomic::Ordering::Release); - - let details = match err.downcast_ref::() { - | Some(s) => s.clone(), - | _ => match err.downcast_ref::<&str>() { - | Some(s) => (*s).to_owned(), - | _ => "Unknown internal server 
error occurred.".to_owned(), - }, - }; - - error!("{details:#}"); - let body = serde_json::json!({ - "errcode": "M_UNKNOWN", - "error": "M_UNKNOWN: Internal server error occurred", - "details": details, - }); - - http::Response::builder() - .status(StatusCode::INTERNAL_SERVER_ERROR) - .header(header::CONTENT_TYPE, "application/json") - .body(http_body_util::Full::from(body.to_string())) - .expect("Failed to create response for our panic catcher?") -} - -fn tracing_span(request: &http::Request) -> tracing::Span { - let path = request - .extensions() - .get::() - .map_or_else(|| request_path_str(request), truncated_matched_path); - - tracing::span! { - parent: None, - debug::INFO_SPAN_LEVEL, - "router", - method = %request.method(), - %path, - } -} - -fn request_path_str(request: &http::Request) -> &str { - request - .uri() - .path_and_query() - .expect("all requests have a path") - .as_str() -} - -fn truncated_matched_path(path: &MatchedPath) -> &str { - path.as_str() - .rsplit_once(':') - .map_or(path.as_str(), |path| path.0.strip_suffix('/').unwrap_or(path.0)) -} diff --git a/src/router/mod.rs b/src/router/mod.rs deleted file mode 100644 index 7038c5df..00000000 --- a/src/router/mod.rs +++ /dev/null @@ -1,52 +0,0 @@ -#![type_length_limit = "32768"] //TODO: reduce me - -mod layers; -mod request; -mod router; -mod run; -mod serve; - -extern crate conduwuit_core as conduwuit; - -use std::{panic::AssertUnwindSafe, pin::Pin, sync::Arc}; - -use conduwuit::{Error, Result, Server}; -use conduwuit_service::Services; -use futures::{Future, FutureExt, TryFutureExt}; - -conduwuit::mod_ctor! {} -conduwuit::mod_dtor! {} -conduwuit::rustc_flags_capture! 
{} - -#[unsafe(no_mangle)] -pub extern "Rust" fn start( - server: &Arc, -) -> Pin>> + Send>> { - AssertUnwindSafe(run::start(server.clone())) - .catch_unwind() - .map_err(Error::from_panic) - .unwrap_or_else(Err) - .boxed() -} - -#[unsafe(no_mangle)] -pub extern "Rust" fn stop( - services: Arc, -) -> Pin> + Send>> { - AssertUnwindSafe(run::stop(services)) - .catch_unwind() - .map_err(Error::from_panic) - .unwrap_or_else(Err) - .boxed() -} - -#[unsafe(no_mangle)] -pub extern "Rust" fn run( - services: &Arc, -) -> Pin> + Send>> { - AssertUnwindSafe(run::run(services.clone())) - .catch_unwind() - .map_err(Error::from_panic) - .unwrap_or_else(Err) - .boxed() -} diff --git a/src/router/request.rs b/src/router/request.rs deleted file mode 100644 index dba90324..00000000 --- a/src/router/request.rs +++ /dev/null @@ -1,126 +0,0 @@ -use std::{ - fmt::Debug, - sync::{Arc, atomic::Ordering}, - time::Duration, -}; - -use axum::{ - extract::State, - response::{IntoResponse, Response}, -}; -use conduwuit::{Result, debug, debug_error, debug_warn, err, error, trace}; -use conduwuit_service::Services; -use futures::FutureExt; -use http::{Method, StatusCode, Uri}; -use tokio::time::sleep; -use tracing::Span; - -#[tracing::instrument(name = "request", level = "debug", skip_all)] -pub(crate) async fn handle( - State(services): State>, - req: http::Request, - next: axum::middleware::Next, -) -> Result { - if !services.server.running() { - debug_warn!( - method = %req.method(), - uri = %req.uri(), - "unavailable pending shutdown" - ); - - return Err(StatusCode::SERVICE_UNAVAILABLE); - } - - let uri = req.uri().clone(); - let method = req.method().clone(); - let services_ = services.clone(); - let parent = Span::current(); - let task = services.server.runtime().spawn(async move { - tokio::select! 
{ - response = execute(&services_, req, next, &parent) => response, - response = services_.server.until_shutdown() - .then(|()| { - let timeout = services_.server.config.client_shutdown_timeout; - let timeout = Duration::from_secs(timeout); - sleep(timeout) - }) - .map(|()| StatusCode::SERVICE_UNAVAILABLE) - .map(IntoResponse::into_response) => response, - } - }); - - task.await - .map_err(unhandled) - .and_then(move |result| handle_result(&method, &uri, result)) -} - -#[tracing::instrument( - name = "handle", - level = "debug", - parent = parent, - skip_all, - fields( - active = %services - .server - .metrics - .requests_handle_active - .fetch_add(1, Ordering::Relaxed), - handled = %services - .server - .metrics - .requests_handle_finished - .load(Ordering::Relaxed), - ) -)] -async fn execute( - // we made a safety contract that Services will not go out of scope - // during the request; this ensures a reference is accounted for at - // the base frame of the task regardless of its detachment. - services: &Arc, - req: http::Request, - next: axum::middleware::Next, - parent: &Span, -) -> Response { - #[cfg(debug_assertions)] - conduwuit::defer! 
{{ - _ = services.server - .metrics - .requests_handle_finished - .fetch_add(1, Ordering::Relaxed); - _ = services.server - .metrics - .requests_handle_active - .fetch_sub(1, Ordering::Relaxed); - }}; - - next.run(req).await -} - -fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result { - let status = result.status(); - let reason = status.canonical_reason().unwrap_or("Unknown Reason"); - let code = status.as_u16(); - - if status.is_server_error() { - error!(method = ?method, uri = ?uri, "{code} {reason}"); - } else if status.is_client_error() { - debug_error!(method = ?method, uri = ?uri, "{code} {reason}"); - } else if status.is_redirection() { - debug!(method = ?method, uri = ?uri, "{code} {reason}"); - } else { - trace!(method = ?method, uri = ?uri, "{code} {reason}"); - } - - if status == StatusCode::METHOD_NOT_ALLOWED { - return Ok(err!(Request(Unrecognized("Method Not Allowed"))).into_response()); - } - - Ok(result) -} - -#[cold] -fn unhandled(e: Error) -> StatusCode { - error!("unhandled error or panic during request: {e:?}"); - - StatusCode::INTERNAL_SERVER_ERROR -} diff --git a/src/router/router.rs b/src/router/router.rs deleted file mode 100644 index 0f95b924..00000000 --- a/src/router/router.rs +++ /dev/null @@ -1,25 +0,0 @@ -use std::sync::Arc; - -use axum::{Router, response::IntoResponse, routing::get}; -use conduwuit::Error; -use conduwuit_api::router::{state, state::Guard}; -use conduwuit_service::Services; -use http::{StatusCode, Uri}; -use ruma::api::client::error::ErrorKind; - -pub(crate) fn build(services: &Arc) -> (Router, Guard) { - let router = Router::::new(); - let (state, guard) = state::create(services.clone()); - let router = conduwuit_api::router::build(router, &services.server) - .route("/", get(it_works)) - .fallback(not_found) - .with_state(state); - - (router, guard) -} - -async fn not_found(_uri: Uri) -> impl IntoResponse { - Error::Request(ErrorKind::Unrecognized, "Not Found".into(), StatusCode::NOT_FOUND) -} - 
-async fn it_works() -> &'static str { "hewwo from conduwuit woof!" } diff --git a/src/router/run.rs b/src/router/run.rs deleted file mode 100644 index ff54594f..00000000 --- a/src/router/run.rs +++ /dev/null @@ -1,154 +0,0 @@ -extern crate conduwuit_admin as admin; -extern crate conduwuit_core as conduwuit; -extern crate conduwuit_service as service; - -use std::{ - sync::{Arc, Weak, atomic::Ordering}, - time::Duration, -}; - -use axum_server::Handle as ServerHandle; -use conduwuit::{Error, Result, Server, debug, debug_error, debug_info, error, info}; -use futures::FutureExt; -use service::Services; -use tokio::{ - sync::broadcast::{self, Sender}, - task::JoinHandle, -}; - -use crate::serve; - -/// Main loop base -#[tracing::instrument(skip_all)] -pub(crate) async fn run(services: Arc) -> Result<()> { - let server = &services.server; - debug!("Start"); - - // Install the admin room callback here for now - admin::init(&services.admin).await; - - // Setup shutdown/signal handling - let handle = ServerHandle::new(); - let (tx, _) = broadcast::channel::<()>(1); - let sigs = server - .runtime() - .spawn(signal(server.clone(), tx.clone(), handle.clone())); - - let mut listener = - server - .runtime() - .spawn(serve::serve(services.clone(), handle.clone(), tx.subscribe())); - - // Focal point - debug!("Running"); - let res = tokio::select! { - res = &mut listener => res.map_err(Error::from).unwrap_or_else(Err), - res = services.poll() => handle_services_poll(server, res, listener).await, - }; - - // Join the signal handler before we leave. 
- sigs.abort(); - _ = sigs.await; - - // Remove the admin room callback - admin::fini(&services.admin).await; - - debug_info!("Finish"); - res -} - -/// Async initializations -#[tracing::instrument(skip_all)] -pub(crate) async fn start(server: Arc) -> Result> { - debug!("Starting..."); - - let services = Services::build(server).await?.start().await?; - - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) - .expect("failed to notify systemd of ready state"); - - debug!("Started"); - Ok(services) -} - -/// Async destructions -#[tracing::instrument(skip_all)] -pub(crate) async fn stop(services: Arc) -> Result<()> { - debug!("Shutting down..."); - - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) - .expect("failed to notify systemd of stopping state"); - - // Wait for all completions before dropping or we'll lose them to the module - // unload and explode. - services.stop().await; - - // Check that Services and Database will drop as expected, The complex of Arc's - // used for various components can easily lead to references being held - // somewhere improperly; this can hang shutdowns. 
- debug!("Cleaning up..."); - let db = Arc::downgrade(&services.db); - if let Err(services) = Arc::try_unwrap(services) { - debug_error!( - "{} dangling references to Services after shutdown", - Arc::strong_count(&services) - ); - } - - if Weak::strong_count(&db) > 0 { - debug_error!( - "{} dangling references to Database after shutdown", - Weak::strong_count(&db) - ); - } - - info!("Shutdown complete."); - Ok(()) -} - -#[tracing::instrument(skip_all)] -async fn signal(server: Arc, tx: Sender<()>, handle: axum_server::Handle) { - server - .clone() - .until_shutdown() - .then(move |()| handle_shutdown(server, tx, handle)) - .await; -} - -async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_server::Handle) { - if let Err(e) = tx.send(()) { - error!("failed sending shutdown transaction to channel: {e}"); - } - - let timeout = server.config.client_shutdown_timeout; - let timeout = Duration::from_secs(timeout); - debug!( - ?timeout, - handle_active = ?server.metrics.requests_handle_active.load(Ordering::Relaxed), - "Notifying for graceful shutdown" - ); - - handle.graceful_shutdown(Some(timeout)); -} - -async fn handle_services_poll( - server: &Arc, - result: Result<()>, - listener: JoinHandle>, -) -> Result<()> { - debug!("Service manager finished: {result:?}"); - - if server.running() { - if let Err(e) = server.shutdown() { - error!("Failed to send shutdown signal: {e}"); - } - } - - if let Err(e) = listener.await { - error!("Client listener task finished with error: {e}"); - } - - result -} diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs deleted file mode 100644 index 2399edf0..00000000 --- a/src/router/serve/mod.rs +++ /dev/null @@ -1,46 +0,0 @@ -mod plain; -#[cfg(feature = "direct_tls")] -mod tls; -mod unix; - -use std::sync::Arc; - -use axum_server::Handle as ServerHandle; -use conduwuit::{Result, err}; -use conduwuit_service::Services; -use tokio::sync::broadcast; - -use super::layers; - -/// Serve clients -pub(super) async fn serve( 
- services: Arc, - handle: ServerHandle, - mut shutdown: broadcast::Receiver<()>, -) -> Result { - let server = &services.server; - let config = &server.config; - if !config.listening { - return shutdown - .recv() - .await - .map_err(|e| err!(error!("channel error: {e}"))); - } - - let addrs = config.get_bind_addrs(); - let (app, _guard) = layers::build(&services)?; - if cfg!(unix) && config.unix_socket_path.is_some() { - unix::serve(server, app, shutdown).await - } else if config.tls.certs.is_some() { - #[cfg(feature = "direct_tls")] - return tls::serve(server, app, handle, addrs).await; - - #[cfg(not(feature = "direct_tls"))] - return conduwuit::Err!(Config( - "tls", - "conduwuit was not built with direct TLS support (\"direct_tls\")" - )); - } else { - plain::serve(server, app, handle, addrs).await - } -} diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs deleted file mode 100644 index 6db7e138..00000000 --- a/src/router/serve/plain.rs +++ /dev/null @@ -1,44 +0,0 @@ -use std::{ - net::SocketAddr, - sync::{Arc, atomic::Ordering}, -}; - -use axum::Router; -use axum_server::{Handle as ServerHandle, bind}; -use conduwuit::{Result, Server, debug_info, info}; -use tokio::task::JoinSet; - -pub(super) async fn serve( - server: &Arc, - app: Router, - handle: ServerHandle, - addrs: Vec, -) -> Result<()> { - let app = app.into_make_service_with_connect_info::(); - let mut join_set = JoinSet::new(); - for addr in &addrs { - join_set - .spawn_on(bind(*addr).handle(handle.clone()).serve(app.clone()), server.runtime()); - } - - info!("Listening on {addrs:?}"); - while join_set.join_next().await.is_some() {} - - let handle_active = server - .metrics - .requests_handle_active - .load(Ordering::Relaxed); - debug_info!( - handle_finished = server - .metrics - .requests_handle_finished - .load(Ordering::Relaxed), - panics = server.metrics.requests_panic.load(Ordering::Relaxed), - handle_active, - "Stopped listening on {addrs:?}", - ); - - debug_assert!(handle_active 
== 0, "active request handles still pending"); - - Ok(()) -} diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs deleted file mode 100644 index 20b58601..00000000 --- a/src/router/serve/tls.rs +++ /dev/null @@ -1,78 +0,0 @@ -use std::{net::SocketAddr, sync::Arc}; - -use axum::Router; -use axum_server::Handle as ServerHandle; -use axum_server_dual_protocol::{ - ServerExt, - axum_server::{bind_rustls, tls_rustls::RustlsConfig}, -}; -use conduwuit::{Result, Server, err}; -use tokio::task::JoinSet; -use tracing::{debug, info, warn}; - -pub(super) async fn serve( - server: &Arc, - app: Router, - handle: ServerHandle, - addrs: Vec, -) -> Result { - let tls = &server.config.tls; - let certs = tls.certs.as_ref().ok_or_else(|| { - err!(Config("tls.certs", "Missing required value in tls config section")) - })?; - let key = tls - .key - .as_ref() - .ok_or_else(|| err!(Config("tls.key", "Missing required value in tls config section")))?; - - // we use ring for ruma and hashing state, but aws-lc-rs is the new default. - // without this, TLS mode will panic. - rustls::crypto::aws_lc_rs::default_provider() - .install_default() - .expect("failed to initialise aws-lc-rs rustls crypto provider"); - - info!( - "Note: It is strongly recommended that you use a reverse proxy instead of running \ - conduwuit directly with TLS." - ); - debug!("Using direct TLS. 
Certificate path {certs} and certificate private key path {key}",); - let conf = RustlsConfig::from_pem_file(certs, key) - .await - .map_err(|e| err!(Config("tls", "Failed to load certificates or key: {e}")))?; - - let mut join_set = JoinSet::new(); - let app = app.into_make_service_with_connect_info::(); - if tls.dual_protocol { - for addr in &addrs { - join_set.spawn_on( - axum_server_dual_protocol::bind_dual_protocol(*addr, conf.clone()) - .set_upgrade(false) - .handle(handle.clone()) - .serve(app.clone()), - server.runtime(), - ); - } - } else { - for addr in &addrs { - join_set.spawn_on( - bind_rustls(*addr, conf.clone()) - .handle(handle.clone()) - .serve(app.clone()), - server.runtime(), - ); - } - } - - if tls.dual_protocol { - warn!( - "Listening on {addrs:?} with TLS certificate {certs} and supporting plain text \ - (HTTP) connections too (insecure!)", - ); - } else { - info!("Listening on {addrs:?} with TLS certificate {certs}"); - } - - while join_set.join_next().await.is_some() {} - - Ok(()) -} diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs deleted file mode 100644 index 2af17274..00000000 --- a/src/router/serve/unix.rs +++ /dev/null @@ -1,185 +0,0 @@ -#![cfg(unix)] - -use std::{ - net::{self, IpAddr, Ipv4Addr}, - os::fd::AsRawFd, - path::Path, - sync::{Arc, atomic::Ordering}, -}; - -use axum::{ - Router, - extract::{Request, connect_info::IntoMakeServiceWithConnectInfo}, -}; -use conduwuit::{ - Err, Result, Server, debug, debug_error, info, result::UnwrapInfallible, trace, warn, -}; -use hyper::{body::Incoming, service::service_fn}; -use hyper_util::{ - rt::{TokioExecutor, TokioIo}, - server, -}; -use tokio::{ - fs, - net::{UnixListener, UnixStream, unix::SocketAddr}, - sync::broadcast::{self}, - task::JoinSet, - time::{Duration, sleep}, -}; -use tower::{Service, ServiceExt}; - -type MakeService = IntoMakeServiceWithConnectInfo; - -const NULL_ADDR: net::SocketAddr = net::SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0); 
-const FINI_POLL_INTERVAL: Duration = Duration::from_millis(750); - -#[tracing::instrument(skip_all, level = "debug")] -pub(super) async fn serve( - server: &Arc, - app: Router, - mut shutdown: broadcast::Receiver<()>, -) -> Result<()> { - let mut tasks = JoinSet::<()>::new(); - let executor = TokioExecutor::new(); - let app = app.into_make_service_with_connect_info::(); - let builder = server::conn::auto::Builder::new(executor); - let listener = init(server).await?; - while server.running() { - let app = app.clone(); - let builder = builder.clone(); - tokio::select! { - _sig = shutdown.recv() => break, - conn = listener.accept() => match conn { - Ok(conn) => accept(server, &listener, &mut tasks, app, builder, conn).await, - Err(err) => debug_error!(?listener, "accept error: {err}"), - }, - } - } - - fini(server, listener, tasks).await; - - Ok(()) -} - -#[tracing::instrument( - level = "trace", - skip_all, - fields( - ?listener, - socket = ?conn.0, - ), -)] -async fn accept( - server: &Arc, - listener: &UnixListener, - tasks: &mut JoinSet<()>, - app: MakeService, - builder: server::conn::auto::Builder, - conn: (UnixStream, SocketAddr), -) { - let (socket, _) = conn; - let server_ = server.clone(); - let task = async move { accepted(server_, builder, socket, app).await }; - - _ = tasks.spawn_on(task, server.runtime()); - while tasks.try_join_next().is_some() {} -} - -#[tracing::instrument( - level = "trace", - skip_all, - fields( - fd = %socket.as_raw_fd(), - path = ?socket.local_addr(), - ), -)] -async fn accepted( - server: Arc, - builder: server::conn::auto::Builder, - socket: UnixStream, - mut app: MakeService, -) { - let socket = TokioIo::new(socket); - let called = app.call(NULL_ADDR).await.unwrap_infallible(); - let service = move |req: Request| called.clone().oneshot(req); - let handler = service_fn(service); - trace!(?socket, ?handler, "serving connection"); - - // bug on darwin causes all results to be errors. do not unwrap this - tokio::select! 
{ - () = server.until_shutdown() => (), - _ = builder.serve_connection(socket, handler) => (), - }; -} - -async fn init(server: &Arc) -> Result { - use std::os::unix::fs::PermissionsExt; - - let config = &server.config; - let path = config - .unix_socket_path - .as_ref() - .expect("failed to extract configured unix socket path"); - - if path.exists() { - warn!("Removing existing UNIX socket {:#?} (unclean shutdown?)...", path.display()); - fs::remove_file(&path) - .await - .map_err(|e| warn!("Failed to remove existing UNIX socket: {e}")) - .unwrap(); - } - - let dir = path.parent().unwrap_or_else(|| Path::new("/")); - if let Err(e) = fs::create_dir_all(dir).await { - return Err!("Failed to create {dir:?} for socket {path:?}: {e}"); - } - - let listener = UnixListener::bind(path); - if let Err(e) = listener { - return Err!("Failed to bind listener {path:?}: {e}"); - } - - let socket_perms = config.unix_socket_perms.to_string(); - let octal_perms = - u32::from_str_radix(&socket_perms, 8).expect("failed to convert octal permissions"); - let perms = std::fs::Permissions::from_mode(octal_perms); - if let Err(e) = fs::set_permissions(&path, perms).await { - return Err!("Failed to set socket {path:?} permissions: {e}"); - } - - info!("Listening at {path:?}"); - - Ok(listener.unwrap()) -} - -async fn fini(server: &Arc, listener: UnixListener, mut tasks: JoinSet<()>) { - let local = listener.local_addr(); - - debug!("Closing listener at {local:?} ..."); - drop(listener); - - debug!("Waiting for requests to finish..."); - while server - .metrics - .requests_handle_active - .load(Ordering::Relaxed) - .gt(&0) - { - tokio::select! 
{ - task = tasks.join_next() => if task.is_none() { break; }, - () = sleep(FINI_POLL_INTERVAL) => {}, - } - } - - debug!("Shutting down..."); - tasks.shutdown().await; - - if let Ok(local) = local { - if let Some(path) = local.as_pathname() { - debug!(?path, "Removing unix socket file."); - if let Err(e) = fs::remove_file(path).await { - warn!(?path, "Failed to remove UNIX socket file: {e}"); - } - } - } -} diff --git a/src/routes.rs b/src/routes.rs new file mode 100644 index 00000000..a29cacd3 --- /dev/null +++ b/src/routes.rs @@ -0,0 +1,325 @@ +use std::future::Future; + +use axum::{ + extract::FromRequestParts, + response::IntoResponse, + routing::{any, get, on, post, MethodFilter}, + Router, +}; +use conduit::{ + api::{client_server, server_server}, + Config, Error, Result, Ruma, RumaResponse, +}; +use http::{Method, Uri}; +use ruma::api::{client::error::ErrorKind, IncomingRequest}; +use tracing::{info, warn}; + +pub fn routes(config: &Config) -> Router { + let router = Router::new() + .ruma_route(client_server::get_supported_versions_route) + .ruma_route(client_server::get_register_available_route) + .ruma_route(client_server::register_route) + .ruma_route(client_server::get_login_types_route) + .ruma_route(client_server::login_route) + .ruma_route(client_server::whoami_route) + .ruma_route(client_server::logout_route) + .ruma_route(client_server::logout_all_route) + .ruma_route(client_server::change_password_route) + .ruma_route(client_server::deactivate_route) + .ruma_route(client_server::third_party_route) + .ruma_route(client_server::request_3pid_management_token_via_email_route) + .ruma_route(client_server::request_3pid_management_token_via_msisdn_route) + .ruma_route(client_server::get_capabilities_route) + .ruma_route(client_server::get_pushrules_all_route) + .ruma_route(client_server::set_pushrule_route) + .ruma_route(client_server::get_pushrule_route) + .ruma_route(client_server::set_pushrule_enabled_route) + 
.ruma_route(client_server::get_pushrule_enabled_route) + .ruma_route(client_server::get_pushrule_actions_route) + .ruma_route(client_server::set_pushrule_actions_route) + .ruma_route(client_server::delete_pushrule_route) + .ruma_route(client_server::get_room_event_route) + .ruma_route(client_server::get_room_aliases_route) + .ruma_route(client_server::get_filter_route) + .ruma_route(client_server::create_filter_route) + .ruma_route(client_server::set_global_account_data_route) + .ruma_route(client_server::set_room_account_data_route) + .ruma_route(client_server::get_global_account_data_route) + .ruma_route(client_server::get_room_account_data_route) + .ruma_route(client_server::set_displayname_route) + .ruma_route(client_server::get_displayname_route) + .ruma_route(client_server::set_avatar_url_route) + .ruma_route(client_server::get_avatar_url_route) + .ruma_route(client_server::get_profile_route) + .ruma_route(client_server::set_presence_route) + .ruma_route(client_server::get_presence_route) + .ruma_route(client_server::upload_keys_route) + .ruma_route(client_server::get_keys_route) + .ruma_route(client_server::claim_keys_route) + .ruma_route(client_server::create_backup_version_route) + .ruma_route(client_server::update_backup_version_route) + .ruma_route(client_server::delete_backup_version_route) + .ruma_route(client_server::get_latest_backup_info_route) + .ruma_route(client_server::get_backup_info_route) + .ruma_route(client_server::add_backup_keys_route) + .ruma_route(client_server::add_backup_keys_for_room_route) + .ruma_route(client_server::add_backup_keys_for_session_route) + .ruma_route(client_server::delete_backup_keys_for_room_route) + .ruma_route(client_server::delete_backup_keys_for_session_route) + .ruma_route(client_server::delete_backup_keys_route) + .ruma_route(client_server::get_backup_keys_for_room_route) + .ruma_route(client_server::get_backup_keys_for_session_route) + .ruma_route(client_server::get_backup_keys_route) + 
.ruma_route(client_server::set_read_marker_route) + .ruma_route(client_server::create_receipt_route) + .ruma_route(client_server::create_typing_event_route) + .ruma_route(client_server::create_room_route) + .ruma_route(client_server::redact_event_route) + .ruma_route(client_server::report_event_route) + .ruma_route(client_server::create_alias_route) + .ruma_route(client_server::delete_alias_route) + .ruma_route(client_server::get_alias_route) + .ruma_route(client_server::join_room_by_id_route) + .ruma_route(client_server::join_room_by_id_or_alias_route) + .ruma_route(client_server::joined_members_route) + .ruma_route(client_server::leave_room_route) + .ruma_route(client_server::forget_room_route) + .ruma_route(client_server::joined_rooms_route) + .ruma_route(client_server::kick_user_route) + .ruma_route(client_server::ban_user_route) + .ruma_route(client_server::unban_user_route) + .ruma_route(client_server::invite_user_route) + .ruma_route(client_server::set_room_visibility_route) + .ruma_route(client_server::get_room_visibility_route) + .ruma_route(client_server::get_public_rooms_route) + .ruma_route(client_server::get_public_rooms_filtered_route) + .ruma_route(client_server::search_users_route) + .ruma_route(client_server::get_member_events_route) + .ruma_route(client_server::get_protocols_route) + .ruma_route(client_server::send_message_event_route) + .ruma_route(client_server::send_state_event_for_key_route) + .ruma_route(client_server::get_state_events_route) + .ruma_route(client_server::get_state_events_for_key_route) + // Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes + // share one Ruma request / response type pair with {get,send}_state_event_for_key_route + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type", + 
get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + // These two endpoints allow trailing slashes + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type/", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type/", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + .ruma_route(client_server::sync_events_route) + .ruma_route(client_server::sync_events_v4_route) + .ruma_route(client_server::get_context_route) + .ruma_route(client_server::get_message_events_route) + .ruma_route(client_server::search_events_route) + .ruma_route(client_server::turn_server_route) + .ruma_route(client_server::send_event_to_device_route) + .ruma_route(client_server::get_media_config_route) + .ruma_route(client_server::get_media_preview_route) + .ruma_route(client_server::create_content_route) + // legacy v1 media routes + .route( + "/_matrix/media/v1/preview_url", + get(client_server::get_media_preview_v1_route) + ) + .route( + "/_matrix/media/v1/config", + get(client_server::get_media_config_v1_route) + ) + .route( + "/_matrix/media/v1/upload", + post(client_server::create_content_v1_route) + ) + .route( + "/_matrix/media/v1/download/:server_name/:media_id", + get(client_server::get_content_v1_route) + ) + .route( + "/_matrix/media/v1/download/:server_name/:media_id/:file_name", + get(client_server::get_content_as_filename_v1_route) + ) + .route( + "/_matrix/media/v1/thumbnail/:server_name/:media_id", + get(client_server::get_content_thumbnail_v1_route) + ) + .ruma_route(client_server::get_content_route) + .ruma_route(client_server::get_content_as_filename_route) + .ruma_route(client_server::get_content_thumbnail_route) + .ruma_route(client_server::get_devices_route) + 
.ruma_route(client_server::get_device_route) + .ruma_route(client_server::update_device_route) + .ruma_route(client_server::delete_device_route) + .ruma_route(client_server::delete_devices_route) + .ruma_route(client_server::get_tags_route) + .ruma_route(client_server::update_tag_route) + .ruma_route(client_server::delete_tag_route) + .ruma_route(client_server::upload_signing_keys_route) + .ruma_route(client_server::upload_signatures_route) + .ruma_route(client_server::get_key_changes_route) + .ruma_route(client_server::get_pushers_route) + .ruma_route(client_server::set_pushers_route) + // .ruma_route(client_server::third_party_route) + .ruma_route(client_server::upgrade_room_route) + .ruma_route(client_server::get_threads_route) + .ruma_route(client_server::get_relating_events_with_rel_type_and_event_type_route) + .ruma_route(client_server::get_relating_events_with_rel_type_route) + .ruma_route(client_server::get_relating_events_route) + .ruma_route(client_server::get_hierarchy_route) + .ruma_route(client_server::get_mutual_rooms_route) + .ruma_route(client_server::well_known_support) + .ruma_route(client_server::well_known_client) + .route("/_conduwuit/server_version", get(client_server::conduwuit_server_version)) + .route("/_matrix/client/r0/rooms/:room_id/initialSync", get(initial_sync)) + .route("/_matrix/client/v3/rooms/:room_id/initialSync", get(initial_sync)) + .route("/client/server.json", get(client_server::syncv3_client_server_json)) + .route("/", get(it_works)) + .fallback(not_found); + + if config.allow_federation { + router + .ruma_route(server_server::get_server_version_route) + .route("/_matrix/key/v2/server", get(server_server::get_server_keys_route)) + .route( + "/_matrix/key/v2/server/:key_id", + get(server_server::get_server_keys_deprecated_route), + ) + .ruma_route(server_server::get_public_rooms_route) + .ruma_route(server_server::get_public_rooms_filtered_route) + .ruma_route(server_server::send_transaction_message_route) + 
.ruma_route(server_server::get_event_route) + .ruma_route(server_server::get_backfill_route) + .ruma_route(server_server::get_missing_events_route) + .ruma_route(server_server::get_event_authorization_route) + .ruma_route(server_server::get_room_state_route) + .ruma_route(server_server::get_room_state_ids_route) + .ruma_route(server_server::create_join_event_template_route) + .ruma_route(server_server::create_join_event_v1_route) + .ruma_route(server_server::create_join_event_v2_route) + .ruma_route(server_server::create_invite_route) + .ruma_route(server_server::get_devices_route) + .ruma_route(server_server::get_room_information_route) + .ruma_route(server_server::get_profile_information_route) + .ruma_route(server_server::get_keys_route) + .ruma_route(server_server::claim_keys_route) + .ruma_route(server_server::get_hierarchy_route) + .ruma_route(server_server::well_known_server) + } else { + router + .route("/_matrix/federation/*path", any(federation_disabled)) + .route("/.well-known/matrix/server", any(federation_disabled)) + .route("/_matrix/key/*path", any(federation_disabled)) + } +} + +async fn not_found(uri: Uri) -> impl IntoResponse { + if uri.path().contains("_matrix/") { + warn!("Not found: {uri}"); + } else { + info!("Not found: {uri}"); + } + + Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request") +} + +async fn initial_sync(_uri: Uri) -> impl IntoResponse { + Error::BadRequest(ErrorKind::GuestAccessForbidden, "Guest access not implemented") +} + +async fn it_works() -> &'static str { "hewwo from conduwuit woof!" 
} + +async fn federation_disabled() -> impl IntoResponse { Error::bad_config("Federation is disabled.") } + +trait RouterExt { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static; +} + +impl RouterExt for Router { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static, + { + handler.add_to_router(self) + } +} + +pub trait RumaHandler { + // Can't transform to a handler without boxing or relying on the nightly-only + // impl-trait-in-traits feature. Moving a small amount of extra logic into the + // trait allows bypassing both. + fn add_to_router(self, router: Router) -> Router; +} + +macro_rules! impl_ruma_handler { + ( $($ty:ident),* $(,)? ) => { + #[axum::async_trait] + #[allow(non_snake_case)] + impl RumaHandler<($($ty,)* Ruma,)> for F + where + Req: IncomingRequest + Send + 'static, + F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, + Fut: Future> + + Send, + E: IntoResponse, + $( $ty: FromRequestParts<()> + Send + 'static, )* + { + fn add_to_router(self, mut router: Router) -> Router { + let meta = Req::METADATA; + let method_filter = method_to_filter(meta.method); + + for path in meta.history.all_paths() { + let handler = self.clone(); + + router = router.route(path, on(method_filter, |$( $ty: $ty, )* req| async move { + handler($($ty,)* req).await.map(RumaResponse) + })) + } + + router + } + } + }; +} + +impl_ruma_handler!(); +impl_ruma_handler!(T1); +impl_ruma_handler!(T1, T2); +impl_ruma_handler!(T1, T2, T3); +impl_ruma_handler!(T1, T2, T3, T4); +impl_ruma_handler!(T1, T2, T3, T4, T5); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); + +fn method_to_filter(method: Method) -> MethodFilter { + match method { + Method::DELETE => MethodFilter::DELETE, + Method::GET => MethodFilter::GET, + Method::HEAD => MethodFilter::HEAD, + Method::OPTIONS => MethodFilter::OPTIONS, + Method::PATCH => 
MethodFilter::PATCH, + Method::POST => MethodFilter::POST, + Method::PUT => MethodFilter::PUT, + Method::TRACE => MethodFilter::TRACE, + m => panic!("Unsupported HTTP method: {m:?}"), + } +} diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml deleted file mode 100644 index 8b0d1405..00000000 --- a/src/service/Cargo.toml +++ /dev/null @@ -1,116 +0,0 @@ -[package] -name = "conduwuit_service" -categories.workspace = true -description.workspace = true -edition.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version.workspace = true - -[lib] -path = "mod.rs" -crate-type = [ - "rlib", -# "dylib", -] - -[features] -blurhashing = [ - "dep:image", - "dep:blurhash", -] -brotli_compression = [ - "conduwuit-core/brotli_compression", - "reqwest/brotli", -] -console = [ - "dep:rustyline-async", - "dep:termimad", -] -element_hacks = [] -gzip_compression = [ - "conduwuit-core/gzip_compression", - "reqwest/gzip", -] -io_uring = [ - "conduwuit-database/io_uring", -] -jemalloc = [ - "conduwuit-core/jemalloc", - "conduwuit-database/jemalloc", -] -jemalloc_conf = [ - "conduwuit-core/jemalloc_conf", - "conduwuit-database/jemalloc_conf", -] -jemalloc_prof = [ - "conduwuit-core/jemalloc_prof", - "conduwuit-database/jemalloc_prof", -] -jemalloc_stats = [ - "conduwuit-core/jemalloc_stats", - "conduwuit-database/jemalloc_stats", -] -media_thumbnail = [ - "dep:image", -] -release_max_log_level = [ - "conduwuit-core/release_max_log_level", - "conduwuit-database/release_max_log_level", - "log/max_level_trace", - "log/release_max_level_info", - "tracing/max_level_trace", - "tracing/release_max_level_info", -] -url_preview = [ - "dep:image", - "dep:webpage", -] -zstd_compression = [ - "conduwuit-core/zstd_compression", - "conduwuit-database/zstd_compression", - "reqwest/zstd", -] - -[dependencies] -async-trait.workspace = true -base64.workspace = true -bytes.workspace = true -conduwuit-core.workspace = true 
-conduwuit-database.workspace = true -const-str.workspace = true -either.workspace = true -futures.workspace = true -hickory-resolver.workspace = true -http.workspace = true -image.workspace = true -image.optional = true -ipaddress.workspace = true -itertools.workspace = true -log.workspace = true -loole.workspace = true -lru-cache.workspace = true -rand.workspace = true -regex.workspace = true -reqwest.workspace = true -ruma.workspace = true -rustyline-async.workspace = true -rustyline-async.optional = true -serde_json.workspace = true -serde.workspace = true -serde_yaml.workspace = true -sha2.workspace = true -termimad.workspace = true -termimad.optional = true -tokio.workspace = true -tracing.workspace = true -url.workspace = true -webpage.workspace = true -webpage.optional = true -blurhash.workspace = true -blurhash.optional = true - -[lints] -workspace = true diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs new file mode 100644 index 00000000..492c500c --- /dev/null +++ b/src/service/account_data/data.rs @@ -0,0 +1,28 @@ +use std::collections::HashMap; + +use ruma::{ + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, + serde::Raw, + RoomId, UserId, +}; + +use crate::Result; + +pub trait Data: Send + Sync { + /// Places one event in the account data of the user and removes the + /// previous entry. + fn update( + &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, + data: &serde_json::Value, + ) -> Result<()>; + + /// Searches the account data for a specific kind. + fn get( + &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, + ) -> Result>>; + + /// Returns all changes to the account data that happened after `since`. 
+ fn changes_since( + &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, + ) -> Result>>; +} diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 453051be..6acfbef4 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,158 +1,44 @@ -use std::sync::Arc; +mod data; -use conduwuit::{ - Err, Result, err, implement, - utils::{ReadyExt, result::LogErr, stream::TryIgnore}, -}; -use database::{Deserialized, Handle, Ignore, Json, Map}; -use futures::{Stream, StreamExt, TryFutureExt}; +use std::collections::HashMap; + +pub(crate) use data::Data; use ruma::{ - RoomId, UserId, - events::{ - AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, - GlobalAccountDataEventType, RoomAccountDataEventType, - }, + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, serde::Raw, + RoomId, UserId, }; -use serde::Deserialize; -use crate::{Dep, globals}; +use crate::Result; pub struct Service { - services: Services, - db: Data, + pub db: &'static dyn Data, } -struct Data { - roomuserdataid_accountdata: Arc, - roomusertype_roomuserdataid: Arc, -} - -struct Services { - globals: Dep, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - globals: args.depend::("globals"), - }, - db: Data { - roomuserdataid_accountdata: args.db["roomuserdataid_accountdata"].clone(), - roomusertype_roomuserdataid: args.db["roomusertype_roomuserdataid"].clone(), - }, - })) +impl Service { + /// Places one event in the account data of the user and removes the + /// previous entry. 
+ #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] + pub fn update( + &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, + data: &serde_json::Value, + ) -> Result<()> { + self.db.update(room_id, user_id, event_type, data) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -/// Places one event in the account data of the user and removes the -/// previous entry. -#[allow(clippy::needless_pass_by_value)] -#[implement(Service)] -pub async fn update( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - event_type: RoomAccountDataEventType, - data: &serde_json::Value, -) -> Result<()> { - if data.get("type").is_none() || data.get("content").is_none() { - return Err!(Request(InvalidParam("Account data doesn't have all required fields."))); + /// Searches the account data for a specific kind. + #[tracing::instrument(skip(self, room_id, user_id, event_type))] + pub fn get( + &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, + ) -> Result>> { + self.db.get(room_id, user_id, event_type) } - let count = self.services.globals.next_count().unwrap(); - let roomuserdataid = (room_id, user_id, count, &event_type); - self.db - .roomuserdataid_accountdata - .put(roomuserdataid, Json(data)); - - let key = (room_id, user_id, &event_type); - let prev = self.db.roomusertype_roomuserdataid.qry(&key).await; - self.db.roomusertype_roomuserdataid.put(key, roomuserdataid); - - // Remove old entry - if let Ok(prev) = prev { - self.db.roomuserdataid_accountdata.remove(&prev); + /// Returns all changes to the account data that happened after `since`. + #[tracing::instrument(skip(self, room_id, user_id, since))] + pub fn changes_since( + &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, + ) -> Result>> { + self.db.changes_since(room_id, user_id, since) } - - Ok(()) -} - -/// Searches the room account data for a specific kind. 
-#[implement(Service)] -pub async fn get_global(&self, user_id: &UserId, kind: GlobalAccountDataEventType) -> Result -where - T: for<'de> Deserialize<'de>, -{ - self.get_raw(None, user_id, &kind.to_string()) - .await - .deserialized() -} - -/// Searches the global account data for a specific kind. -#[implement(Service)] -pub async fn get_room( - &self, - room_id: &RoomId, - user_id: &UserId, - kind: RoomAccountDataEventType, -) -> Result -where - T: for<'de> Deserialize<'de>, -{ - self.get_raw(Some(room_id), user_id, &kind.to_string()) - .await - .deserialized() -} - -#[implement(Service)] -pub async fn get_raw( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - kind: &str, -) -> Result> { - let key = (room_id, user_id, kind.to_owned()); - self.db - .roomusertype_roomuserdataid - .qry(&key) - .and_then(|roomuserdataid| self.db.roomuserdataid_accountdata.get(&roomuserdataid)) - .await -} - -/// Returns all changes to the account data that happened after `since`. -#[implement(Service)] -pub fn changes_since<'a>( - &'a self, - room_id: Option<&'a RoomId>, - user_id: &'a UserId, - since: u64, - to: Option, -) -> impl Stream + Send + 'a { - type Key<'a> = (Option<&'a RoomId>, &'a UserId, u64, Ignore); - - // Skip the data that's exactly at since, because we sent that last time - let first_possible = (room_id, user_id, since.saturating_add(1)); - - self.db - .roomuserdataid_accountdata - .stream_from(&first_possible) - .ignore_err() - .ready_take_while(move |((room_id_, user_id_, count, _), _): &(Key<'_>, _)| { - room_id == *room_id_ && user_id == *user_id_ && to.is_none_or(|to| *count <= to) - }) - .map(move |(_, v)| { - match room_id { - | Some(_) => serde_json::from_slice::>(v) - .map(AnyRawAccountDataEvent::Room), - | None => serde_json::from_slice::>(v) - .map(AnyRawAccountDataEvent::Global), - } - .map_err(|e| err!(Database("Database contains invalid account data: {e}"))) - .log_err() - }) - .ignore_err() } diff --git a/src/service/admin/appservice.rs 
b/src/service/admin/appservice.rs new file mode 100644 index 00000000..ff0611e0 --- /dev/null +++ b/src/service/admin/appservice.rs @@ -0,0 +1,100 @@ +use clap::Subcommand; +use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; + +use crate::{service::admin::escape_html, services, Result}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum AppserviceCommand { + /// - Register an appservice using its registration YAML + /// + /// This command needs a YAML generated by an appservice (such as a bridge), + /// which must be provided in a Markdown code block below the command. + /// + /// Registering a new bridge using the ID of an existing bridge will replace + /// the old one. + Register, + + /// - Unregister an appservice using its ID + /// + /// You can find the ID using the `list-appservices` command. + Unregister { + /// The appservice to unregister + appservice_identifier: String, + }, + + /// - Show an appservice's config using its ID + /// + /// You can find the ID using the `list-appservices` command. + Show { + /// The appservice to show + appservice_identifier: String, + }, + + /// - List all the currently registered appservices + List, +} + +pub(crate) async fn process(command: AppserviceCommand, body: Vec<&str>) -> Result { + match command { + AppserviceCommand::Register => { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let appservice_config = body[1..body.len() - 1].join("\n"); + let parsed_config = serde_yaml::from_str::(&appservice_config); + match parsed_config { + Ok(yaml) => match services().appservice.register_appservice(yaml).await { + Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( + "Appservice registered with ID: {id}." 
+ ))), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Failed to register appservice: {e}" + ))), + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Could not parse appservice config: {e}" + ))), + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) + } + }, + AppserviceCommand::Unregister { + appservice_identifier, + } => match services() + .appservice + .unregister_appservice(&appservice_identifier) + .await + { + Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Failed to unregister appservice: {e}" + ))), + }, + AppserviceCommand::Show { + appservice_identifier, + } => match services() + .appservice + .get_registration(&appservice_identifier) + .await + { + Some(config) => { + let config_str = serde_yaml::to_string(&config).expect("config should've been validated on register"); + let output = format!("Config for {}:\n\n```yaml\n{}\n```", appservice_identifier, config_str,); + let output_html = format!( + "Config for {}:\n\n

{}
", + escape_html(&appservice_identifier), + escape_html(&config_str), + ); + Ok(RoomMessageEventContent::text_html(output, output_html)) + }, + None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")), + }, + AppserviceCommand::List => { + let appservices = services().appservice.iter_ids().await; + let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", ")); + Ok(RoomMessageEventContent::text_plain(output)) + }, + } +} diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs deleted file mode 100644 index 02f41303..00000000 --- a/src/service/admin/console.rs +++ /dev/null @@ -1,259 +0,0 @@ -#![cfg(feature = "console")] - -use std::{ - collections::VecDeque, - sync::{Arc, Mutex}, -}; - -use conduwuit::{Server, debug, defer, error, log, log::is_systemd_mode}; -use futures::future::{AbortHandle, Abortable}; -use ruma::events::room::message::RoomMessageEventContent; -use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; -use termimad::MadSkin; -use tokio::task::JoinHandle; - -use crate::{Dep, admin}; - -pub struct Console { - server: Arc, - admin: Dep, - worker_join: Mutex>>, - input_abort: Mutex>, - command_abort: Mutex>, - history: Mutex>, - output: MadSkin, -} - -const PROMPT: &str = "uwu> "; -const HISTORY_LIMIT: usize = 48; - -impl Console { - pub(super) fn new(args: &crate::Args<'_>) -> Arc { - Arc::new(Self { - server: args.server.clone(), - admin: args.depend::("admin"), - worker_join: None.into(), - input_abort: None.into(), - command_abort: None.into(), - history: VecDeque::with_capacity(HISTORY_LIMIT).into(), - output: configure_output(MadSkin::default_dark()), - }) - } - - pub(super) async fn handle_signal(self: &Arc, sig: &'static str) { - if !self.server.running() { - self.interrupt(); - } else if sig == "SIGINT" { - self.interrupt_command(); - self.start().await; - } - } - - pub async fn start(self: &Arc) { - let mut worker_join = self.worker_join.lock().expect("locked"); - if 
worker_join.is_none() { - let self_ = Arc::clone(self); - _ = worker_join.insert(self.server.runtime().spawn(self_.worker())); - } - } - - pub async fn close(self: &Arc) { - self.interrupt(); - - let Some(worker_join) = self.worker_join.lock().expect("locked").take() else { - return; - }; - - _ = worker_join.await; - } - - pub fn interrupt(self: &Arc) { - self.interrupt_command(); - self.interrupt_readline(); - self.worker_join - .lock() - .expect("locked") - .as_ref() - .map(JoinHandle::abort); - } - - pub fn interrupt_readline(self: &Arc) { - if let Some(input_abort) = self.input_abort.lock().expect("locked").take() { - debug!("Interrupting console readline..."); - input_abort.abort(); - } - } - - pub fn interrupt_command(self: &Arc) { - if let Some(command_abort) = self.command_abort.lock().expect("locked").take() { - debug!("Interrupting console command..."); - command_abort.abort(); - } - } - - #[tracing::instrument(skip_all, name = "console", level = "trace")] - async fn worker(self: Arc) { - debug!("session starting"); - - self.output - .print_inline(&format!("**conduwuit {}** admin console\n", conduwuit::version())); - self.output - .print_text("\"help\" for help, ^D to exit the console, ^\\ to stop the server\n"); - - while self.server.running() { - match self.readline().await { - | Ok(event) => match event { - | ReadlineEvent::Line(string) => self.clone().handle(string).await, - | ReadlineEvent::Interrupted => continue, - | ReadlineEvent::Eof => break, - | ReadlineEvent::Quit => - self.server.shutdown().unwrap_or_else(error::default_log), - }, - | Err(error) => match error { - | ReadlineError::Closed => break, - | ReadlineError::IO(error) => { - error!("console I/O: {error:?}"); - break; - }, - }, - } - } - - debug!("session ending"); - self.worker_join.lock().expect("locked").take(); - } - - async fn readline(self: &Arc) -> Result { - let _suppression = (!is_systemd_mode()).then(|| log::Suppress::new(&self.server)); - - let (mut readline, _writer) = 
Readline::new(PROMPT.to_owned())?; - let self_ = Arc::clone(self); - readline.set_tab_completer(move |line| self_.tab_complete(line)); - self.set_history(&mut readline); - - let future = readline.readline(); - - let (abort, abort_reg) = AbortHandle::new_pair(); - let future = Abortable::new(future, abort_reg); - _ = self.input_abort.lock().expect("locked").insert(abort); - defer! {{ - _ = self.input_abort.lock().expect("locked").take(); - }} - - let Ok(result) = future.await else { - return Ok(ReadlineEvent::Eof); - }; - - readline.flush()?; - result - } - - async fn handle(self: Arc, line: String) { - if line.trim().is_empty() { - return; - } - - self.add_history(line.clone()); - let future = self.clone().process(line); - - let (abort, abort_reg) = AbortHandle::new_pair(); - let future = Abortable::new(future, abort_reg); - _ = self.command_abort.lock().expect("locked").insert(abort); - defer! {{ - _ = self.command_abort.lock().expect("locked").take(); - }} - - _ = future.await; - } - - async fn process(self: Arc, line: String) { - match self.admin.command_in_place(line, None).await { - | Ok(Some(ref content)) => self.output(content), - | Err(ref content) => self.output_err(content), - | _ => unreachable!(), - } - } - - fn output_err(self: Arc, output_content: &RoomMessageEventContent) { - let output = configure_output_err(self.output.clone()); - output.print_text(output_content.body()); - } - - fn output(self: Arc, output_content: &RoomMessageEventContent) { - self.output.print_text(output_content.body()); - } - - fn set_history(&self, readline: &mut Readline) { - self.history - .lock() - .expect("locked") - .iter() - .rev() - .for_each(|entry| { - readline - .add_history_entry(entry.clone()) - .expect("added history entry"); - }); - } - - fn add_history(&self, line: String) { - let mut history = self.history.lock().expect("locked"); - history.push_front(line); - history.truncate(HISTORY_LIMIT); - } - - fn tab_complete(&self, line: &str) -> String { - self.admin 
- .complete_command(line) - .unwrap_or_else(|| line.to_owned()) - } -} - -/// Standalone/static markdown printer for errors. -pub fn print_err(markdown: &str) { - let output = configure_output_err(MadSkin::default_dark()); - output.print_text(markdown); -} -/// Standalone/static markdown printer. -pub fn print(markdown: &str) { - let output = configure_output(MadSkin::default_dark()); - output.print_text(markdown); -} - -fn configure_output_err(mut output: MadSkin) -> MadSkin { - use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; - - let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(196), Color::AnsiValue(234)); - output.inline_code = code_style.clone(); - output.code_block = LineStyle { - left_margin: 0, - right_margin: 0, - align: Alignment::Left, - compound_style: code_style, - }; - - output -} - -fn configure_output(mut output: MadSkin) -> MadSkin { - use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; - - let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(40), Color::AnsiValue(234)); - output.inline_code = code_style.clone(); - output.code_block = LineStyle { - left_margin: 0, - right_margin: 0, - align: Alignment::Left, - compound_style: code_style, - }; - - let table_style = CompoundStyle::default(); - output.table = LineStyle { - left_margin: 1, - right_margin: 1, - align: Alignment::Left, - compound_style: table_style, - }; - - output -} diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs deleted file mode 100644 index cd0fc5a9..00000000 --- a/src/service/admin/create.rs +++ /dev/null @@ -1,214 +0,0 @@ -use std::collections::BTreeMap; - -use conduwuit::{Result, pdu::PduBuilder}; -use futures::FutureExt; -use ruma::{ - RoomId, RoomVersionId, - events::room::{ - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, 
RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - name::RoomNameEventContent, - power_levels::RoomPowerLevelsEventContent, - preview_url::RoomPreviewUrlsEventContent, - topic::RoomTopicEventContent, - }, -}; - -use crate::Services; - -/// Create the admin room. -/// -/// Users in this room are considered admins by conduwuit, and the room can be -/// used to issue admin commands by talking to the server user inside it. -pub async fn create_admin_room(services: &Services) -> Result { - let room_id = RoomId::new(services.globals.server_name()); - let room_version = &services.config.default_room_version; - - let _short_id = services - .rooms - .short - .get_or_create_shortroomid(&room_id) - .await; - - let state_lock = services.rooms.state.mutex.lock(&room_id).await; - - // Create a user for the server - let server_user = services.globals.server_user.as_ref(); - services.users.create(server_user, None)?; - - let create_content = { - use RoomVersionId::*; - match room_version { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => - RoomCreateEventContent::new_v1(server_user.into()), - | _ => RoomCreateEventContent::new_v11(), - } - }; - - // 1. The room create event - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomCreateEventContent { - federate: true, - predecessor: None, - room_version: room_version.clone(), - ..create_content - }), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 2. Make server user/bot join - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state( - String::from(server_user), - &RoomMemberEventContent::new(MembershipState::Join), - ), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 3. 
Power levels - let users = BTreeMap::from_iter([(server_user.into(), 69420.into())]); - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { - users, - ..Default::default() - }), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 4.1 Join Rules - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomJoinRulesEventContent::new(JoinRule::Invite)), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 4.2 History Visibility - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared), - ), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 4.3 Guest Access - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state( - String::new(), - &RoomGuestAccessEventContent::new(GuestAccess::Forbidden), - ), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 5. Events implied by name and topic - let room_name = format!("{} Admin Room", services.config.server_name); - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomNameEventContent::new(room_name)), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomTopicEventContent { - topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://conduwuit.puppyirl.gay/", services.config.server_name), - }), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - // 6. 
Room alias - let alias = &services.globals.admin_alias; - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomCanonicalAliasEventContent { - alias: Some(alias.clone()), - alt_aliases: Vec::new(), - }), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - services - .rooms - .alias - .set_alias(alias, &room_id, server_user)?; - - // 7. (ad-hoc) Disable room URL previews for everyone by default - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPreviewUrlsEventContent { disabled: true }), - server_user, - &room_id, - &state_lock, - ) - .boxed() - .await?; - - Ok(()) -} diff --git a/src/service/admin/debug.rs b/src/service/admin/debug.rs new file mode 100644 index 00000000..c27f5900 --- /dev/null +++ b/src/service/admin/debug.rs @@ -0,0 +1,432 @@ +use std::{collections::BTreeMap, sync::Arc, time::Instant}; + +use clap::Subcommand; +use ruma::{ + api::client::error::ErrorKind, events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId, + RoomId, RoomVersionId, ServerName, +}; +use tokio::sync::RwLock; +use tracing::{debug, error, info, warn}; +use tracing_subscriber::EnvFilter; + +use crate::{api::server_server::parse_incoming_pdu, services, utils::HtmlEscape, Error, PduEvent, Result}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum DebugCommand { + /// - Get the auth_chain of a PDU + GetAuthChain { + /// An event ID (the $ character followed by the base64 reference hash) + event_id: Box, + }, + + /// - Parse and print a PDU from a JSON + /// + /// The PDU event is only checked for validity and is not added to the + /// database. + /// + /// This command needs a JSON blob provided in a Markdown code block below + /// the command. 
+ ParsePdu, + + /// - Retrieve and print a PDU by ID from the conduwuit database + GetPdu { + /// An event ID (a $ followed by the base64 reference hash) + event_id: Box, + }, + + /// - Attempts to retrieve a PDU from a remote server. Inserts it into our + /// database/timeline if found and we do not have this PDU already + /// (following normal event auth rules, handles it as an incoming PDU). + GetRemotePdu { + /// An event ID (a $ followed by the base64 reference hash) + event_id: Box, + + /// Argument for us to attempt to fetch the event from the + /// specified remote server. + server: Box, + }, + + /// - Gets all the room state events for the specified room. + /// + /// This is functionally equivalent to `GET + /// /_matrix/client/v3/rooms/{roomid}/state`, except the admin command does + /// *not* check if the sender user is allowed to see state events. This is + /// done because it's implied that server admins here have database access + /// and can see/get room info themselves anyways if they were malicious + /// admins. + /// + /// Of course the check is still done on the actual client API. + GetRoomState { + /// Room ID + room_id: Box, + }, + + /// - Sends a federation request to the remote server's + /// `/_matrix/federation/v1/version` endpoint and measures the latency it + /// took for the server to respond + Ping { + server: Box, + }, + + /// - Forces device lists for all local and remote users to be updated (as + /// having new keys available) + ForceDeviceListUpdates, + + /// - Change tracing log level/filter on the fly + /// + /// This accepts the same format as the `log` config option. 
+ ChangeLogLevel { + /// Log level/filter + filter: Option, + + /// Resets the log level/filter to the one in your config + #[arg(short, long)] + reset: bool, + }, +} + +pub(crate) async fn process(command: DebugCommand, body: Vec<&str>) -> Result { + Ok(match command { + DebugCommand::GetAuthChain { + event_id, + } => { + let event_id = Arc::::from(event_id); + if let Some(event) = services().rooms.timeline.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + let start = Instant::now(); + let count = services() + .rooms + .auth_chain + .event_ids_iter(room_id, vec![event_id]) + .await? + .count(); + let elapsed = start.elapsed(); + RoomMessageEventContent::text_plain(format!("Loaded auth chain with length {count} in {elapsed:?}")) + } else { + RoomMessageEventContent::text_plain("Event not found.") + } + }, + DebugCommand::ParsePdu => { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + Ok(hash) => { + let event_id = EventId::parse(format!("${hash}")); + + match serde_json::from_value::( + serde_json::to_value(value).expect("value is json"), + ) { + Ok(pdu) => { + RoomMessageEventContent::text_plain(format!("EventId: {event_id:?}\n{pdu:#?}")) + }, + Err(e) => RoomMessageEventContent::text_plain(format!( + "EventId: {event_id:?}\nCould not parse event: {e}" + )), + } + }, + Err(e) => RoomMessageEventContent::text_plain(format!("Could not parse PDU JSON: {e:?}")), + }, + Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json in command body: {e}")), + } + } else { + 
RoomMessageEventContent::text_plain("Expected code block in command body.") + } + }, + DebugCommand::GetPdu { + event_id, + } => { + let mut outlier = false; + let mut pdu_json = services() + .rooms + .timeline + .get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?; + } + match pdu_json { + Some(json) => { + let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + return Ok(RoomMessageEventContent::text_html( + format!( + "{}\n```json\n{}\n```", + if outlier { + "Outlier PDU found in our database" + } else { + "PDU found in our database" + }, + json_text + ), + format!( + "

{}

\n
{}\n
\n", + if outlier { + "Outlier PDU found in our database" + } else { + "PDU found in our database" + }, + HtmlEscape(&json_text) + ), + )); + }, + None => { + return Ok(RoomMessageEventContent::text_plain("PDU not found locally.")); + }, + } + }, + DebugCommand::GetRemotePdu { + event_id, + server, + } => { + if !services().globals.config.allow_federation { + return Ok(RoomMessageEventContent::text_plain( + "Federation is disabled on this homeserver.", + )); + } + + if server == services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain( + "Not allowed to send federation requests to ourselves. Please use `get-pdu` for fetching local \ + PDUs.", + )); + } + + // TODO: use Futures as some requests may take a while so we dont block the + // admin room + match services() + .sending + .send_federation_request( + &server, + ruma::api::federation::event::get_event::v1::Request { + event_id: event_id.clone().into(), + }, + ) + .await + { + Ok(response) => { + let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { + warn!( + "Requested event ID {event_id} from server but failed to convert from RawValue to \ + CanonicalJsonObject (malformed event/response?): {e}" + ); + Error::BadRequest(ErrorKind::Unknown, "Received response from server but failed to parse PDU") + })?; + + debug!("Attempting to parse PDU: {:?}", &response.pdu); + let parsed_pdu = { + let parsed_result = parse_incoming_pdu(&response.pdu); + let (event_id, value, room_id) = match parsed_result { + Ok(t) => t, + Err(e) => { + warn!("Failed to parse PDU: {e}"); + info!("Full PDU: {:?}", &response.pdu); + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to parse PDU remote server {server} sent us: {e}" + ))); + }, + }; + + vec![(event_id, value, room_id)] + }; + + let pub_key_map = RwLock::new(BTreeMap::new()); + + debug!("Attempting to fetch homeserver signing keys for {server}"); + services() + .rooms + .event_handler + 
.fetch_required_signing_keys( + parsed_pdu.iter().map(|(_event_id, event, _room_id)| event), + &pub_key_map, + ) + .await + .unwrap_or_else(|e| { + warn!("Could not fetch all signatures for PDUs from {server}: {e:?}"); + }); + + info!("Attempting to handle event ID {event_id} as backfilled PDU"); + services() + .rooms + .timeline + .backfill_pdu(&server, response.pdu, &pub_key_map) + .await?; + + let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + + return Ok(RoomMessageEventContent::text_html( + format!( + "{}\n```json\n{}\n```", + "Got PDU from specified server and handled as backfilled PDU successfully. Event body:", + json_text + ), + format!( + "

{}

\n
{}\n
\n", + "Got PDU from specified server and handled as backfilled PDU successfully. Event body:", + HtmlEscape(&json_text) + ), + )); + }, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Remote server did not have PDU or failed sending request to remote server: {e}" + ))); + }, + } + }, + DebugCommand::GetRoomState { + room_id, + } => { + let room_state = services() + .rooms + .state_accessor + .room_state_full(&room_id) + .await? + .values() + .map(|pdu| pdu.to_state_event()) + .collect::>(); + + if room_state.is_empty() { + return Ok(RoomMessageEventContent::text_plain( + "Unable to find room state in our database (vector is empty)", + )); + } + + let json_text = serde_json::to_string_pretty(&room_state).map_err(|e| { + error!("Failed converting room state vector in our database to pretty JSON: {e}"); + Error::bad_database( + "Failed to convert room state events to pretty JSON, possible invalid room state events in our \ + database", + ) + })?; + + return Ok(RoomMessageEventContent::text_html( + format!("{}\n```json\n{}\n```", "Found full room state", json_text), + format!( + "

{}

\n
{}\n
\n", + "Found full room state", + HtmlEscape(&json_text) + ), + )); + }, + DebugCommand::Ping { + server, + } => { + if server == services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain( + "Not allowed to send federation requests to ourselves.", + )); + } + + let timer = tokio::time::Instant::now(); + + match services() + .sending + .send_federation_request(&server, ruma::api::federation::discovery::get_server_version::v1::Request {}) + .await + { + Ok(response) => { + let ping_time = timer.elapsed(); + + let json_text_res = serde_json::to_string_pretty(&response.server); + + if let Ok(json) = json_text_res { + return Ok(RoomMessageEventContent::text_html( + format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```"), + format!( + "

Got response which took {ping_time:?} time:

\n
{}\n
\n", + HtmlEscape(&json) + ), + )); + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "Got non-JSON response which took {ping_time:?} time:\n{0:?}", + response + ))); + }, + Err(e) => { + error!("Failed sending federation request to specified server from ping debug command: {e}"); + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed sending federation request to specified server:\n\n{e}", + ))); + }, + } + }, + DebugCommand::ForceDeviceListUpdates => { + // Force E2EE device list updates for all users + for user_id in services().users.iter().filter_map(Result::ok) { + services().users.mark_device_key_update(&user_id)?; + } + RoomMessageEventContent::text_plain("Marked all devices for all users as having new keys to update") + }, + DebugCommand::ChangeLogLevel { + filter, + reset, + } => { + if reset { + let old_filter_layer = match EnvFilter::try_new(&services().globals.config.log) { + Ok(s) => s, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Log level from config appears to be invalid now: {e}" + ))); + }, + }; + + match services() + .globals + .tracing_reload_handle + .modify(|filter| *filter = old_filter_layer) + { + Ok(()) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Successfully changed log level back to config value {}", + services().globals.config.log + ))); + }, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to modify and reload the global tracing log level: {e}" + ))); + }, + } + } + + if let Some(filter) = filter { + let new_filter_layer = match EnvFilter::try_new(filter) { + Ok(s) => s, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Invalid log level filter specified: {e}" + ))); + }, + }; + + match services() + .globals + .tracing_reload_handle + .modify(|filter| *filter = new_filter_layer) + { + Ok(()) => { + return Ok(RoomMessageEventContent::text_plain("Successfully changed log level")); + }, + Err(e) => { + return 
Ok(RoomMessageEventContent::text_plain(format!( + "Failed to modify and reload the global tracing log level: {e}" + ))); + }, + } + } + + return Ok(RoomMessageEventContent::text_plain("No log level was specified.")); + }, + }) +} diff --git a/src/service/admin/execute.rs b/src/service/admin/execute.rs deleted file mode 100644 index 174b28ed..00000000 --- a/src/service/admin/execute.rs +++ /dev/null @@ -1,127 +0,0 @@ -use conduwuit::{Err, Result, debug, debug_info, error, implement, info}; -use ruma::events::room::message::RoomMessageEventContent; -use tokio::time::{Duration, sleep}; - -pub(super) const SIGNAL: &str = "SIGUSR2"; - -/// Possibly spawn the terminal console at startup if configured. -#[implement(super::Service)] -pub(super) async fn console_auto_start(&self) { - #[cfg(feature = "console")] - if self.services.server.config.admin_console_automatic { - // Allow more of the startup sequence to execute before spawning - tokio::task::yield_now().await; - self.console.start().await; - } -} - -/// Shutdown the console when the admin worker terminates. -#[implement(super::Service)] -pub(super) async fn console_auto_stop(&self) { - #[cfg(feature = "console")] - self.console.close().await; -} - -/// Execute admin commands after startup -#[implement(super::Service)] -pub(super) async fn startup_execute(&self) -> Result { - // List of comamnds to execute - let commands = &self.services.server.config.admin_execute; - - // Determine if we're running in smoketest-mode which will change some behaviors - let smoketest = self.services.server.config.test.contains("smoke"); - - // When true, errors are ignored and startup continues. 
- let errors = !smoketest && self.services.server.config.admin_execute_errors_ignore; - - //TODO: remove this after run-states are broadcast - sleep(Duration::from_millis(500)).await; - - for (i, command) in commands.iter().enumerate() { - if let Err(e) = self.execute_command(i, command.clone()).await { - if !errors { - return Err(e); - } - } - - tokio::task::yield_now().await; - } - - // The smoketest functionality is placed here for now and simply initiates - // shutdown after all commands have executed. - if smoketest { - debug_info!("Smoketest mode. All commands complete. Shutting down now..."); - self.services - .server - .shutdown() - .inspect_err(error::inspect_log) - .expect("Error shutting down from smoketest"); - } - - Ok(()) -} - -/// Execute admin commands after signal -#[implement(super::Service)] -pub(super) async fn signal_execute(&self) -> Result { - // List of comamnds to execute - let commands = self.services.server.config.admin_signal_execute.clone(); - - // When true, errors are ignored and execution continues. 
- let ignore_errors = self.services.server.config.admin_execute_errors_ignore; - - for (i, command) in commands.iter().enumerate() { - if let Err(e) = self.execute_command(i, command.clone()).await { - if !ignore_errors { - return Err(e); - } - } - - tokio::task::yield_now().await; - } - - Ok(()) -} - -/// Execute one admin command after startup or signal -#[implement(super::Service)] -async fn execute_command(&self, i: usize, command: String) -> Result { - debug!("Execute command #{i}: executing {command:?}"); - - match self.command_in_place(command, None).await { - | Ok(Some(output)) => Self::execute_command_output(i, &output), - | Err(output) => Self::execute_command_error(i, &output), - | Ok(None) => { - info!("Execute command #{i} completed (no output)."); - Ok(()) - }, - } -} - -#[cfg(feature = "console")] -#[implement(super::Service)] -fn execute_command_output(i: usize, content: &RoomMessageEventContent) -> Result { - debug_info!("Execute command #{i} completed:"); - super::console::print(content.body()); - Ok(()) -} - -#[cfg(feature = "console")] -#[implement(super::Service)] -fn execute_command_error(i: usize, content: &RoomMessageEventContent) -> Result { - super::console::print_err(content.body()); - Err!(debug_error!("Execute command #{i} failed.")) -} - -#[cfg(not(feature = "console"))] -#[implement(super::Service)] -fn execute_command_output(i: usize, content: &RoomMessageEventContent) -> Result { - info!("Execute command #{i} completed:\n{:#}", content.body()); - Ok(()) -} - -#[cfg(not(feature = "console"))] -#[implement(super::Service)] -fn execute_command_error(i: usize, content: &RoomMessageEventContent) -> Result { - Err!(error!("Execute command #{i} failed:\n{:#}", content.body())) -} diff --git a/src/service/admin/federation.rs b/src/service/admin/federation.rs new file mode 100644 index 00000000..c7a61103 --- /dev/null +++ b/src/service/admin/federation.rs @@ -0,0 +1,172 @@ +use std::{collections::BTreeMap, fmt::Write as _}; + +use 
clap::Subcommand; +use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName}; +use tokio::sync::RwLock; + +use crate::{services, utils::HtmlEscape, Result}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum FederationCommand { + /// - List all rooms we are currently handling an incoming pdu from + IncomingFederation, + + /// - Disables incoming federation handling for a room. + DisableRoom { + room_id: Box, + }, + + /// - Enables incoming federation handling for a room again. + EnableRoom { + room_id: Box, + }, + + /// - Verify json signatures + /// + /// This command needs a JSON blob provided in a Markdown code block below + /// the command. + SignJson, + + /// - Verify json signatures + /// + /// This command needs a JSON blob provided in a Markdown code block below + /// the command. + VerifyJson, + + /// - Fetch `/.well-known/matrix/support` from the specified server + /// + /// Despite the name, this is not a federation endpoint and does not go + /// through the federation / server resolution process as per-spec this is + /// supposed to be served at the server_name. + /// + /// Respecting homeservers put this file here for listing administration, + /// moderation, and security inquiries. This command provides a way to + /// easily fetch that information. 
+ FetchSupportWellKnown { + server_name: Box, + }, +} + +pub(crate) async fn process(command: FederationCommand, body: Vec<&str>) -> Result { + match command { + FederationCommand::DisableRoom { + room_id, + } => { + services().rooms.metadata.disable_room(&room_id, true)?; + Ok(RoomMessageEventContent::text_plain("Room disabled.")) + }, + FederationCommand::EnableRoom { + room_id, + } => { + services().rooms.metadata.disable_room(&room_id, false)?; + Ok(RoomMessageEventContent::text_plain("Room enabled.")) + }, + FederationCommand::IncomingFederation => { + let map = services().globals.roomid_federationhandletime.read().await; + let mut msg = format!("Handling {} incoming pdus:\n", map.len()); + + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + let _ = writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60); + } + Ok(RoomMessageEventContent::text_plain(&msg)) + }, + FederationCommand::SignJson => { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(mut value) => { + ruma::signatures::sign_json( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut value, + ) + .expect("our request json is what ruma expects"); + let json_text = serde_json::to_string_pretty(&value).expect("canonical json is valid json"); + Ok(RoomMessageEventContent::text_plain(json_text)) + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. 
Add --help for details.", + )) + } + }, + FederationCommand::VerifyJson => { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + let pub_key_map = RwLock::new(BTreeMap::new()); + + services() + .rooms + .event_handler + .fetch_required_signing_keys([&value], &pub_key_map) + .await?; + + let pub_key_map = pub_key_map.read().await; + match ruma::signatures::verify_json(&pub_key_map, &value) { + Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Signature verification failed: {e}" + ))), + } + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) + } + }, + FederationCommand::FetchSupportWellKnown { + server_name, + } => { + let response = services() + .globals + .client + .default + .get(format!("https://{server_name}/.well-known/matrix/support")) + .send() + .await?; + + let text = response.text().await?; + + if text.is_empty() { + return Ok(RoomMessageEventContent::text_plain("Response text/body is empty.")); + } + + if text.len() > 1500 { + return Ok(RoomMessageEventContent::text_plain( + "Response text/body is over 1500 characters, assuming no support well-known.", + )); + } + + let json: serde_json::Value = match serde_json::from_str(&text) { + Ok(json) => json, + Err(_) => { + return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON.")); + }, + }; + + let pretty_json: String = match serde_json::to_string_pretty(&json) { + Ok(json) => json, + Err(_) => { + return Ok(RoomMessageEventContent::text_plain("Response text/body is not valid JSON.")); + }, + }; + + Ok(RoomMessageEventContent::text_html( + format!("Got JSON 
response:\n\n```json\n{pretty_json}\n```"), + format!( + "

Got JSON response:

\n
{}\n
\n", + HtmlEscape(&pretty_json) + ), + )) + }, + } +} diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs deleted file mode 100644 index 2d90ea52..00000000 --- a/src/service/admin/grant.rs +++ /dev/null @@ -1,172 +0,0 @@ -use std::collections::BTreeMap; - -use conduwuit::{Err, Result, debug_info, debug_warn, error, implement, matrix::pdu::PduBuilder}; -use ruma::{ - RoomId, UserId, - events::{ - RoomAccountDataEventType, StateEventType, - room::{ - member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, - power_levels::RoomPowerLevelsEventContent, - }, - tag::{TagEvent, TagEventContent, TagInfo}, - }, -}; - -/// Invite the user to the conduwuit admin room. -/// -/// This is equivalent to granting server admin privileges. -#[implement(super::Service)] -pub async fn make_user_admin(&self, user_id: &UserId) -> Result { - let Ok(room_id) = self.get_admin_room().await else { - debug_warn!( - "make_user_admin was called without an admin room being available or created" - ); - return Ok(()); - }; - - let state_lock = self.services.state.mutex.lock(&room_id).await; - - if self.services.state_cache.is_joined(user_id, &room_id).await { - return Err!(debug_warn!("User is already joined in the admin room")); - } - if self - .services - .state_cache - .is_invited(user_id, &room_id) - .await - { - return Err!(debug_warn!("User is already pending an invitation to the admin room")); - } - - // Use the server user to grant the new admin's power level - let server_user = self.services.globals.server_user.as_ref(); - - // if this is our local user, just forcefully join them in the room. otherwise, - // invite the remote user. 
- if self.services.globals.user_is_local(user_id) { - debug_info!("Inviting local user {user_id} to admin room {room_id}"); - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - String::from(user_id), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - server_user, - &room_id, - &state_lock, - ) - .await?; - - debug_info!("Force joining local user {user_id} to admin room {room_id}"); - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - String::from(user_id), - &RoomMemberEventContent::new(MembershipState::Join), - ), - user_id, - &room_id, - &state_lock, - ) - .await?; - } else { - debug_info!("Inviting remote user {user_id} to admin room {room_id}"); - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - server_user, - &room_id, - &state_lock, - ) - .await?; - } - - // Set power levels - let mut room_power_levels = self - .services - .state_accessor - .room_state_get_content::( - &room_id, - &StateEventType::RoomPowerLevels, - "", - ) - .await - .unwrap_or_default(); - - room_power_levels - .users - .insert(server_user.into(), 69420.into()); - room_power_levels.users.insert(user_id.into(), 100.into()); - - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state(String::new(), &room_power_levels), - server_user, - &room_id, - &state_lock, - ) - .await?; - - // Set room tag - let room_tag = self.services.server.config.admin_room_tag.as_str(); - if !room_tag.is_empty() { - if let Err(e) = self.set_room_tag(&room_id, user_id, room_tag).await { - error!(?room_id, ?user_id, ?room_tag, "Failed to set tag for admin grant: {e}"); - } - } - - if self.services.server.config.admin_room_notices { - let welcome_message = String::from( - "## Thank you for trying out Continuwuity!\n\nContinuwuity is a hard fork of conduwuit, which is also a hard fork of Conduit, currently in Beta. 
The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. Continuwuity is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> Source code: https://forgejo.ellis.link/continuwuation/continuwuity\n> Documentation: https://continuwuity.org/\n> Report issues: https://forgejo.ellis.link/continuwuation/continuwuity/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nContinuwuity space: `/join #space:continuwuity.org`\nContinuwuity main room (Ask questions and get notified on updates): `/join #continuwuity:continuwuity.org`\nContinuwuity offtopic room: `/join #offtopic:continuwuity.org`", - ); - - // Send welcome message - self.services - .timeline - .build_and_append_pdu( - PduBuilder::timeline(&RoomMessageEventContent::text_markdown(welcome_message)), - server_user, - &room_id, - &state_lock, - ) - .await?; - } - - Ok(()) -} - -#[implement(super::Service)] -async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result { - let mut event = self - .services - .account_data - .get_room(room_id, user_id, RoomAccountDataEventType::Tag) - .await - .unwrap_or_else(|_| TagEvent { - content: TagEventContent { tags: BTreeMap::new() }, - }); - - event - .content - .tags - .insert(tag.to_owned().into(), TagInfo::new()); - - self.services - .account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &serde_json::to_value(event)?, - ) - .await -} diff --git a/src/service/admin/media.rs b/src/service/admin/media.rs new file mode 100644 index 00000000..ee86401e --- /dev/null +++ b/src/service/admin/media.rs @@ -0,0 
+1,216 @@ +use clap::Subcommand; +use ruma::{events::room::message::RoomMessageEventContent, EventId}; +use tracing::{debug, info}; + +use crate::{service::admin::MxcUri, services, Result}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum MediaCommand { + /// - Deletes a single media file from our database and on the filesystem + /// via a single MXC URL + Delete { + /// The MXC URL to delete + #[arg(long)] + mxc: Option>, + + /// - The message event ID which contains the media and thumbnail MXC + /// URLs + #[arg(long)] + event_id: Option>, + }, + + /// - Deletes a codeblock list of MXC URLs from our database and on the + /// filesystem + DeleteList, + + /// - Deletes all remote media in the last X amount of time using filesystem + /// metadata first created at date. + DeletePastRemoteMedia { + /// - The duration (at or after), e.g. "5m" to delete all media in the + /// past 5 minutes + duration: String, + }, +} + +pub(crate) async fn process(command: MediaCommand, body: Vec<&str>) -> Result { + match command { + MediaCommand::Delete { + mxc, + event_id, + } => { + if event_id.is_some() && mxc.is_some() { + return Ok(RoomMessageEventContent::text_plain( + "Please specify either an MXC or an event ID, not both.", + )); + } + + if let Some(mxc) = mxc { + if !mxc.to_string().starts_with("mxc://") { + return Ok(RoomMessageEventContent::text_plain("MXC provided is not valid.")); + } + + debug!("Got MXC URL: {}", mxc); + services().media.delete(mxc.to_string()).await?; + + return Ok(RoomMessageEventContent::text_plain( + "Deleted the MXC from our database and on our filesystem.", + )); + } else if let Some(event_id) = event_id { + debug!("Got event ID to delete media from: {}", event_id); + + let mut mxc_urls = vec![]; + let mut mxc_deletion_count = 0; + + // parsing the PDU for any MXC URLs begins here + if let Some(event_json) = services().rooms.timeline.get_pdu_json(&event_id)? 
{ + if let Some(content_key) = event_json.get("content") { + debug!("Event ID has \"content\"."); + let content_obj = content_key.as_object(); + + if let Some(content) = content_obj { + // 1. attempts to parse the "url" key + debug!("Attempting to go into \"url\" key for main media file"); + if let Some(url) = content.get("url") { + debug!("Got a URL in the event ID {event_id}: {url}"); + + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {} to list of MXCs to delete", url); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); + } else { + info!( + "Found a URL in the event ID {event_id} but did not start with mxc://, \ + ignoring" + ); + } + } + + // 2. attempts to parse the "info" key + debug!("Attempting to go into \"info\" key for thumbnails"); + if let Some(info_key) = content.get("info") { + debug!("Event ID has \"info\"."); + let info_obj = info_key.as_object(); + + if let Some(info) = info_obj { + if let Some(thumbnail_url) = info.get("thumbnail_url") { + debug!("Found a thumbnail_url in info key: {thumbnail_url}"); + + if thumbnail_url.to_string().starts_with("\"mxc://") { + debug!("Pushing thumbnail URL {} to list of MXCs to delete", thumbnail_url); + let final_thumbnail_url = thumbnail_url.to_string().replace('"', ""); + mxc_urls.push(final_thumbnail_url); + } else { + info!( + "Found a thumbnail URL in the event ID {event_id} but did not start \ + with mxc://, ignoring" + ); + } + } else { + info!("No \"thumbnail_url\" key in \"info\" key, assuming no thumbnails."); + } + } + } + + // 3. 
attempts to parse the "file" key + debug!("Attempting to go into \"file\" key"); + if let Some(file_key) = content.get("file") { + debug!("Event ID has \"file\"."); + let file_obj = file_key.as_object(); + + if let Some(file) = file_obj { + if let Some(url) = file.get("url") { + debug!("Found url in file key: {url}"); + + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {} to list of MXCs to delete", url); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); + } else { + info!( + "Found a URL in the event ID {event_id} but did not start with \ + mxc://, ignoring" + ); + } + } else { + info!("No \"url\" key in \"file\" key."); + } + } + } + } else { + return Ok(RoomMessageEventContent::text_plain( + "Event ID does not have a \"content\" key or failed parsing the event ID JSON.", + )); + } + } else { + return Ok(RoomMessageEventContent::text_plain( + "Event ID does not have a \"content\" key, this is not a message or an event type that \ + contains media.", + )); + } + } else { + return Ok(RoomMessageEventContent::text_plain( + "Event ID does not exist or is not known to us.", + )); + } + + if mxc_urls.is_empty() { + // we shouldn't get here (should have errored earlier) but just in case for + // whatever reason we do... + info!("Parsed event ID {event_id} but did not contain any MXC URLs."); + return Ok(RoomMessageEventContent::text_plain("Parsed event ID but found no MXC URLs.")); + } + + for mxc_url in mxc_urls { + services().media.delete(mxc_url).await?; + mxc_deletion_count += 1; + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from event ID \ + {event_id}." + ))); + } + + Ok(RoomMessageEventContent::text_plain( + "Please specify either an MXC using --mxc or an event ID using --event-id of the message containing \ + an image. 
See --help for details.", + )) + }, + MediaCommand::DeleteList => { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let mxc_list = body.clone().drain(1..body.len() - 1).collect::>(); + + let mut mxc_deletion_count = 0; + + for mxc in mxc_list { + debug!("Deleting MXC {} in bulk", mxc); + services().media.delete(mxc.to_owned()).await?; + mxc_deletion_count += 1; + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "Finished bulk MXC deletion, deleted {} total MXCs from our database and the filesystem.", + mxc_deletion_count + ))); + } + + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) + }, + MediaCommand::DeletePastRemoteMedia { + duration, + } => { + let deleted_count = services() + .media + .delete_all_remote_media_at_after_time(duration) + .await?; + + Ok(RoomMessageEventContent::text_plain(format!( + "Deleted {} total files.", + deleted_count + ))) + }, + } +} diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index b3466711..c16c5e95 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -1,389 +1,787 @@ -pub mod console; -mod create; -mod execute; -mod grant; +use std::{collections::BTreeMap, sync::Arc}; -use std::{ - future::Future, - pin::Pin, - sync::{Arc, RwLock as StdRwLock, Weak}, -}; - -use async_trait::async_trait; -use conduwuit::{ - Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder, -}; -pub use create::create_admin_room; -use futures::{FutureExt, TryFutureExt}; -use loole::{Receiver, Sender}; +use clap::Parser; +use regex::Regex; use ruma::{ - OwnedEventId, OwnedRoomId, RoomId, UserId, - events::room::message::{Relation, RoomMessageEventContent}, + api::client::error::ErrorKind, + events::{ + relation::InReplyTo, + room::{ + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, 
RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + message::{Relation::Reply, RoomMessageEventContent}, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + topic::RoomTopicEventContent, + }, + TimelineEventType, + }, + EventId, MxcUri, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; -use tokio::sync::RwLock; +use serde_json::value::to_raw_value; +use tokio::sync::Mutex; +use tracing::{error, warn}; -use crate::{Dep, account_data, globals, rooms, rooms::state::RoomMutexGuard}; +use super::pdu::PduBuilder; +use crate::{ + service::admin::{ + appservice::AppserviceCommand, debug::DebugCommand, federation::FederationCommand, media::MediaCommand, + room::RoomCommand, server::ServerCommand, user::UserCommand, + }, + services, Error, Result, +}; + +pub(crate) mod appservice; +pub(crate) mod debug; +pub(crate) mod federation; +pub(crate) mod media; +pub(crate) mod room; +pub(crate) mod room_alias; +pub(crate) mod room_directory; +pub(crate) mod room_moderation; +pub(crate) mod server; +pub(crate) mod user; + +const PAGE_SIZE: usize = 100; + +#[cfg_attr(test, derive(Debug))] +#[derive(Parser)] +#[command(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] +enum AdminCommand { + #[command(subcommand)] + /// - Commands for managing appservices + Appservices(AppserviceCommand), + + #[command(subcommand)] + /// - Commands for managing local users + Users(UserCommand), + + #[command(subcommand)] + /// - Commands for managing rooms + Rooms(RoomCommand), + + #[command(subcommand)] + /// - Commands for managing federation + Federation(FederationCommand), + + #[command(subcommand)] + /// - Commands for managing the server + Server(ServerCommand), + + #[command(subcommand)] + /// - Commands for managing media + Media(MediaCommand), + + 
#[command(subcommand)] + // TODO: should i split out debug commands to a separate thing? the + // debug commands seem like they could fit in the other categories fine + // this is more like a "miscellaneous" category than a debug one + /// - Commands for debugging things + Debug(DebugCommand), +} + +#[derive(Debug)] +pub enum AdminRoomEvent { + ProcessMessage(String, Arc), + SendMessage(RoomMessageEventContent), +} pub struct Service { - services: Services, - channel: (Sender, Receiver), - pub handle: RwLock>, - pub complete: StdRwLock>, - #[cfg(feature = "console")] - pub console: Arc, + pub sender: loole::Sender, + receiver: Mutex>, } -struct Services { - server: Arc, - globals: Dep, - alias: Dep, - timeline: Dep, - state: Dep, - state_cache: Dep, - state_accessor: Dep, - account_data: Dep, - services: StdRwLock>>, -} - -/// Inputs to a command are a multi-line string and optional reply_id. -#[derive(Debug)] -pub struct CommandInput { - pub command: String, - pub reply_id: Option, -} - -/// Prototype of the tab-completer. The input is buffered text when tab -/// asserted; the output will fully replace the input buffer. -pub type Completer = fn(&str) -> String; - -/// Prototype of the command processor. This is a callback supplied by the -/// reloadable admin module. -pub type Processor = fn(Arc, CommandInput) -> ProcessorFuture; - -/// Return type of the processor -pub type ProcessorFuture = Pin + Send>>; - -/// Result wrapping of a command's handling. Both variants are complete message -/// events which have digested any prior errors. The wrapping preserves whether -/// the command failed without interpreting the text. Ok(None) outputs are -/// dropped to produce no response. -pub type ProcessorResult = Result, CommandOutput>; - -/// Alias for the output structure. -pub type CommandOutput = RoomMessageEventContent; - -/// Maximum number of commands which can be queued for dispatch. 
-const COMMAND_QUEUE_LIMIT: usize = 512; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - server: args.server.clone(), - globals: args.depend::("globals"), - alias: args.depend::("rooms::alias"), - timeline: args.depend::("rooms::timeline"), - state: args.depend::("rooms::state"), - state_cache: args.depend::("rooms::state_cache"), - state_accessor: args - .depend::("rooms::state_accessor"), - account_data: args.depend::("account_data"), - services: None.into(), - }, - channel: loole::bounded(COMMAND_QUEUE_LIMIT), - handle: RwLock::new(None), - complete: StdRwLock::new(None), - #[cfg(feature = "console")] - console: console::Console::new(&args), - })) +impl Service { + pub fn build() -> Arc { + let (sender, receiver) = loole::unbounded(); + Arc::new(Self { + sender, + receiver: Mutex::new(receiver), + }) } - async fn worker(self: Arc) -> Result<()> { - let mut signals = self.services.server.signal.subscribe(); - let receiver = self.channel.1.clone(); + pub fn start_handler(self: &Arc) { + let self2 = Arc::clone(self); + tokio::spawn(async move { + self2 + .handler() + .await + .expect("Failed to initialize admin room handler"); + }); + } - self.startup_execute().await?; - self.console_auto_start().await; + async fn handler(&self) -> Result<()> { + let receiver = self.receiver.lock().await; + // TODO: Use futures when we have long admin commands + //let mut futures = FuturesUnordered::new(); - loop { - tokio::select! 
{ - command = receiver.recv_async() => match command { - Ok(command) => self.handle_command(command).await, - Err(_) => break, - }, - sig = signals.recv() => match sig { - Ok(sig) => self.handle_signal(sig).await, - Err(_) => continue, - }, + let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); + + if let Ok(Some(conduit_room)) = Self::get_admin_room() { + loop { + tokio::select! { + event = receiver.recv_async() => { + match event { + Ok(event) => { + let (mut message_content, reply) = match event { + AdminRoomEvent::SendMessage(content) => (content, None), + AdminRoomEvent::ProcessMessage(room_message, reply_id) => { + (self.process_admin_message(room_message).await, Some(reply_id)) + } + }; + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .await + .entry(conduit_room.clone()) + .or_default(), + ); + + let state_lock = mutex_state.lock().await; + + if let Some(reply) = reply { + message_content.relates_to = Some(Reply { in_reply_to: InReplyTo { event_id: reply.into() } }); + } + + if let Err(e) = services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMessage, + content: to_raw_value(&message_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + &state_lock) + .await { + error!("Failed to build and append admin room response PDU: \"{e}\""); + + let error_room_message = RoomMessageEventContent::text_plain(format!("Failed to build and append admin room PDU: \"{e}\"\n\nThe original admin command may have finished successfully, but we could not return the output.")); + + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMessage, + content: to_raw_value(&error_room_message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, 
+ redacts: None, + }, + &conduit_user, + &conduit_room, + &state_lock) + .await?; + } + drop(state_lock); + } + Err(e) => { + // generally shouldn't happen + error!("Failed to receive admin room event from channel: {e}"); + } + } + } + } } } - self.console_auto_stop().await; //TODO: not unwind safe - Ok(()) } - fn interrupt(&self) { - #[cfg(feature = "console")] - self.console.interrupt(); - - let (sender, _) = &self.channel; - if !sender.is_closed() { - sender.close(); - } + pub fn process_message(&self, room_message: String, event_id: Arc) { + self.sender + .send(AdminRoomEvent::ProcessMessage(room_message, event_id)) + .unwrap(); } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -impl Service { - /// Sends markdown message (not an m.notice for notification reasons) to the - /// admin room as the admin user. - pub async fn send_text(&self, body: &str) { - self.send_message(RoomMessageEventContent::text_markdown(body)) - .await - .ok(); + pub fn send_message(&self, message_content: RoomMessageEventContent) { + self.sender + .send(AdminRoomEvent::SendMessage(message_content)) + .unwrap(); } - /// Sends a message to the admin room as the admin user (see send_text() for - /// convenience). - pub async fn send_message(&self, message_content: RoomMessageEventContent) -> Result<()> { - let user_id = &self.services.globals.server_user; - let room_id = self.get_admin_room().await?; - self.respond_to_room(message_content, &room_id, user_id) - .boxed() - .await - } + // Parse and process a message from the admin room + async fn process_admin_message(&self, room_message: String) -> RoomMessageEventContent { + let mut lines = room_message.lines().filter(|l| !l.trim().is_empty()); + let command_line = lines.next().expect("each string has at least one line"); + let body = lines.collect::>(); - /// Posts a command to the command processor queue and returns. Processing - /// will take place on the service worker's task asynchronously. 
Errors if - /// the queue is full. - pub fn command(&self, command: String, reply_id: Option) -> Result<()> { - self.channel - .0 - .send(CommandInput { command, reply_id }) - .map_err(|e| err!("Failed to enqueue admin command: {e:?}")) - } + let admin_command = match self.parse_admin_command(command_line) { + Ok(command) => command, + Err(error) => { + let server_name = services().globals.server_name(); + let message = error.replace("server.name", server_name.as_str()); + let html_message = self.usage_to_html(&message, server_name); - /// Dispatches a comamnd to the processor on the current task and waits for - /// completion. - pub async fn command_in_place( - &self, - command: String, - reply_id: Option, - ) -> ProcessorResult { - self.process_command(CommandInput { command, reply_id }) - .await - } - - /// Invokes the tab-completer to complete the command. When unavailable, - /// None is returned. - pub fn complete_command(&self, command: &str) -> Option { - self.complete - .read() - .expect("locked for reading") - .map(|complete| complete(command)) - } - - async fn handle_signal(&self, sig: &'static str) { - if sig == execute::SIGNAL { - self.signal_execute().await.ok(); - } - - #[cfg(feature = "console")] - self.console.handle_signal(sig).await; - } - - async fn handle_command(&self, command: CommandInput) { - match self.process_command(command).await { - | Ok(None) => debug!("Command successful with no response"), - | Ok(Some(output)) | Err(output) => self - .handle_response(output) - .await - .unwrap_or_else(default_log), - } - } - - async fn process_command(&self, command: CommandInput) -> ProcessorResult { - let handle = &self - .handle - .read() - .await - .expect("Admin module is not loaded"); - - let services = self - .services - .services - .read() - .expect("locked") - .as_ref() - .and_then(Weak::upgrade) - .expect("Services self-reference not initialized."); - - handle(services, command).await - } - - /// Checks whether a given user is an admin of 
this server - pub async fn user_is_admin(&self, user_id: &UserId) -> bool { - let Ok(admin_room) = self.get_admin_room().await else { - return false; + return RoomMessageEventContent::text_html(message, html_message); + }, }; - self.services - .state_cache - .is_joined(user_id, &admin_room) - .await + match self.process_admin_command(admin_command, body).await { + Ok(reply_message) => reply_message, + Err(error) => { + let markdown_message = format!("Encountered an error while handling the command:\n```\n{error}\n```",); + let html_message = format!("Encountered an error while handling the command:\n
\n{error}\n
",); + + RoomMessageEventContent::text_html(markdown_message, html_message) + }, + } + } + + // Parse chat messages from the admin room into an AdminCommand object + fn parse_admin_command(&self, command_line: &str) -> Result { + // Note: argv[0] is `@conduit:servername:`, which is treated as the main command + let mut argv = command_line.split_whitespace().collect::>(); + + // Replace `help command` with `command --help` + // Clap has a help subcommand, but it omits the long help description. + if argv.len() > 1 && argv[1] == "help" { + argv.remove(1); + argv.push("--help"); + } + + // Backwards compatibility with `register_appservice`-style commands + let command_with_dashes; + if argv.len() > 1 && argv[1].contains('_') { + command_with_dashes = argv[1].replace('_', "-"); + argv[1] = &command_with_dashes; + } + + AdminCommand::try_parse_from(argv).map_err(|error| error.to_string()) + } + + async fn process_admin_command(&self, command: AdminCommand, body: Vec<&str>) -> Result { + let reply_message_content = match command { + AdminCommand::Appservices(command) => appservice::process(command, body).await?, + AdminCommand::Media(command) => media::process(command, body).await?, + AdminCommand::Users(command) => user::process(command, body).await?, + AdminCommand::Rooms(command) => room::process(command, body).await?, + AdminCommand::Federation(command) => federation::process(command, body).await?, + AdminCommand::Server(command) => server::process(command, body).await?, + AdminCommand::Debug(command) => debug::process(command, body).await?, + }; + + Ok(reply_message_content) + } + + // Utility to turn clap's `--help` text to HTML. 
+ fn usage_to_html(&self, text: &str, server_name: &ServerName) -> String { + // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: + // subcmdname` + let text = text.replace(&format!("@conduit:{server_name}:-"), &format!("@conduit:{server_name}: ")); + + // For the conduit admin room, subcommands become main commands + let text = text.replace("SUBCOMMAND", "COMMAND"); + let text = text.replace("subcommand", "command"); + + // Escape option names (e.g. ``) since they look like HTML tags + let text = escape_html(&text); + + // Italicize the first line (command name and version text) + let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1\n"); + + // Unmerge wrapped lines + let text = text.replace("\n ", " "); + + // Wrap option names in backticks. The lines look like: + // -V, --version Prints version information + // And are converted to: + // -V, --version: Prints version information + // (?m) enables multi-line mode for ^ and $ + let re = Regex::new("(?m)^ {4}(([a-zA-Z_&;-]+(, )?)+) +(.*)$").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1: $4"); + + // Look for a `[commandbody]` tag. If it exists, use all lines below it that + // start with a `#` in the USAGE section. + let mut text_lines = text.lines().collect::>(); + let mut command_body = String::new(); + + if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") { + text_lines.remove(line_index); + + while text_lines + .get(line_index) + .is_some_and(|line| line.starts_with('#')) + { + command_body += if text_lines[line_index].starts_with("# ") { + &text_lines[line_index][2..] + } else { + &text_lines[line_index][1..] 
+ }; + command_body += "[nobr]\n"; + text_lines.remove(line_index); + } + } + + let text = text_lines.join("\n"); + + // Improve the usage section + let text = if command_body.is_empty() { + // Wrap the usage line in code tags + let re = Regex::new("(?m)^USAGE:\n {4}(@conduit:.*)$").expect("Regex compilation should not fail"); + re.replace_all(&text, "USAGE:\n$1").to_string() + } else { + // Wrap the usage line in a code block, and add a yaml block example + // This makes the usage of e.g. `register-appservice` more accurate + let re = Regex::new("(?m)^USAGE:\n {4}(.*?)\n\n").expect("Regex compilation should not fail"); + re.replace_all(&text, "USAGE:\n
$1[nobr]\n[commandbodyblock]
") + .replace("[commandbodyblock]", &command_body) + }; + + // Add HTML line-breaks + + text.replace("\n\n\n", "\n\n") + .replace('\n', "
\n") + .replace("[nobr]
", "") + } + + /// Create the admin room. + /// + /// Users in this room are considered admins by conduit, and the room can be + /// used to issue admin commands by talking to the server user inside it. + pub(crate) async fn create_admin_room(&self) -> Result<()> { + let room_id = RoomId::new(services().globals.server_name()); + + services().rooms.short.get_or_create_shortroomid(&room_id)?; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Create a user for the server + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); + + services().users.create(&conduit_user, None)?; + + let room_version = services().globals.default_room_version(); + let mut content = match room_version { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.clone()), + RoomVersionId::V11 => RoomCreateEventContent::new_v11(), + _ => { + warn!("Unexpected or unsupported room version {}", room_version); + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Unexpected or unsupported room version found", + )); + }, + }; + + content.federate = true; + content.predecessor = None; + content.room_version = room_version; + + // 1. The room create event + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + // 2. 
Make conduit bot join + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(conduit_user.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + // 3. Power levels + let mut users = BTreeMap::new(); + users.insert(conduit_user.clone(), 100.into()); + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + // 4.1 Join Rules + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + // 4.2 History Visibility + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + // 4.3 Guest Access + services() + .rooms + .timeline + 
.build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + // 5. Events implied by name and topic + let room_name = format!("{} Admin Room", services().globals.server_name()); + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(room_name)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: format!("Manage {}", services().globals.server_name()), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + // 6. 
Room alias + let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + services().rooms.alias.set_alias(&alias, &room_id)?; + + Ok(()) } /// Gets the room ID of the admin room /// /// Errors are propagated from the database, and will have None if there is /// no admin room - pub async fn get_admin_room(&self) -> Result { - let room_id = self - .services + pub(crate) fn get_admin_room() -> Result> { + let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + services() + .rooms .alias - .resolve_local_alias(&self.services.globals.admin_alias) - .await?; - - self.services - .state_cache - .is_joined(&self.services.globals.server_user, &room_id) - .await - .then_some(room_id) - .ok_or_else(|| err!(Request(NotFound("Admin user not joined to admin room")))) + .resolve_local_alias(&admin_room_alias) } - async fn handle_response(&self, content: RoomMessageEventContent) -> Result<()> { - let Some(Relation::Reply { in_reply_to }) = content.relates_to.as_ref() else { - return Ok(()); - }; - - let Ok(pdu) = self.services.timeline.get_pdu(&in_reply_to.event_id).await else { - error!( - event_id = ?in_reply_to.event_id, - "Missing admin command in_reply_to event" + /// Invite the user to the conduit admin room. + /// + /// In conduit, this is equivalent to granting admin privileges. 
+ pub(crate) async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Result<()> { + if let Some(room_id) = Self::get_admin_room()? { + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.clone()) + .or_default(), ); - return Ok(()); - }; + let state_lock = mutex_state.lock().await; - let response_sender = if self.is_admin_room(&pdu.room_id).await { - &self.services.globals.server_user + // Use the server user to grant the new admin's power level + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); + + // Invite and join the real user + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: Some(displayname), + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + &room_id, + &state_lock, + ) + .await?; + + // Set power level + let mut users = BTreeMap::new(); + users.insert(conduit_user.clone(), 100.into()); + users.insert(user_id.to_owned(), 100.into()); + 
+ services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(String::new()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; + + // Send welcome message + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMessage, + content: to_raw_value(&RoomMessageEventContent::text_html( + format!("## Thank you for trying out conduwuit!\n\nconduwuit is a fork of upstream Conduit which is in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Git and Documentation: https://github.com/girlbossceo/conduwuit\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nconduwuit room (Ask questions and get notified on updates):\n`/join #conduwuit:puppygock.gay`", services().globals.server_name()), + format!("

Thank you for trying out conduwuit!

\n

conduwuit is a fork of upstream Conduit which is in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.

\n

Helpful links:

\n
\n

Git and Documentation: https://github.com/girlbossceo/conduwuit
Report issues: https://github.com/girlbossceo/conduwuit/issues

\n
\n

For a list of available commands, send the following message in this room: @conduit:{}: --help

\n

Here are some rooms you can join (by typing the command):

\n

conduwuit room (Ask questions and get notified on updates):
/join #conduwuit:puppygock.gay

\n", services().globals.server_name()), + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ).await?; + + Ok(()) } else { - &pdu.sender - }; - - self.respond_to_room(content, &pdu.room_id, response_sender) - .boxed() - .await - } - - async fn respond_to_room( - &self, - content: RoomMessageEventContent, - room_id: &RoomId, - user_id: &UserId, - ) -> Result<()> { - assert!(self.user_is_admin(user_id).await, "sender is not admin"); - - let state_lock = self.services.state.mutex.lock(room_id).await; - - if let Err(e) = self - .services - .timeline - .build_and_append_pdu(PduBuilder::timeline(&content), user_id, room_id, &state_lock) - .await - { - self.handle_response_error(e, room_id, user_id, &state_lock) - .await - .unwrap_or_else(default_log); + Ok(()) } - - Ok(()) - } - - async fn handle_response_error( - &self, - e: Error, - room_id: &RoomId, - user_id: &UserId, - state_lock: &RoomMutexGuard, - ) -> Result<()> { - error!("Failed to build and append admin room response PDU: \"{e}\""); - let content = RoomMessageEventContent::text_plain(format!( - "Failed to build and append admin room PDU: \"{e}\"\n\nThe original admin command \ - may have finished successfully, but we could not return the output." 
- )); - - self.services - .timeline - .build_and_append_pdu(PduBuilder::timeline(&content), user_id, room_id, state_lock) - .await?; - - Ok(()) - } - - pub async fn is_admin_command(&self, pdu: &PduEvent, body: &str) -> bool { - // Server-side command-escape with public echo - let is_escape = body.starts_with('\\'); - let is_public_escape = is_escape && body.trim_start_matches('\\').starts_with("!admin"); - - // Admin command with public echo (in admin room) - let server_user = &self.services.globals.server_user; - let is_public_prefix = - body.starts_with("!admin") || body.starts_with(server_user.as_str()); - - // Expected backward branch - if !is_public_escape && !is_public_prefix { - return false; - } - - // only allow public escaped commands by local admins - if is_public_escape && !self.services.globals.user_is_local(&pdu.sender) { - return false; - } - - // Check if server-side command-escape is disabled by configuration - if is_public_escape && !self.services.server.config.admin_escape_commands { - return false; - } - - // Prevent unescaped !admin from being used outside of the admin room - if is_public_prefix && !self.is_admin_room(&pdu.room_id).await { - return false; - } - - // Only senders who are admin can proceed - if !self.user_is_admin(&pdu.sender).await { - return false; - } - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as the server user - let emergency_password_set = self.services.server.config.emergency_password.is_some(); - let from_server = pdu.sender == *server_user && !emergency_password_set; - if from_server && self.is_admin_room(&pdu.room_id).await { - return false; - } - - // Authentic admin command - true - } - - #[must_use] - pub async fn is_admin_room(&self, room_id_: &RoomId) -> bool { - self.get_admin_room() - .map_ok(|room_id| room_id == room_id_) - .await - .unwrap_or(false) - } - - /// Sets the self-reference to crate::Services which will provide context 
to - /// the admin commands. - pub(super) fn set_services(&self, services: Option<&Arc>) { - let receiver = &mut *self.services.services.write().expect("locked for writing"); - let weak = services.map(Arc::downgrade); - *receiver = weak; + } +} + +fn escape_html(s: &str) -> String { + s.replace('&', "&") + .replace('<', "<") + .replace('>', ">") +} + +fn get_room_info(id: &OwnedRoomId) -> (OwnedRoomId, u64, String) { + ( + id.clone(), + services() + .rooms + .state_cache + .room_joined_count(id) + .ok() + .flatten() + .unwrap_or(0), + services() + .rooms + .state_accessor + .get_name(id) + .ok() + .flatten() + .unwrap_or_else(|| id.to_string()), + ) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn get_help_short() { get_help_inner("-h"); } + + #[test] + fn get_help_long() { get_help_inner("--help"); } + + #[test] + fn get_help_subcommand() { get_help_inner("help"); } + + fn get_help_inner(input: &str) { + let error = AdminCommand::try_parse_from(["argv[0] doesn't matter", input]) + .unwrap_err() + .to_string(); + + // Search for a handful of keywords that suggest the help printed properly + assert!(error.contains("Usage:")); + assert!(error.contains("Commands:")); + assert!(error.contains("Options:")); } } diff --git a/src/service/admin/room.rs b/src/service/admin/room.rs new file mode 100644 index 00000000..721191b1 --- /dev/null +++ b/src/service/admin/room.rs @@ -0,0 +1,96 @@ +use std::fmt::Write as _; + +use clap::Subcommand; +use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; + +use crate::{ + service::admin::{ + escape_html, get_room_info, room_alias, room_alias::RoomAliasCommand, room_directory, + room_directory::RoomDirectoryCommand, room_moderation, room_moderation::RoomModerationCommand, PAGE_SIZE, + }, + services, Result, +}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum RoomCommand { + /// - List all rooms the server knows about + List { + page: Option, + }, + + #[command(subcommand)] + 
/// - Manage moderation of remote or local rooms + Moderation(RoomModerationCommand), + + #[command(subcommand)] + /// - Manage rooms' aliases + Alias(RoomAliasCommand), + + #[command(subcommand)] + /// - Manage the room directory + Directory(RoomDirectoryCommand), +} + +pub(crate) async fn process(command: RoomCommand, body: Vec<&str>) -> Result { + match command { + RoomCommand::Alias(command) => room_alias::process(command, body).await, + + RoomCommand::Directory(command) => room_directory::process(command, body).await, + + RoomCommand::Moderation(command) => room_moderation::process(command, body).await, + + RoomCommand::List { + page, + } => { + // TODO: i know there's a way to do this with clap, but i can't seem to find it + let page = page.unwrap_or(1); + let mut rooms = services() + .rooms + .metadata + .iter_ids() + .filter_map(Result::ok) + .map(|id: OwnedRoomId| get_room_info(&id)) + .collect::>(); + rooms.sort_by_key(|r| r.1); + rooms.reverse(); + + let rooms = rooms + .into_iter() + .skip(page.saturating_sub(1) * PAGE_SIZE) + .take(PAGE_SIZE) + .collect::>(); + + if rooms.is_empty() { + return Ok(RoomMessageEventContent::text_plain("No more rooms.")); + }; + + let output_plain = format!( + "Rooms:\n{}", + rooms + .iter() + .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .collect::>() + .join("\n") + ); + let output_html = format!( + "\n\t\t\n{}
Room list - page \ + {page}
idmembersname
", + rooms + .iter() + .fold(String::new(), |mut output, (id, members, name)| { + writeln!( + output, + "{}\t{}\t{}", + escape_html(id.as_ref()), + members, + escape_html(name) + ) + .unwrap(); + output + }) + ); + Ok(RoomMessageEventContent::text_html(output_plain, output_html)) + }, + } +} diff --git a/src/service/admin/room_alias.rs b/src/service/admin/room_alias.rs new file mode 100644 index 00000000..f1621344 --- /dev/null +++ b/src/service/admin/room_alias.rs @@ -0,0 +1,172 @@ +use std::fmt::Write as _; + +use clap::Subcommand; +use ruma::{events::room::message::RoomMessageEventContent, RoomAliasId, RoomId}; + +use crate::{service::admin::escape_html, services, Result}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum RoomAliasCommand { + /// - Make an alias point to a room. + Set { + #[arg(short, long)] + /// Set the alias even if a room is already using it + force: bool, + + /// The room id to set the alias on + room_id: Box, + + /// The alias localpart to use (`alias`, not `#alias:servername.tld`) + room_alias_localpart: String, + }, + + /// - Remove an alias + Remove { + /// The alias localpart to remove (`alias`, not `#alias:servername.tld`) + room_alias_localpart: String, + }, + + /// - Show which room is using an alias + Which { + /// The alias localpart to look up (`alias`, not + /// `#alias:servername.tld`) + room_alias_localpart: String, + }, + + /// - List aliases currently being used + List { + /// If set, only list the aliases for this room + room_id: Option>, + }, +} + +pub(crate) async fn process(command: RoomAliasCommand, _body: Vec<&str>) -> Result { + match command { + RoomAliasCommand::Set { + ref room_alias_localpart, + .. 
+ } + | RoomAliasCommand::Remove { + ref room_alias_localpart, + } + | RoomAliasCommand::Which { + ref room_alias_localpart, + } => { + let room_alias_str = format!("#{}:{}", room_alias_localpart, services().globals.server_name()); + let room_alias = match RoomAliasId::parse_box(room_alias_str) { + Ok(alias) => alias, + Err(err) => return Ok(RoomMessageEventContent::text_plain(format!("Failed to parse alias: {}", err))), + }; + match command { + RoomAliasCommand::Set { + force, + room_id, + .. + } => match (force, services().rooms.alias.resolve_local_alias(&room_alias)) { + (true, Ok(Some(id))) => match services().rooms.alias.set_alias(&room_alias, &room_id) { + Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( + "Successfully overwrote alias (formerly {})", + id + ))), + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {}", err))), + }, + (false, Ok(Some(id))) => Ok(RoomMessageEventContent::text_plain(format!( + "Refusing to overwrite in use alias for {}, use -f or --force to overwrite", + id + ))), + (_, Ok(None)) => match services().rooms.alias.set_alias(&room_alias, &room_id) { + Ok(()) => Ok(RoomMessageEventContent::text_plain("Successfully set alias")), + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {err}"))), + }, + (_, Err(err)) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {err}"))), + }, + RoomAliasCommand::Remove { + .. 
+ } => match services().rooms.alias.resolve_local_alias(&room_alias) { + Ok(Some(id)) => match services().rooms.alias.remove_alias(&room_alias) { + Ok(()) => Ok(RoomMessageEventContent::text_plain(format!("Removed alias from {}", id))), + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Failed to remove alias: {}", err))), + }, + Ok(None) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {}", err))), + }, + RoomAliasCommand::Which { + .. + } => match services().rooms.alias.resolve_local_alias(&room_alias) { + Ok(Some(id)) => Ok(RoomMessageEventContent::text_plain(format!("Alias resolves to {}", id))), + Ok(None) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to lookup alias: {}", err))), + }, + RoomAliasCommand::List { + .. + } => unreachable!(), + } + }, + RoomAliasCommand::List { + room_id, + } => { + if let Some(room_id) = room_id { + let aliases = services() + .rooms + .alias + .local_aliases_for_room(&room_id) + .collect::, _>>(); + match aliases { + Ok(aliases) => { + let plain_list = aliases.iter().fold(String::new(), |mut output, alias| { + writeln!(output, "- {alias}").unwrap(); + output + }); + + let html_list = aliases.iter().fold(String::new(), |mut output, alias| { + writeln!(output, "
  • {}
  • ", escape_html(alias.as_ref())).unwrap(); + output + }); + + let plain = format!("Aliases for {room_id}:\n{plain_list}"); + let html = format!("Aliases for {room_id}:\n
      {html_list}
    "); + Ok(RoomMessageEventContent::text_html(plain, html)) + }, + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to list aliases: {}", err))), + } + } else { + let aliases = services() + .rooms + .alias + .all_local_aliases() + .collect::, _>>(); + match aliases { + Ok(aliases) => { + let server_name = services().globals.server_name(); + let plain_list = aliases + .iter() + .fold(String::new(), |mut output, (alias, id)| { + writeln!(output, "- `{alias}` -> #{id}:{server_name}").unwrap(); + output + }); + + let html_list = aliases + .iter() + .fold(String::new(), |mut output, (alias, id)| { + writeln!( + output, + "
  • {} -> #{}:{}
  • ", + escape_html(alias.as_ref()), + escape_html(id.as_ref()), + server_name + ) + .unwrap(); + output + }); + + let plain = format!("Aliases:\n{plain_list}"); + let html = format!("Aliases:\n
      {html_list}
    "); + Ok(RoomMessageEventContent::text_html(plain, html)) + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Unable to list room aliases: {e}"))), + } + } + }, + } +} diff --git a/src/service/admin/room_directory.rs b/src/service/admin/room_directory.rs new file mode 100644 index 00000000..86dc03d6 --- /dev/null +++ b/src/service/admin/room_directory.rs @@ -0,0 +1,99 @@ +use std::fmt::Write as _; + +use clap::Subcommand; +use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId}; + +use crate::{ + service::admin::{escape_html, get_room_info, PAGE_SIZE}, + services, Result, +}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum RoomDirectoryCommand { + /// - Publish a room to the room directory + Publish { + /// The room id of the room to publish + room_id: Box, + }, + + /// - Unpublish a room to the room directory + Unpublish { + /// The room id of the room to unpublish + room_id: Box, + }, + + /// - List rooms that are published + List { + page: Option, + }, +} + +pub(crate) async fn process(command: RoomDirectoryCommand, _body: Vec<&str>) -> Result { + match command { + RoomDirectoryCommand::Publish { + room_id, + } => match services().rooms.directory.set_public(&room_id) { + Ok(()) => Ok(RoomMessageEventContent::text_plain("Room published")), + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {}", err))), + }, + RoomDirectoryCommand::Unpublish { + room_id, + } => match services().rooms.directory.set_not_public(&room_id) { + Ok(()) => Ok(RoomMessageEventContent::text_plain("Room unpublished")), + Err(err) => Ok(RoomMessageEventContent::text_plain(format!("Unable to update room: {}", err))), + }, + RoomDirectoryCommand::List { + page, + } => { + // TODO: i know there's a way to do this with clap, but i can't seem to find it + let page = page.unwrap_or(1); + let mut rooms = services() + .rooms + .directory + .public_rooms() + .filter_map(Result::ok) + .map(|id: 
OwnedRoomId| get_room_info(&id)) + .collect::>(); + rooms.sort_by_key(|r| r.1); + rooms.reverse(); + + let rooms = rooms + .into_iter() + .skip(page.saturating_sub(1) * PAGE_SIZE) + .take(PAGE_SIZE) + .collect::>(); + + if rooms.is_empty() { + return Ok(RoomMessageEventContent::text_plain("No more rooms.")); + }; + + let output_plain = format!( + "Rooms:\n{}", + rooms + .iter() + .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .collect::>() + .join("\n") + ); + let output_html = format!( + "\n\t\t\n{}
    Room directory - page \ + {page}
    idmembersname
    ", + rooms + .iter() + .fold(String::new(), |mut output, (id, members, name)| { + writeln!( + output, + "{}\t{}\t{}", + escape_html(id.as_ref()), + members, + escape_html(name.as_ref()) + ) + .unwrap(); + output + }) + ); + Ok(RoomMessageEventContent::text_html(output_plain, output_html)) + }, + } +} diff --git a/src/service/admin/room_moderation.rs b/src/service/admin/room_moderation.rs new file mode 100644 index 00000000..18a42a37 --- /dev/null +++ b/src/service/admin/room_moderation.rs @@ -0,0 +1,562 @@ +use std::fmt::Write as _; + +use clap::Subcommand; +use ruma::{ + events::room::message::RoomMessageEventContent, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId, +}; +use tracing::{debug, error, info, warn}; + +use crate::{ + api::client_server::{get_alias_helper, leave_room}, + service::admin::{escape_html, Service}, + services, Result, +}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum RoomModerationCommand { + /// - Bans a room from local users joining and evicts all our local users + /// from the room. Also blocks any invites (local and remote) for the + /// banned room. + /// + /// Server admins (users in the conduwuit admin room) will not be evicted + /// and server admins can still join the room. 
To evict admins too, use + /// --force (also ignores errors) To disable incoming federation of the + /// room, use --disable-federation + BanRoom { + #[arg(short, long)] + /// Evicts admins out of the room and ignores any potential errors when + /// making our local users leave the room + force: bool, + + #[arg(long)] + /// Disables incoming federation of the room after banning and evicting + /// users + disable_federation: bool, + + /// The room in the format of `!roomid:example.com` or a room alias in + /// the format of `#roomalias:example.com` + room: Box, + }, + + /// - Bans a list of rooms (room IDs and room aliases) from a newline + /// delimited codeblock similar to `user deactivate-all` + BanListOfRooms { + #[arg(short, long)] + /// Evicts admins out of the room and ignores any potential errors when + /// making our local users leave the room + force: bool, + + #[arg(long)] + /// Disables incoming federation of the room after banning and evicting + /// users + disable_federation: bool, + }, + + /// - Unbans a room to allow local users to join again + /// + /// To re-enable incoming federation of the room, use --enable-federation + UnbanRoom { + #[arg(long)] + /// Enables incoming federation of the room after unbanning + enable_federation: bool, + + /// The room in the format of `!roomid:example.com` or a room alias in + /// the format of `#roomalias:example.com` + room: Box, + }, + + /// - List of all rooms we have banned + ListBannedRooms, +} + +pub(crate) async fn process(command: RoomModerationCommand, body: Vec<&str>) -> Result { + match command { + RoomModerationCommand::BanRoom { + force, + room, + disable_federation, + } => { + debug!("Got room alias or ID: {}", room); + + let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + if let Some(admin_room_id) = Service::get_admin_room()? 
{ + if room.to_string().eq(&admin_room_id) || room.to_string().eq(&admin_room_alias) { + return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room.")); + } + } + + let room_id = if room.is_room_id() { + let room_id = match RoomId::parse(&room) { + Ok(room_id) => room_id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to parse room ID {room}. Please note that this requires a full room ID \ + (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`): {e}" + ))) + }, + }; + + debug!("Room specified is a room ID, banning room ID"); + + services().rooms.metadata.ban_room(&room_id, true)?; + + room_id + } else if room.is_room_alias_id() { + let room_alias = match RoomAliasId::parse(&room) { + Ok(room_alias) => room_alias, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to parse room ID {room}. Please note that this requires a full room ID \ + (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`): {e}" + ))) + }, + }; + + debug!( + "Room specified is not a room ID, attempting to resolve room alias to a room ID locally, if not \ + using get_alias_helper to fetch room ID remotely" + ); + + let room_id = if let Some(room_id) = services().rooms.alias.resolve_local_alias(&room_alias)? { + room_id + } else { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch room ID over \ + federation" + ); + + match get_alias_helper(room_alias).await { + Ok(response) => { + debug!("Got federation response fetching room ID for room {room}: {:?}", response); + response.room_id + }, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to resolve room alias {room} to a room ID: {e}" + ))); + }, + } + }; + + services().rooms.metadata.ban_room(&room_id, true)?; + + room_id + } else { + return Ok(RoomMessageEventContent::text_plain( + "Room specified is not a room ID or room alias. 
Please note that this requires a full room ID \ + (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`)", + )); + }; + + debug!("Making all users leave the room {}", &room); + if force { + for local_user in services() + .rooms + .state_cache + .room_members(&room_id) + .filter_map(|user| { + user.ok().filter(|local_user| { + local_user.server_name() == services().globals.server_name() + // additional wrapped check here is to avoid adding remote users + // who are in the admin room to the list of local users (would fail auth check) + && (local_user.server_name() + == services().globals.server_name() + && services() + .users + .is_admin(local_user) + .unwrap_or(true)) // since this is a force + // operation, assume user + // is an admin if somehow + // this fails + }) + }) + .collect::>() + { + debug!( + "Attempting leave for user {} in room {} (forced, ignoring all errors, evicting admins too)", + &local_user, &room_id + ); + + _ = leave_room(&local_user, &room_id, None).await; + } + } else { + for local_user in services() + .rooms + .state_cache + .room_members(&room_id) + .filter_map(|user| { + user.ok().filter(|local_user| { + local_user.server_name() == services().globals.server_name() + // additional wrapped check here is to avoid adding remote users + // who are in the admin room to the list of local users (would fail auth check) + && (local_user.server_name() + == services().globals.server_name() + && !services() + .users + .is_admin(local_user) + .unwrap_or(false)) + }) + }) + .collect::>() + { + debug!("Attempting leave for user {} in room {}", &local_user, &room_id); + if let Err(e) = leave_room(&local_user, &room_id, None).await { + error!( + "Error attempting to make local user {} leave room {} during room banning: {}", + &local_user, &room_id, e + ); + return Ok(RoomMessageEventContent::text_plain(format!( + "Error attempting to make local user {} leave room {} during room banning (room is still \ + banned but not removing any 
more users): {}\nIf you would like to ignore errors, use \ + --force", + &local_user, &room_id, e + ))); + } + } + } + + if disable_federation { + services().rooms.metadata.disable_room(&room_id, true)?; + return Ok(RoomMessageEventContent::text_plain( + "Room banned, removed all our local users, and disabled incoming federation with room.", + )); + } + + Ok(RoomMessageEventContent::text_plain( + "Room banned and removed all our local users, use disable-room to stop receiving new inbound \ + federation events as well if needed.", + )) + }, + RoomModerationCommand::BanListOfRooms { + force, + disable_federation, + } => { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let rooms_s = body.clone().drain(1..body.len() - 1).collect::>(); + + let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + let mut room_ban_count = 0; + let mut room_ids: Vec = Vec::new(); + + for &room in &rooms_s { + match <&RoomOrAliasId>::try_from(room) { + Ok(room_alias_or_id) => { + if let Some(admin_room_id) = Service::get_admin_room()? 
{ + if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(&admin_room_alias) { + info!("User specified admin room in bulk ban list, ignoring"); + continue; + } + } + + if room_alias_or_id.is_room_id() { + let room_id = match RoomId::parse(room_alias_or_id) { + Ok(room_id) => room_id, + Err(e) => { + if force { + // ignore rooms we failed to parse if we're force banning + warn!( + "Error parsing room \"{room}\" during bulk room banning, ignoring \ + error and logging here: {e}" + ); + continue; + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "{room} is not a valid room ID or room alias, please fix the list and try \ + again: {e}" + ))); + }, + }; + + room_ids.push(room_id); + } + + if room_alias_or_id.is_room_alias_id() { + match RoomAliasId::parse(room_alias_or_id) { + Ok(room_alias) => { + let room_id = if let Some(room_id) = + services().rooms.alias.resolve_local_alias(&room_alias)? + { + room_id + } else { + debug!( + "We don't have this room alias to a room ID locally, attempting to \ + fetch room ID over federation" + ); + + match get_alias_helper(room_alias).await { + Ok(response) => { + debug!( + "Got federation response fetching room ID for room {room}: \ + {:?}", + response + ); + response.room_id + }, + Err(e) => { + // don't fail if force blocking + if force { + warn!("Failed to resolve room alias {room} to a room ID: {e}"); + continue; + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to resolve room alias {room} to a room ID: {e}" + ))); + }, + } + }; + + room_ids.push(room_id); + }, + Err(e) => { + if force { + // ignore rooms we failed to parse if we're force deleting + error!( + "Error parsing room \"{room}\" during bulk room banning, ignoring \ + error and logging here: {e}" + ); + continue; + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "{room} is not a valid room ID or room alias, please fix the list and try \ + again: {e}" + ))); + }, + } + } + }, + Err(e) => { + if force { + // 
ignore rooms we failed to parse if we're force deleting + error!( + "Error parsing room \"{room}\" during bulk room banning, ignoring error and \ + logging here: {e}" + ); + continue; + } + + return Ok(RoomMessageEventContent::text_plain(format!( + "{room} is not a valid room ID or room alias, please fix the list and try again: {e}" + ))); + }, + } + } + + for room_id in room_ids { + if services().rooms.metadata.ban_room(&room_id, true).is_ok() { + debug!("Banned {room_id} successfully"); + room_ban_count += 1; + } + + debug!("Making all users leave the room {}", &room_id); + if force { + for local_user in services() + .rooms + .state_cache + .room_members(&room_id) + .filter_map(|user| { + user.ok().filter(|local_user| { + local_user.server_name() == services().globals.server_name() + // additional wrapped check here is to avoid adding remote users + // who are in the admin room to the list of local users (would fail auth check) + && (local_user.server_name() + == services().globals.server_name() + && services() + .users + .is_admin(local_user) + .unwrap_or(true)) // since this is a + // force operation, + // assume user is + // an admin if + // somehow this + // fails + }) + }) + .collect::>() + { + debug!( + "Attempting leave for user {} in room {} (forced, ignoring all errors, evicting \ + admins too)", + &local_user, room_id + ); + _ = leave_room(&local_user, &room_id, None).await; + } + } else { + for local_user in services() + .rooms + .state_cache + .room_members(&room_id) + .filter_map(|user| { + user.ok().filter(|local_user| { + local_user.server_name() == services().globals.server_name() + // additional wrapped check here is to avoid adding remote users + // who are in the admin room to the list of local users (would fail auth check) + && (local_user.server_name() + == services().globals.server_name() + && !services() + .users + .is_admin(local_user) + .unwrap_or(false)) + }) + }) + .collect::>() + { + debug!("Attempting leave for user {} in room {}", 
&local_user, &room_id); + if let Err(e) = leave_room(&local_user, &room_id, None).await { + error!( + "Error attempting to make local user {} leave room {} during bulk room banning: {}", + &local_user, &room_id, e + ); + return Ok(RoomMessageEventContent::text_plain(format!( + "Error attempting to make local user {} leave room {} during room banning (room \ + is still banned but not removing any more users and not banning any more rooms): \ + {}\nIf you would like to ignore errors, use --force", + &local_user, &room_id, e + ))); + } + } + } + + if disable_federation { + services().rooms.metadata.disable_room(&room_id, true)?; + } + } + + if disable_federation { + return Ok(RoomMessageEventContent::text_plain(format!( + "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and disabled \ + incoming federation with the room." + ))); + } + return Ok(RoomMessageEventContent::text_plain(format!( + "Finished bulk room ban, banned {room_ban_count} total rooms and evicted all users." + ))); + } + + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) + }, + RoomModerationCommand::UnbanRoom { + room, + enable_federation, + } => { + let room_id = if room.is_room_id() { + let room_id = match RoomId::parse(&room) { + Ok(room_id) => room_id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to parse room ID {room}. Please note that this requires a full room ID \ + (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`): {e}" + ))) + }, + }; + + debug!("Room specified is a room ID, unbanning room ID"); + + services().rooms.metadata.ban_room(&room_id, false)?; + + room_id + } else if room.is_room_alias_id() { + let room_alias = match RoomAliasId::parse(&room) { + Ok(room_alias) => room_alias, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to parse room ID {room}. 
Please note that this requires a full room ID \ + (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`): {e}" + ))) + }, + }; + + debug!( + "Room specified is not a room ID, attempting to resolve room alias to a room ID locally, if not \ + using get_alias_helper to fetch room ID remotely" + ); + + let room_id = if let Some(room_id) = services().rooms.alias.resolve_local_alias(&room_alias)? { + room_id + } else { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch room ID over \ + federation" + ); + + match get_alias_helper(room_alias).await { + Ok(response) => { + debug!("Got federation response fetching room ID for room {room}: {:?}", response); + response.room_id + }, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to resolve room alias {room} to a room ID: {e}" + ))); + }, + } + }; + + services().rooms.metadata.ban_room(&room_id, false)?; + + room_id + } else { + return Ok(RoomMessageEventContent::text_plain( + "Room specified is not a room ID or room alias. 
Please note that this requires a full room ID \ + (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias (`#roomalias:example.com`)", + )); + }; + + if enable_federation { + services().rooms.metadata.disable_room(&room_id, false)?; + return Ok(RoomMessageEventContent::text_plain("Room unbanned.")); + } + + Ok(RoomMessageEventContent::text_plain( + "Room unbanned, you may need to re-enable federation with the room using enable-room if this is a \ + remote room to make it fully functional.", + )) + }, + RoomModerationCommand::ListBannedRooms => { + let rooms = services() + .rooms + .metadata + .list_banned_rooms() + .collect::, _>>(); + + match rooms { + Ok(room_ids) => { + // TODO: add room name from our state cache if available, default to the room ID + // as the room name if we dont have it TODO: do same if we have a room alias for + // this + let plain_list = room_ids.iter().fold(String::new(), |mut output, room_id| { + writeln!(output, "- `{}`", room_id).unwrap(); + output + }); + + let html_list = room_ids.iter().fold(String::new(), |mut output, room_id| { + writeln!(output, "
  • {}
  • ", escape_html(room_id.as_ref())).unwrap(); + output + }); + + let plain = format!("Rooms:\n{}", plain_list); + let html = format!("Rooms:\n
      {}
    ", html_list); + Ok(RoomMessageEventContent::text_html(plain, html)) + }, + Err(e) => { + error!("Failed to list banned rooms: {}", e); + Ok(RoomMessageEventContent::text_plain(format!( + "Unable to list room aliases: {}", + e + ))) + }, + } + }, + } +} diff --git a/src/service/admin/server.rs b/src/service/admin/server.rs new file mode 100644 index 00000000..07519a1b --- /dev/null +++ b/src/service/admin/server.rs @@ -0,0 +1,106 @@ +use clap::Subcommand; +use ruma::events::room::message::RoomMessageEventContent; + +use crate::{services, Result}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum ServerCommand { + /// - Show configuration values + ShowConfig, + + /// - Print database memory usage statistics + MemoryUsage, + + /// - Clears all of Conduit's database caches with index smaller than the + /// amount + ClearDatabaseCaches { + amount: u32, + }, + + /// - Clears all of Conduit's service caches with index smaller than the + /// amount + ClearServiceCaches { + amount: u32, + }, + + /// - Performs an online backup of the database (only available for RocksDB + /// at the moment) + BackupDatabase, + + /// - List database backups + ListBackups, + + /// - List database files + ListDatabaseFiles, +} + +pub(crate) async fn process(command: ServerCommand, _body: Vec<&str>) -> Result { + match command { + ServerCommand::ShowConfig => { + // Construct and send the response + Ok(RoomMessageEventContent::text_plain(format!("{}", services().globals.config))) + }, + ServerCommand::MemoryUsage => { + let response1 = services().memory_usage().await; + let response2 = services().globals.db.memory_usage(); + + Ok(RoomMessageEventContent::text_plain(format!( + "Services:\n{response1}\n\nDatabase:\n{response2}" + ))) + }, + ServerCommand::ClearDatabaseCaches { + amount, + } => { + services().globals.db.clear_caches(amount); + + Ok(RoomMessageEventContent::text_plain("Done.")) + }, + ServerCommand::ClearServiceCaches { + amount, + } => { + 
services().clear_caches(amount).await; + + Ok(RoomMessageEventContent::text_plain("Done.")) + }, + ServerCommand::ListBackups => { + let result = services().globals.db.backup_list()?; + + if result.is_empty() { + Ok(RoomMessageEventContent::text_plain("No backups found.")) + } else { + Ok(RoomMessageEventContent::text_plain(result)) + } + }, + ServerCommand::BackupDatabase => { + if !cfg!(feature = "rocksdb") { + return Ok(RoomMessageEventContent::text_plain( + "Only RocksDB supports online backups in conduwuit.", + )); + } + + let mut result = tokio::task::spawn_blocking(move || match services().globals.db.backup() { + Ok(()) => String::new(), + Err(e) => (*e).to_string(), + }) + .await + .unwrap(); + + if result.is_empty() { + result = services().globals.db.backup_list()?; + } + + Ok(RoomMessageEventContent::text_plain(&result)) + }, + ServerCommand::ListDatabaseFiles => { + if !cfg!(feature = "rocksdb") { + return Ok(RoomMessageEventContent::text_plain( + "Only RocksDB supports listing files in conduwuit.", + )); + } + + let result = services().globals.db.file_list()?; + Ok(RoomMessageEventContent::notice_html(String::new(), result)) + }, + } +} diff --git a/src/service/admin/user.rs b/src/service/admin/user.rs new file mode 100644 index 00000000..11441e86 --- /dev/null +++ b/src/service/admin/user.rs @@ -0,0 +1,406 @@ +use std::{fmt::Write as _, sync::Arc}; + +use clap::Subcommand; +use itertools::Itertools; +use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId, UserId}; +use tracing::{error, info, warn}; + +use crate::{ + api::client_server::{join_room_by_id_helper, leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, + service::admin::{escape_html, get_room_info}, + services, utils, Result, +}; + +#[cfg_attr(test, derive(Debug))] +#[derive(Subcommand)] +pub(crate) enum UserCommand { + /// - Create a new user + Create { + /// Username of the new user + username: String, + /// Password of the new user, if unspecified one is generated + password: 
Option, + }, + + /// - Reset user password + ResetPassword { + /// Username of the user for whom the password should be reset + username: String, + }, + + /// - Deactivate a user + /// + /// User will not be removed from all rooms by default. + /// Use --leave-rooms to force the user to leave all rooms + Deactivate { + #[arg(short, long)] + leave_rooms: bool, + user_id: Box, + }, + + /// - Deactivate a list of users + /// + /// Recommended to use in conjunction with list-local-users. + /// + /// Users will not be removed from joined rooms by default. + /// Can be overridden with --leave-rooms flag. + /// Removing a mass amount of users from a room may cause a significant + /// amount of leave events. The time to leave rooms may depend significantly + /// on joined rooms and servers. + /// + /// This command needs a newline separated list of users provided in a + /// Markdown code block below the command. + DeactivateAll { + #[arg(short, long)] + /// Remove users from their joined rooms + leave_rooms: bool, + #[arg(short, long)] + /// Also deactivate admin accounts + force: bool, + }, + + /// - List local users in the database + List, + + /// - Lists all the rooms (local and remote) that the specified user is + /// joined in + ListJoinedRooms { + user_id: Box, + }, +} + +pub(crate) async fn process(command: UserCommand, body: Vec<&str>) -> Result { + match command { + UserCommand::List => match services().users.list_local_users() { + Ok(users) => { + let mut msg = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + Ok(RoomMessageEventContent::text_plain(&msg)) + }, + Err(e) => Ok(RoomMessageEventContent::text_plain(e.to_string())), + }, + UserCommand::Create { + username, + password, + } => { + let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + // Validate user id + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + services().globals.server_name(), 
+ ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {e}" + ))) + }, + }; + if user_id.is_historical() { + return Ok(RoomMessageEventContent::text_plain(format!( + "Userid {user_id} is not allowed due to historical" + ))); + } + if services().users.exists(&user_id)? { + return Ok(RoomMessageEventContent::text_plain(format!("Userid {user_id} already exists"))); + } + // Create user + services().users.create(&user_id, Some(password.as_str()))?; + + // Default to pretty displayname + let mut displayname = user_id.localpart().to_owned(); + + // If `new_user_displayname_suffix` is set, registration will push whatever + // content is set to the user's display name with a space before it + if !services().globals.new_user_displayname_suffix().is_empty() { + displayname.push_str(&(" ".to_owned() + services().globals.new_user_displayname_suffix())); + } + + services() + .users + .set_displayname(&user_id, Some(displayname)) + .await?; + + // Initial account data + services().account_data.update( + None, + &user_id, + ruma::events::GlobalAccountDataEventType::PushRules + .to_string() + .into(), + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: ruma::push::Ruleset::server_default(&user_id), + }, + }) + .expect("to json value always works"), + )?; + + if !services().globals.config.auto_join_rooms.is_empty() { + for room in &services().globals.config.auto_join_rooms { + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), room)? 
+ { + warn!("Skipping room {room} to automatically join as we have never joined before."); + continue; + } + + if let Some(room_id_server_name) = room.server_name() { + match join_room_by_id_helper( + Some(&user_id), + room, + Some("Automatically joining this room upon registration".to_owned()), + &[room_id_server_name.to_owned(), services().globals.server_name().to_owned()], + None, + ) + .await + { + Ok(_) => { + info!("Automatically joined room {room} for user {user_id}"); + }, + Err(e) => { + // don't return this error so we don't fail registrations + error!("Failed to automatically join room {room} for user {user_id}: {e}"); + }, + }; + } + } + } + + // we dont add a device since we're not the user, just the creator + + // Inhibit login does not work for guests + Ok(RoomMessageEventContent::text_plain(format!( + "Created user with user_id: {user_id} and password: `{password}`" + ))) + }, + UserCommand::Deactivate { + leave_rooms, + user_id, + } => { + let user_id = Arc::::from(user_id); + + // check if user belongs to our server + if user_id.server_name() != services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain(format!( + "User {user_id} does not belong to our server." + ))); + } + + // don't deactivate the conduit service account + if user_id + == UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("conduit user exists") + { + return Ok(RoomMessageEventContent::text_plain( + "Not allowed to deactivate the Conduit service account.", + )); + } + + if services().users.exists(&user_id)? 
{ + RoomMessageEventContent::text_plain(format!("Making {user_id} leave all rooms before deactivation...")); + + services().users.deactivate_account(&user_id)?; + + if leave_rooms { + leave_all_rooms(&user_id).await?; + } + + Ok(RoomMessageEventContent::text_plain(format!( + "User {user_id} has been deactivated" + ))) + } else { + Ok(RoomMessageEventContent::text_plain(format!( + "User {user_id} doesn't exist on this server" + ))) + } + }, + UserCommand::ResetPassword { + username, + } => { + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + services().globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {e}" + ))) + }, + }; + + // check if user belongs to our server + if user_id.server_name() != services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain(format!( + "User {user_id} does not belong to our server." + ))); + } + + // Check if the specified user is valid + if !services().users.exists(&user_id)? 
+ || user_id + == UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("conduit user exists") + { + return Ok(RoomMessageEventContent::text_plain("The specified user does not exist!")); + } + + let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); + + match services() + .users + .set_password(&user_id, Some(new_password.as_str())) + { + Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( + "Successfully reset the password for user {user_id}: `{new_password}`" + ))), + Err(e) => Ok(RoomMessageEventContent::text_plain(format!( + "Couldn't reset the password for user {user_id}: {e}" + ))), + } + }, + UserCommand::DeactivateAll { + leave_rooms, + force, + } => { + if body.len() > 2 && body[0].trim().starts_with("```") && body.last().unwrap().trim() == "```" { + let usernames = body.clone().drain(1..body.len() - 1).collect::>(); + + let mut user_ids: Vec<&UserId> = Vec::new(); + + for &username in &usernames { + match <&UserId>::try_from(username) { + Ok(user_id) => user_ids.push(user_id), + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "{username} is not a valid username: {e}" + ))) + }, + } + } + + let mut deactivation_count = 0; + let mut admins = Vec::new(); + + if !force { + user_ids.retain(|&user_id| match services().users.is_admin(user_id) { + Ok(is_admin) => { + if is_admin { + admins.push(user_id.localpart()); + false + } else { + true + } + }, + Err(_) => false, + }); + } + + for &user_id in &user_ids { + // check if user belongs to our server and skips over non-local users + if user_id.server_name() != services().globals.server_name() { + continue; + } + + // don't deactivate the conduit service account + if user_id + == UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("conduit user exists") + { + continue; + } + + // user does not exist on our server + if !services().users.exists(user_id)? 
{ + continue; + } + + if services().users.deactivate_account(user_id).is_ok() { + deactivation_count += 1; + } + } + + if leave_rooms { + for &user_id in &user_ids { + _ = leave_all_rooms(user_id).await; + } + } + + if admins.is_empty() { + Ok(RoomMessageEventContent::text_plain(format!( + "Deactivated {deactivation_count} accounts." + ))) + } else { + Ok(RoomMessageEventContent::text_plain(format!( + "Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin \ + accounts", + deactivation_count, + admins.join(", ") + ))) + } + } else { + Ok(RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + )) + } + }, + UserCommand::ListJoinedRooms { + user_id, + } => { + if user_id.server_name() != services().globals.server_name() { + return Ok(RoomMessageEventContent::text_plain("User does not belong to our server.")); + } + + if !services().users.exists(&user_id)? { + return Ok(RoomMessageEventContent::text_plain("User does not exist on this server.")); + } + + let mut rooms: Vec<(OwnedRoomId, u64, String)> = services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(Result::ok) + .map(|room_id| get_room_info(&room_id)) + .sorted_unstable() + .dedup() + .collect(); + + if rooms.is_empty() { + return Ok(RoomMessageEventContent::text_plain("User is not in any rooms.")); + } + + rooms.sort_by_key(|r| r.1); + rooms.reverse(); + + let output_plain = format!( + "Rooms {user_id} Joined:\n{}", + rooms + .iter() + .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .collect::>() + .join("\n") + ); + let output_html = format!( + "\n\t\t\n{}
    Rooms {user_id} \ + Joined
    idmembersname
    ", + rooms + .iter() + .fold(String::new(), |mut output, (id, members, name)| { + writeln!( + output, + "{}\t{}\t{}", + escape_html(id.as_ref()), + members, + escape_html(name) + ) + .unwrap(); + output + }) + ); + Ok(RoomMessageEventContent::text_html(output_plain, output_html)) + }, + } +} diff --git a/src/service/announcements/mod.rs b/src/service/announcements/mod.rs deleted file mode 100644 index 4df8971b..00000000 --- a/src/service/announcements/mod.rs +++ /dev/null @@ -1,169 +0,0 @@ -//! # Announcements service -//! -//! This service is responsible for checking for announcements and sending them -//! to the client. -//! -//! It is used to send announcements to the admin room and logs. -//! Annuncements are stored in /docs/static/announcements right now. -//! The highest seen announcement id is stored in the database. When the -//! announcement check is run, all announcements with an ID higher than those -//! seen before are printed to the console and sent to the admin room. -//! -//! Old announcements should be deleted to avoid spamming the room on first -//! install. -//! -//! Announcements are displayed as markdown in the admin room, but plain text in -//! the console. 
- -use std::{sync::Arc, time::Duration}; - -use async_trait::async_trait; -use conduwuit::{Result, Server, debug, info, warn}; -use database::{Deserialized, Map}; -use ruma::events::room::message::RoomMessageEventContent; -use serde::Deserialize; -use tokio::{ - sync::Notify, - time::{MissedTickBehavior, interval}, -}; - -use crate::{Dep, admin, client, globals}; - -pub struct Service { - interval: Duration, - interrupt: Notify, - db: Arc, - services: Services, -} - -struct Services { - admin: Dep, - client: Dep, - globals: Dep, - server: Arc, -} - -#[derive(Debug, Deserialize)] -struct CheckForAnnouncementsResponse { - announcements: Vec, -} - -#[derive(Debug, Deserialize)] -struct CheckForAnnouncementsResponseEntry { - id: u64, - date: Option, - message: String, -} - -const CHECK_FOR_ANNOUNCEMENTS_URL: &str = - "https://continuwuity.org/.well-known/continuwuity/announcements"; -const CHECK_FOR_ANNOUNCEMENTS_INTERVAL: u64 = 7200; // 2 hours -const LAST_CHECK_FOR_ANNOUNCEMENTS_ID: &[u8; 25] = b"last_seen_announcement_id"; -// In conduwuit, this was under b"a" - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - interval: Duration::from_secs(CHECK_FOR_ANNOUNCEMENTS_INTERVAL), - interrupt: Notify::new(), - db: args.db["global"].clone(), - services: Services { - globals: args.depend::("globals"), - admin: args.depend::("admin"), - client: args.depend::("client"), - server: args.server.clone(), - }, - })) - } - - #[tracing::instrument(skip_all, name = "announcements", level = "debug")] - async fn worker(self: Arc) -> Result<()> { - if !self.services.globals.allow_announcements_check() { - debug!("Disabling announcements check"); - return Ok(()); - } - - let mut i = interval(self.interval); - i.set_missed_tick_behavior(MissedTickBehavior::Delay); - i.reset_after(self.interval); - loop { - tokio::select! 
{ - () = self.interrupt.notified() => break, - _ = i.tick() => (), - } - - if let Err(e) = self.check().await { - warn!(%e, "Failed to check for announcements"); - } - } - - Ok(()) - } - - fn interrupt(&self) { self.interrupt.notify_waiters(); } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -impl Service { - #[tracing::instrument(skip_all)] - async fn check(&self) -> Result<()> { - debug_assert!(self.services.server.running(), "server must not be shutting down"); - - let response = self - .services - .client - .default - .get(CHECK_FOR_ANNOUNCEMENTS_URL) - .send() - .await? - .text() - .await?; - - let response = serde_json::from_str::(&response)?; - for announcement in &response.announcements { - if announcement.id > self.last_check_for_announcements_id().await { - self.handle(announcement).await; - self.update_check_for_announcements_id(announcement.id); - } - } - - Ok(()) - } - - #[tracing::instrument(skip_all)] - async fn handle(&self, announcement: &CheckForAnnouncementsResponseEntry) { - if let Some(date) = &announcement.date { - info!("[announcements] {date} {:#}", announcement.message); - } else { - info!("[announcements] {:#}", announcement.message); - } - - self.services - .admin - .send_message(RoomMessageEventContent::text_markdown(format!( - "### New announcement{}\n\n{}", - announcement - .date - .as_ref() - .map_or_else(String::new, |date| format!(" - `{date}`")), - announcement.message - ))) - .await - .ok(); - } - - #[inline] - pub fn update_check_for_announcements_id(&self, id: u64) { - self.db.raw_put(LAST_CHECK_FOR_ANNOUNCEMENTS_ID, id); - } - - pub async fn last_check_for_announcements_id(&self) -> u64 { - self.db - .get(LAST_CHECK_FOR_ANNOUNCEMENTS_ID) - .await - .deserialized() - .unwrap_or(0_u64) - } -} diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs new file mode 100644 index 00000000..52c8b34d --- /dev/null +++ b/src/service/appservice/data.rs @@ -0,0 +1,21 @@ +use 
ruma::api::appservice::Registration; + +use crate::Result; + +pub trait Data: Send + Sync { + /// Registers an appservice and returns the ID to the caller + fn register_appservice(&self, yaml: Registration) -> Result; + + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously + fn unregister_appservice(&self, service_name: &str) -> Result<()>; + + fn get_registration(&self, id: &str) -> Result>; + + fn iter_ids<'a>(&'a self) -> Result> + 'a>>; + + fn all(&self) -> Result>; +} diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 7be8a471..ea387881 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,107 +1,166 @@ -mod namespace_regex; -mod registration_info; +mod data; -use std::{collections::BTreeMap, iter::IntoIterator, sync::Arc}; +use std::collections::BTreeMap; -use async_trait::async_trait; -use conduwuit::{Result, err, utils::stream::IterStream}; -use database::Map; -use futures::{Future, FutureExt, Stream, TryStreamExt}; -use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration}; -use tokio::sync::{RwLock, RwLockReadGuard}; +pub(crate) use data::Data; +use futures_util::Future; +use regex::RegexSet; +use ruma::{ + api::appservice::{Namespace, Registration}, + RoomAliasId, RoomId, UserId, +}; +use tokio::sync::RwLock; -pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo}; -use crate::{Dep, sending}; +use crate::{services, Result}; + +/// Compiled regular expressions for a namespace +#[derive(Clone, Debug)] +pub struct NamespaceRegex { + pub exclusive: Option, + pub non_exclusive: Option, +} + +impl NamespaceRegex { + /// Checks if this namespace has rights to a namespace + pub fn is_match(&self, heystack: &str) -> bool { + if self.is_exclusive_match(heystack) { + return true; + } + + if let Some(non_exclusive) = &self.non_exclusive { + if 
non_exclusive.is_match(heystack) { + return true; + } + } + false + } + + /// Checks if this namespace has exlusive rights to a namespace + pub fn is_exclusive_match(&self, heystack: &str) -> bool { + if let Some(exclusive) = &self.exclusive { + if exclusive.is_match(heystack) { + return true; + } + } + false + } +} + +impl RegistrationInfo { + pub fn is_user_match(&self, user_id: &UserId) -> bool { + self.users.is_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() + } + + pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { + self.users.is_exclusive_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() + } +} + +impl TryFrom> for NamespaceRegex { + type Error = regex::Error; + + fn try_from(value: Vec) -> Result { + let mut exclusive = vec![]; + let mut non_exclusive = vec![]; + + for namespace in value { + if namespace.exclusive { + exclusive.push(namespace.regex); + } else { + non_exclusive.push(namespace.regex); + } + } + + Ok(NamespaceRegex { + exclusive: if exclusive.is_empty() { + None + } else { + Some(RegexSet::new(exclusive)?) + }, + non_exclusive: if non_exclusive.is_empty() { + None + } else { + Some(RegexSet::new(non_exclusive)?) + }, + }) + } +} + +/// Appservice registration combined with its compiled regular expressions. 
+#[derive(Clone, Debug)] +pub struct RegistrationInfo { + pub registration: Registration, + pub users: NamespaceRegex, + pub aliases: NamespaceRegex, + pub rooms: NamespaceRegex, +} + +impl TryFrom for RegistrationInfo { + type Error = regex::Error; + + fn try_from(value: Registration) -> Result { + Ok(RegistrationInfo { + users: value.namespaces.users.clone().try_into()?, + aliases: value.namespaces.aliases.clone().try_into()?, + rooms: value.namespaces.rooms.clone().try_into()?, + registration: value, + }) + } +} pub struct Service { - registration_info: RwLock, - services: Services, - db: Data, -} - -struct Services { - sending: Dep, -} - -struct Data { - id_appserviceregistrations: Arc, -} - -type Registrations = BTreeMap; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - registration_info: RwLock::new(BTreeMap::new()), - services: Services { - sending: args.depend::("sending"), - }, - db: Data { - id_appserviceregistrations: args.db["id_appserviceregistrations"].clone(), - }, - })) - } - - async fn worker(self: Arc) -> Result { - // Inserting registrations into cache - self.iter_db_ids() - .try_for_each(async |appservice| { - self.registration_info - .write() - .await - .insert(appservice.0, appservice.1.try_into()?); - - Ok(()) - }) - .await - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, + registration_info: RwLock>, } impl Service { + pub fn build(db: &'static dyn Data) -> Result { + let mut registration_info = BTreeMap::new(); + // Inserting registrations into cache + for appservice in db.all()? 
{ + registration_info.insert( + appservice.0, + appservice + .1 + .try_into() + .expect("Should be validated on registration"), + ); + } + + Ok(Self { + db, + registration_info: RwLock::new(registration_info), + }) + } + /// Registers an appservice and returns the ID to the caller - pub async fn register_appservice( - &self, - registration: &Registration, - appservice_config_body: &str, - ) -> Result { + pub async fn register_appservice(&self, yaml: Registration) -> Result { //TODO: Check for collisions between exclusive appservice namespaces - self.registration_info + services() + .appservice + .registration_info .write() .await - .insert(registration.id.clone(), registration.clone().try_into()?); + .insert(yaml.id.clone(), yaml.clone().try_into()?); - self.db - .id_appserviceregistrations - .insert(®istration.id, appservice_config_body); - - Ok(()) + self.db.register_appservice(yaml) } /// Remove an appservice registration /// /// # Arguments /// - /// * `service_name` - the registration ID of the appservice - pub async fn unregister_appservice(&self, appservice_id: &str) -> Result { - // removes the appservice registration info - self.registration_info + /// * `service_name` - the name you send to register the service previously + pub async fn unregister_appservice(&self, service_name: &str) -> Result<()> { + services() + .appservice + .registration_info .write() .await - .remove(appservice_id) - .ok_or_else(|| err!("Appservice not found"))?; + .remove(service_name) + .ok_or_else(|| crate::Error::AdminCommand("Appservice not found"))?; - // remove the appservice from the database - self.db.id_appserviceregistrations.del(appservice_id); - - // deletes all active requests for the appservice if there are any so we stop - // sending to the URL - self.services - .sending - .cleanup_events(Some(appservice_id), None, None) - .await + self.db.unregister_appservice(service_name) } pub async fn get_registration(&self, id: &str) -> Option { @@ -113,6 +172,15 @@ impl 
Service { .map(|info| info.registration) } + pub async fn iter_ids(&self) -> Vec { + self.registration_info + .read() + .await + .keys() + .cloned() + .collect() + } + pub async fn find_from_token(&self, token: &str) -> Option { self.read() .await @@ -138,9 +206,6 @@ impl Service { } /// Checks if a given room id matches any exclusive appservice regex - /// - /// TODO: use this? - #[allow(dead_code)] pub async fn is_exclusive_room_id(&self, room_id: &RoomId) -> bool { self.read() .await @@ -148,33 +213,7 @@ impl Service { .any(|info| info.rooms.is_exclusive_match(room_id.as_str())) } - pub fn iter_ids(&self) -> impl Stream + Send { - self.read() - .map(|info| info.keys().cloned().collect::>()) - .map(IntoIterator::into_iter) - .map(IterStream::stream) - .flatten_stream() - } - - pub fn iter_db_ids(&self) -> impl Stream> + Send { - self.db - .id_appserviceregistrations - .keys() - .and_then(move |id: &str| async move { - Ok((id.to_owned(), self.get_db_registration(id).await?)) - }) - } - - pub async fn get_db_registration(&self, id: &str) -> Result { - self.db - .id_appserviceregistrations - .get(id) - .await - .and_then(|ref bytes| serde_yaml::from_slice(bytes).map_err(Into::into)) - .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) - } - - pub fn read(&self) -> impl Future> + Send { + pub fn read(&self) -> impl Future>> { self.registration_info.read() } } diff --git a/src/service/appservice/namespace_regex.rs b/src/service/appservice/namespace_regex.rs deleted file mode 100644 index fe0fd91f..00000000 --- a/src/service/appservice/namespace_regex.rs +++ /dev/null @@ -1,70 +0,0 @@ -use conduwuit::Result; -use regex::RegexSet; -use ruma::api::appservice::Namespace; - -/// Compiled regular expressions for a namespace -#[derive(Clone, Debug)] -pub struct NamespaceRegex { - pub exclusive: Option, - pub non_exclusive: Option, -} - -impl NamespaceRegex { - /// Checks if this namespace has rights to a namespace - #[inline] - #[must_use] - pub fn 
is_match(&self, heystack: &str) -> bool { - if self.is_exclusive_match(heystack) { - return true; - } - - if let Some(non_exclusive) = &self.non_exclusive { - if non_exclusive.is_match(heystack) { - return true; - } - } - false - } - - /// Checks if this namespace has exlusive rights to a namespace - #[inline] - #[must_use] - pub fn is_exclusive_match(&self, heystack: &str) -> bool { - if let Some(exclusive) = &self.exclusive { - if exclusive.is_match(heystack) { - return true; - } - } - false - } -} - -impl TryFrom> for NamespaceRegex { - type Error = regex::Error; - - fn try_from(value: Vec) -> Result { - let mut exclusive = Vec::with_capacity(value.len()); - let mut non_exclusive = Vec::with_capacity(value.len()); - - for namespace in value { - if namespace.exclusive { - exclusive.push(namespace.regex); - } else { - non_exclusive.push(namespace.regex); - } - } - - Ok(Self { - exclusive: if exclusive.is_empty() { - None - } else { - Some(RegexSet::new(exclusive)?) - }, - non_exclusive: if non_exclusive.is_empty() { - None - } else { - Some(RegexSet::new(non_exclusive)?) - }, - }) - } -} diff --git a/src/service/appservice/registration_info.rs b/src/service/appservice/registration_info.rs deleted file mode 100644 index a511f58d..00000000 --- a/src/service/appservice/registration_info.rs +++ /dev/null @@ -1,41 +0,0 @@ -use conduwuit::Result; -use ruma::{UserId, api::appservice::Registration}; - -use super::NamespaceRegex; - -/// Appservice registration combined with its compiled regular expressions. 
-#[derive(Clone, Debug)] -pub struct RegistrationInfo { - pub registration: Registration, - pub users: NamespaceRegex, - pub aliases: NamespaceRegex, - pub rooms: NamespaceRegex, -} - -impl RegistrationInfo { - #[must_use] - pub fn is_user_match(&self, user_id: &UserId) -> bool { - self.users.is_match(user_id.as_str()) - || self.registration.sender_localpart == user_id.localpart() - } - - #[inline] - #[must_use] - pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { - self.users.is_exclusive_match(user_id.as_str()) - || self.registration.sender_localpart == user_id.localpart() - } -} - -impl TryFrom for RegistrationInfo { - type Error = regex::Error; - - fn try_from(value: Registration) -> Result { - Ok(Self { - users: value.namespaces.users.clone().try_into()?, - aliases: value.namespaces.aliases.clone().try_into()?, - rooms: value.namespaces.rooms.clone().try_into()?, - registration: value, - }) - } -} diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs deleted file mode 100644 index 1aeeb492..00000000 --- a/src/service/client/mod.rs +++ /dev/null @@ -1,215 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use conduwuit::{Config, Result, err, implement, trace}; -use either::Either; -use ipaddress::IPAddress; -use reqwest::redirect; - -use crate::{resolver, service}; - -pub struct Service { - pub default: reqwest::Client, - pub url_preview: reqwest::Client, - pub extern_media: reqwest::Client, - pub well_known: reqwest::Client, - pub federation: reqwest::Client, - pub synapse: reqwest::Client, - pub sender: reqwest::Client, - pub appservice: reqwest::Client, - pub pusher: reqwest::Client, - - pub cidr_range_denylist: Vec, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - let config = &args.server.config; - let resolver = args.require::("resolver"); - - let url_preview_bind_addr = config - .url_preview_bound_interface - .clone() - .and_then(Either::left); - - let url_preview_bind_iface = config - 
.url_preview_bound_interface - .clone() - .and_then(Either::right); - - Ok(Arc::new(Self { - default: base(config)? - .dns_resolver(resolver.resolver.clone()) - .build()?, - - url_preview: base(config) - .and_then(|builder| { - builder_interface(builder, url_preview_bind_iface.as_deref()) - })? - .local_address(url_preview_bind_addr) - .dns_resolver(resolver.resolver.clone()) - .redirect(redirect::Policy::limited(3)) - .build()?, - - extern_media: base(config)? - .dns_resolver(resolver.resolver.clone()) - .redirect(redirect::Policy::limited(3)) - .build()?, - - well_known: base(config)? - .dns_resolver(resolver.resolver.clone()) - .connect_timeout(Duration::from_secs(config.well_known_conn_timeout)) - .read_timeout(Duration::from_secs(config.well_known_timeout)) - .timeout(Duration::from_secs(config.well_known_timeout)) - .pool_max_idle_per_host(0) - .redirect(redirect::Policy::limited(4)) - .build()?, - - federation: base(config)? - .dns_resolver(resolver.resolver.hooked.clone()) - .read_timeout(Duration::from_secs(config.federation_timeout)) - .pool_max_idle_per_host(config.federation_idle_per_host.into()) - .pool_idle_timeout(Duration::from_secs(config.federation_idle_timeout)) - .redirect(redirect::Policy::limited(3)) - .build()?, - - synapse: base(config)? - .dns_resolver(resolver.resolver.hooked.clone()) - .read_timeout(Duration::from_secs(305)) - .pool_max_idle_per_host(0) - .redirect(redirect::Policy::limited(3)) - .build()?, - - sender: base(config)? - .dns_resolver(resolver.resolver.hooked.clone()) - .read_timeout(Duration::from_secs(config.sender_timeout)) - .timeout(Duration::from_secs(config.sender_timeout)) - .pool_max_idle_per_host(1) - .pool_idle_timeout(Duration::from_secs(config.sender_idle_timeout)) - .redirect(redirect::Policy::limited(2)) - .build()?, - - appservice: base(config)? 
- .dns_resolver(resolver.resolver.clone()) - .connect_timeout(Duration::from_secs(5)) - .read_timeout(Duration::from_secs(config.appservice_timeout)) - .timeout(Duration::from_secs(config.appservice_timeout)) - .pool_max_idle_per_host(1) - .pool_idle_timeout(Duration::from_secs(config.appservice_idle_timeout)) - .redirect(redirect::Policy::limited(2)) - .build()?, - - pusher: base(config)? - .dns_resolver(resolver.resolver.clone()) - .pool_max_idle_per_host(1) - .pool_idle_timeout(Duration::from_secs(config.pusher_idle_timeout)) - .redirect(redirect::Policy::limited(2)) - .build()?, - - cidr_range_denylist: config - .ip_range_denylist - .iter() - .map(IPAddress::parse) - .inspect(|cidr| trace!("Denied CIDR range: {cidr:?}")) - .collect::>() - .map_err(|e| err!(Config("ip_range_denylist", e)))?, - })) - } - - fn name(&self) -> &str { service::make_name(std::module_path!()) } -} - -fn base(config: &Config) -> Result { - let mut builder = reqwest::Client::builder() - .hickory_dns(true) - .connect_timeout(Duration::from_secs(config.request_conn_timeout)) - .read_timeout(Duration::from_secs(config.request_timeout)) - .timeout(Duration::from_secs(config.request_total_timeout)) - .pool_idle_timeout(Duration::from_secs(config.request_idle_timeout)) - .pool_max_idle_per_host(config.request_idle_per_host.into()) - .user_agent(conduwuit::version::user_agent()) - .redirect(redirect::Policy::limited(6)) - .danger_accept_invalid_certs(config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure) - .connection_verbose(cfg!(debug_assertions)); - - #[cfg(feature = "gzip_compression")] - { - builder = if config.gzip_compression { - builder.gzip(true) - } else { - builder.gzip(false).no_gzip() - }; - }; - - #[cfg(feature = "brotli_compression")] - { - builder = if config.brotli_compression { - builder.brotli(true) - } else { - builder.brotli(false).no_brotli() - }; - }; - - #[cfg(feature = "zstd_compression")] - { - builder = if 
config.zstd_compression { - builder.zstd(true) - } else { - builder.zstd(false).no_zstd() - }; - }; - - #[cfg(not(feature = "gzip_compression"))] - { - builder = builder.no_gzip(); - }; - - #[cfg(not(feature = "brotli_compression"))] - { - builder = builder.no_brotli(); - }; - - #[cfg(not(feature = "zstd_compression"))] - { - builder = builder.no_zstd(); - }; - - match config.proxy.to_proxy()? { - | Some(proxy) => Ok(builder.proxy(proxy)), - | _ => Ok(builder), - } -} - -#[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] -fn builder_interface( - builder: reqwest::ClientBuilder, - config: Option<&str>, -) -> Result { - if let Some(iface) = config { - Ok(builder.interface(iface)) - } else { - Ok(builder) - } -} - -#[cfg(not(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))] -fn builder_interface( - builder: reqwest::ClientBuilder, - config: Option<&str>, -) -> Result { - use conduwuit::Err; - - if let Some(iface) = config { - Err!("Binding to network-interface {iface:?} by name is not supported on this platform.") - } else { - Ok(builder) - } -} - -#[inline] -#[must_use] -#[implement(Service)] -pub fn valid_cidr_range(&self, ip: &IPAddress) -> bool { - self.cidr_range_denylist - .iter() - .all(|cidr| !cidr.includes(ip)) -} diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs deleted file mode 100644 index fd0d8764..00000000 --- a/src/service/config/mod.rs +++ /dev/null @@ -1,71 +0,0 @@ -use std::{iter, ops::Deref, path::Path, sync::Arc}; - -use async_trait::async_trait; -use conduwuit::{ - Result, Server, - config::{Config, check}, - error, implement, -}; - -pub struct Service { - server: Arc, -} - -const SIGNAL: &str = "SIGUSR1"; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { server: args.server.clone() })) - } - - async fn worker(self: Arc) -> Result { - while self.server.running() { - if 
self.server.signal.subscribe().recv().await == Ok(SIGNAL) { - if let Err(e) = self.handle_reload() { - error!("Failed to reload config: {e}"); - } - } - } - - Ok(()) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -impl Deref for Service { - type Target = Arc; - - #[inline] - fn deref(&self) -> &Self::Target { &self.server.config } -} - -#[implement(Service)] -fn handle_reload(&self) -> Result { - if self.server.config.config_reload_signal { - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) - .expect("failed to notify systemd of reloading state"); - - self.reload(iter::empty())?; - - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) - .expect("failed to notify systemd of ready state"); - } - - Ok(()) -} - -#[implement(Service)] -pub fn reload<'a, I>(&self, paths: I) -> Result> -where - I: Iterator, -{ - let old = self.server.config.clone(); - let new = Config::load(paths).and_then(|raw| Config::new(&raw))?; - - check::reload(&old, &new)?; - self.server.config.update(new) -} diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs deleted file mode 100644 index 3a61f710..00000000 --- a/src/service/emergency/mod.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use async_trait::async_trait; -use conduwuit::{Result, error, warn}; -use ruma::{ - events::{ - GlobalAccountDataEvent, GlobalAccountDataEventType, push_rules::PushRulesEventContent, - }, - push::Ruleset, -}; - -use crate::{Dep, account_data, config, globals, users}; - -pub struct Service { - services: Services, -} - -struct Services { - account_data: Dep, - config: Dep, - globals: Dep, - users: Dep, -} - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - account_data: args.depend::("account_data"), - config: 
args.depend::("config"), - - globals: args.depend::("globals"), - users: args.depend::("users"), - }, - })) - } - - async fn worker(self: Arc) -> Result { - if self.services.globals.is_read_only() { - return Ok(()); - } - - self.set_emergency_access().await.inspect_err(|e| { - error!("Could not set the configured emergency password for the server user: {e}"); - }) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -impl Service { - /// Sets the emergency password and push rules for the server user account - /// in case emergency password is set - async fn set_emergency_access(&self) -> Result { - let server_user = &self.services.globals.server_user; - - self.services - .users - .set_password(server_user, self.services.config.emergency_password.as_deref())?; - - let (ruleset, pwd_set) = match self.services.config.emergency_password { - | Some(_) => (Ruleset::server_default(server_user), true), - | None => (Ruleset::new(), false), - }; - - self.services - .account_data - .update( - None, - server_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(&GlobalAccountDataEvent { - content: PushRulesEventContent { global: ruleset }, - }) - .expect("to json value always works"), - ) - .await?; - - if pwd_set { - warn!( - "The server account emergency password is set! Please unset it as soon as you \ - finish admin account recovery! You will be logged out of the server service \ - account when you finish." 
- ); - Ok(()) - } else { - // logs out any users still in the server service account and removes sessions - self.services.users.deactivate_account(server_user).await - } - } -} diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs deleted file mode 100644 index 1d1d1154..00000000 --- a/src/service/federation/execute.rs +++ /dev/null @@ -1,296 +0,0 @@ -use std::{fmt::Debug, mem}; - -use bytes::Bytes; -use conduwuit::{ - Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, - error::inspect_debug_log, implement, trace, utils::string::EMPTY, -}; -use http::{HeaderValue, header::AUTHORIZATION}; -use ipaddress::IPAddress; -use reqwest::{Client, Method, Request, Response, Url}; -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, - api::{ - EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - client::error::Error as RumaError, federation::authentication::XMatrix, - }, - serde::Base64, -}; - -use crate::resolver::actual::ActualDest; - -/// Sends a request to a federation server -#[implement(super::Service)] -#[tracing::instrument(skip_all, name = "request", level = "debug")] -pub async fn execute(&self, dest: &ServerName, request: T) -> Result -where - T: OutgoingRequest + Debug + Send, -{ - let client = &self.services.client.federation; - self.execute_on(client, dest, request).await -} - -/// Like execute() but with a very large timeout -#[implement(super::Service)] -#[tracing::instrument(skip_all, name = "synapse", level = "debug")] -pub async fn execute_synapse( - &self, - dest: &ServerName, - request: T, -) -> Result -where - T: OutgoingRequest + Debug + Send, -{ - let client = &self.services.client.synapse; - self.execute_on(client, dest, request).await -} - -#[implement(super::Service)] -#[tracing::instrument( - name = "fed", - level = INFO_SPAN_LEVEL, - skip(self, client, request), - )] -pub async fn execute_on( - &self, - client: &Client, - 
dest: &ServerName, - request: T, -) -> Result -where - T: OutgoingRequest + Send, -{ - if !self.services.server.config.allow_federation { - return Err!(Config("allow_federation", "Federation is disabled.")); - } - - if self.services.moderation.is_remote_server_forbidden(dest) { - return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); - } - - let actual = self.services.resolver.get_actual_dest(dest).await?; - let request = into_http_request::(&actual, request)?; - let request = self.prepare(dest, request)?; - self.perform::(dest, &actual, request, client).await -} - -#[implement(super::Service)] -async fn perform( - &self, - dest: &ServerName, - actual: &ActualDest, - request: Request, - client: &Client, -) -> Result -where - T: OutgoingRequest + Send, -{ - let url = request.url().clone(); - let method = request.method().clone(); - - debug!(?method, ?url, "Sending request"); - match client.execute(request).await { - | Ok(response) => handle_response::(dest, actual, &method, &url, response).await, - | Err(error) => - Err(handle_error(actual, &method, &url, error).expect_err("always returns error")), - } -} - -#[implement(super::Service)] -fn prepare(&self, dest: &ServerName, mut request: http::Request>) -> Result { - self.sign_request(&mut request, dest); - - let request = Request::try_from(request)?; - self.validate_url(request.url())?; - self.services.server.check_running()?; - - Ok(request) -} - -#[implement(super::Service)] -fn validate_url(&self, url: &Url) -> Result<()> { - if let Some(url_host) = url.host_str() { - if let Ok(ip) = IPAddress::parse(url_host) { - trace!("Checking request URL IP {ip:?}"); - self.services.resolver.validate_ip(&ip)?; - } - } - - Ok(()) -} - -async fn handle_response( - dest: &ServerName, - actual: &ActualDest, - method: &Method, - url: &Url, - response: Response, -) -> Result -where - T: OutgoingRequest + Send, -{ - let response = into_http_response(dest, actual, method, url, response).await?; - - 
T::IncomingResponse::try_from_http_response(response) - .map_err(|e| err!(BadServerResponse("Server returned bad 200 response: {e:?}"))) -} - -async fn into_http_response( - dest: &ServerName, - actual: &ActualDest, - method: &Method, - url: &Url, - mut response: Response, -) -> Result> { - let status = response.status(); - trace!( - ?status, ?method, - request_url = ?url, - response_url = ?response.url(), - "Received response from {}", - actual.string(), - ); - - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - // TODO: handle timeout - trace!("Waiting for response body..."); - let body = response - .bytes() - .await - .inspect_err(inspect_debug_log) - .unwrap_or_else(|_| Vec::new().into()); - - let http_response = http_response_builder - .body(body) - .expect("reqwest body is valid http body"); - - debug!("Got {status:?} for {method} {url}"); - if !status.is_success() { - return Err(Error::Federation( - dest.to_owned(), - RumaError::from_http_response(http_response), - )); - } - - Ok(http_response) -} - -fn handle_error( - actual: &ActualDest, - method: &Method, - url: &Url, - mut e: reqwest::Error, -) -> Result { - if e.is_timeout() || e.is_connect() { - e = e.without_url(); - debug_warn!("{e:?}"); - } else if e.is_redirect() { - debug_error!( - method = ?method, - url = ?url, - final_url = ?e.url(), - "Redirect loop {}: {}", - actual.host, - e, - ); - } else { - debug_error!("{e:?}"); - } - - Err(e.into()) -} - -#[implement(super::Service)] -fn sign_request(&self, http_request: &mut http::Request>, dest: &ServerName) { - type Member = (String, Value); - type Value = CanonicalJsonValue; - type Object = CanonicalJsonObject; - - let origin = &self.services.server.name; - let body = http_request.body(); - let uri = http_request - .uri() - .path_and_query() - 
.expect("http::Request missing path_and_query"); - - let mut req: Object = if !body.is_empty() { - let content: CanonicalJsonValue = - serde_json::from_slice(body).expect("failed to serialize body"); - - let authorization: [Member; 5] = [ - ("content".into(), content), - ("destination".into(), dest.as_str().into()), - ("method".into(), http_request.method().as_str().into()), - ("origin".into(), origin.as_str().into()), - ("uri".into(), uri.to_string().into()), - ]; - - authorization.into() - } else { - let authorization: [Member; 4] = [ - ("destination".into(), dest.as_str().into()), - ("method".into(), http_request.method().as_str().into()), - ("origin".into(), origin.as_str().into()), - ("uri".into(), uri.to_string().into()), - ]; - - authorization.into() - }; - - self.services - .server_keys - .sign_json(&mut req) - .expect("request signing failed"); - - let signatures = req["signatures"] - .as_object() - .and_then(|object| object[origin.as_str()].as_object()) - .expect("origin signatures object"); - - let key: &ServerSigningKeyId = signatures - .keys() - .next() - .map(|k| k.as_str().try_into()) - .expect("at least one signature from this origin") - .expect("keyid is json string"); - - let sig: Base64 = signatures - .values() - .next() - .map(|s| s.as_str().map(Base64::parse)) - .expect("at least one signature from this origin") - .expect("signature is json string") - .expect("signature is valid base64"); - - let x_matrix = XMatrix::new(origin.into(), dest.into(), key.into(), sig); - let authorization = HeaderValue::from(&x_matrix); - let authorization = http_request - .headers_mut() - .insert(AUTHORIZATION, authorization); - - debug_assert!(authorization.is_none(), "Authorization header already present"); -} - -fn into_http_request(actual: &ActualDest, request: T) -> Result>> -where - T: OutgoingRequest + Send, -{ - const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_11]; - const SATIR: SendAccessToken<'_> = SendAccessToken::IfRequired(EMPTY); - - let 
http_request = request - .try_into_http_request::>(actual.string().as_str(), SATIR, &VERSIONS) - .map_err(|e| err!(BadServerResponse("Invalid destination: {e:?}")))?; - - Ok(http_request) -} diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs deleted file mode 100644 index 15521875..00000000 --- a/src/service/federation/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -mod execute; - -use std::sync::Arc; - -use conduwuit::{Result, Server}; - -use crate::{Dep, client, moderation, resolver, server_keys}; - -pub struct Service { - services: Services, -} - -struct Services { - server: Arc, - client: Dep, - resolver: Dep, - server_keys: Dep, - moderation: Dep, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - server: args.server.clone(), - client: args.depend::("client"), - resolver: args.depend::("resolver"), - server_keys: args.depend::("server_keys"), - moderation: args.depend::("moderation"), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} diff --git a/src/service/globals/client.rs b/src/service/globals/client.rs new file mode 100644 index 00000000..3335cd16 --- /dev/null +++ b/src/service/globals/client.rs @@ -0,0 +1,134 @@ +use std::{sync::Arc, time::Duration}; + +use reqwest::redirect; + +use crate::{service::globals::resolver, Config, Result}; + +pub struct Client { + pub default: reqwest::Client, + pub url_preview: reqwest::Client, + pub well_known: reqwest::Client, + pub federation: reqwest::Client, + pub sender: reqwest::Client, + pub appservice: reqwest::Client, + pub pusher: reqwest::Client, +} + +impl Client { + pub fn new(config: &Config, resolver: &Arc) -> Client { + Client { + default: Self::base(config) + .unwrap() + .dns_resolver(resolver.clone()) + .build() + .unwrap(), + + url_preview: Self::base(config) + .unwrap() + .dns_resolver(resolver.clone()) + .redirect(redirect::Policy::limited(3)) + .build() + 
.unwrap(), + + well_known: Self::base(config) + .unwrap() + .dns_resolver(resolver.hooked.clone()) + .connect_timeout(Duration::from_secs(config.well_known_conn_timeout)) + .timeout(Duration::from_secs(config.well_known_timeout)) + .pool_max_idle_per_host(0) + .redirect(redirect::Policy::limited(4)) + .build() + .unwrap(), + + federation: Self::base(config) + .unwrap() + .dns_resolver(resolver.hooked.clone()) + .timeout(Duration::from_secs(config.federation_timeout)) + .pool_max_idle_per_host(config.federation_idle_per_host.into()) + .pool_idle_timeout(Duration::from_secs(config.federation_idle_timeout)) + .redirect(redirect::Policy::limited(3)) + .build() + .unwrap(), + + sender: Self::base(config) + .unwrap() + .dns_resolver(resolver.hooked.clone()) + .timeout(Duration::from_secs(config.sender_timeout)) + .pool_max_idle_per_host(1) + .pool_idle_timeout(Duration::from_secs(config.sender_idle_timeout)) + .redirect(redirect::Policy::limited(2)) + .build() + .unwrap(), + + appservice: Self::base(config) + .unwrap() + .dns_resolver(resolver.clone()) + .connect_timeout(Duration::from_secs(5)) + .timeout(Duration::from_secs(config.appservice_timeout)) + .pool_max_idle_per_host(1) + .pool_idle_timeout(Duration::from_secs(config.appservice_idle_timeout)) + .redirect(redirect::Policy::limited(2)) + .build() + .unwrap(), + + pusher: Self::base(config) + .unwrap() + .dns_resolver(resolver.clone()) + .pool_max_idle_per_host(1) + .pool_idle_timeout(Duration::from_secs(config.pusher_idle_timeout)) + .redirect(redirect::Policy::limited(2)) + .build() + .unwrap(), + } + } + + fn base(config: &Config) -> Result { + let version = match option_env!("CONDUIT_VERSION_EXTRA") { + Some(extra) => format!("{} ({})", env!("CARGO_PKG_VERSION"), extra), + None => env!("CARGO_PKG_VERSION").to_owned(), + }; + + let mut builder = reqwest::Client::builder() + .hickory_dns(true) + .timeout(Duration::from_secs(config.request_timeout)) + 
.connect_timeout(Duration::from_secs(config.request_conn_timeout)) + .pool_max_idle_per_host(config.request_idle_per_host.into()) + .pool_idle_timeout(Duration::from_secs(config.request_idle_timeout)) + .user_agent("Conduwuit".to_owned() + "/" + &version) + .redirect(redirect::Policy::limited(6)); + + #[cfg(feature = "gzip_compression")] + { + builder = if config.gzip_compression { + builder.gzip(true) + } else { + builder.gzip(false).no_gzip() + }; + }; + + #[cfg(feature = "brotli_compression")] + { + builder = if config.brotli_compression { + builder.brotli(true) + } else { + builder.brotli(false).no_brotli() + }; + }; + + #[cfg(not(feature = "gzip_compression"))] + { + builder = builder.no_gzip(); + }; + + #[cfg(not(feature = "brotli_compression"))] + { + builder = builder.no_brotli(); + }; + + if let Some(proxy) = config.proxy.to_proxy()? { + Ok(builder.proxy(proxy)) + } else { + Ok(builder) + } + } +} diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 21c09252..15c29094 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,75 +1,41 @@ -use std::sync::{Arc, RwLock}; +use std::{collections::BTreeMap, error::Error}; -use conduwuit::{Result, utils}; -use database::{Database, Deserialized, Map}; +use async_trait::async_trait; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + signatures::Ed25519KeyPair, + DeviceId, OwnedServerSigningKeyId, ServerName, UserId, +}; -pub struct Data { - global: Arc, - counter: RwLock, - pub(super) db: Arc, -} - -const COUNTER: &[u8] = b"c"; - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - global: db["global"].clone(), - counter: RwLock::new( - Self::stored_count(&db["global"]).expect("initialized global counter"), - ), - db: args.db.clone(), - } - } - - pub fn next_count(&self) -> Result { - let _cork = self.db.cork(); - let mut lock = self.counter.write().expect("locked"); - let counter: &mut u64 = &mut 
lock; - debug_assert!( - *counter == Self::stored_count(&self.global).expect("database failure"), - "counter mismatch" - ); - - *counter = counter - .checked_add(1) - .expect("counter must not overflow u64"); - - self.global.insert(COUNTER, counter.to_be_bytes()); - - Ok(*counter) - } - - #[inline] - pub fn current_count(&self) -> u64 { - let lock = self.counter.read().expect("locked"); - let counter: &u64 = &lock; - debug_assert!( - *counter == Self::stored_count(&self.global).expect("database failure"), - "counter mismatch" - ); - - *counter - } - - fn stored_count(global: &Arc) -> Result { - global - .get_blocking(COUNTER) - .as_deref() - .map_or(Ok(0_u64), utils::u64_from_bytes) - } - - pub async fn database_version(&self) -> u64 { - self.global - .get(b"version") - .await - .deserialized() - .unwrap_or(0) - } - - #[inline] - pub fn bump_database_version(&self, new_version: u64) { - self.global.raw_put(b"version", new_version); - } +use crate::{database::Cork, Result}; + +#[async_trait] +pub trait Data: Send + Sync { + fn next_count(&self) -> Result; + fn current_count(&self) -> Result; + fn last_check_for_updates_id(&self) -> Result; + fn update_check_for_updates_id(&self, id: u64) -> Result<()>; + #[allow(unused_qualifications)] // async traits + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + fn cleanup(&self) -> Result<()>; + fn flush(&self) -> Result<()>; + fn cork(&self) -> Result; + fn cork_and_flush(&self) -> Result; + fn cork_and_sync(&self) -> Result; + fn memory_usage(&self) -> String; + fn clear_caches(&self, amount: u32); + fn load_keypair(&self) -> Result; + fn remove_keypair(&self) -> Result<()>; + fn add_signing_key( + &self, origin: &ServerName, new_keys: ServerSigningKeys, + ) -> Result>; + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found + /// for the server. 
+ fn signing_keys_for(&self, origin: &ServerName) -> Result>; + fn database_version(&self) -> Result; + fn bump_database_version(&self, new_version: u64) -> Result<()>; + fn backup(&self) -> Result<(), Box> { unimplemented!() } + fn backup_list(&self) -> Result { Ok(String::new()) } + fn file_list(&self) -> Result { Ok(String::new()) } } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index a23a4c21..874ba22e 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,187 +1,446 @@ -mod data; - use std::{ - collections::HashMap, - fmt::Write, - sync::{Arc, RwLock}, + collections::{BTreeMap, HashMap}, + fs, + future::Future, + path::PathBuf, + sync::{ + atomic::{self, AtomicBool}, + Arc, + }, time::Instant, }; -use async_trait::async_trait; -use conduwuit::{Result, Server, error, utils::bytes::pretty}; -use data::Data; +use argon2::Argon2; +use base64::{engine::general_purpose, Engine as _}; +pub use data::Data; +use hickory_resolver::TokioAsyncResolver; use regex::RegexSet; -use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; +use ruma::{ + api::{ + client::{discovery::discover_support::ContactRole, sync::sync_events}, + federation::discovery::{ServerSigningKeys, VerifyKey}, + }, + serde::Base64, + DeviceId, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, + RoomVersionId, ServerName, UserId, +}; +use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, Semaphore}; +use tracing::{error, info}; +use tracing_subscriber::{EnvFilter, Registry}; +use url::Url; -use crate::service; +use crate::{services, Config, Result}; -pub struct Service { - pub db: Data, - server: Arc, - - pub bad_event_ratelimiter: Arc>>, - pub server_user: OwnedUserId, - pub admin_alias: OwnedRoomAliasId, - pub turn_secret: String, - pub registration_token: Option, -} +pub mod client; +mod data; +pub mod resolver; type RateLimitState = (Instant, u32); // Time if last 
failed try, number of failed tries +type SyncHandle = ( + Option, // since + Receiver>>, // rx +); -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - let db = Data::new(&args); - let config = &args.server.config; +pub struct Service<'a> { + pub db: &'static dyn Data, - let turn_secret = - config - .turn_secret_file - .as_ref() - .map_or(config.turn_secret.clone(), |path| { - std::fs::read_to_string(path).unwrap_or_else(|e| { - error!("Failed to read the TURN secret file: {e}"); + pub tracing_reload_handle: tracing_subscriber::reload::Handle, + pub config: Config, + keypair: Arc, + jwt_decoding_key: Option, + pub resolver: Arc, + pub client: client::Client, + pub stable_room_versions: Vec, + pub unstable_room_versions: Vec, + pub bad_event_ratelimiter: Arc>>, + pub bad_signature_ratelimiter: Arc, RateLimitState>>>, + pub bad_query_ratelimiter: Arc>>, + pub servername_ratelimiter: Arc>>>, + pub sync_receivers: RwLock>, + pub roomid_mutex_insert: RwLock>>>, + pub roomid_mutex_state: RwLock>>>, + pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer + pub roomid_federationhandletime: RwLock>, + pub stateres_mutex: Arc>, + pub(crate) rotate: RotationHandler, - config.turn_secret.clone() - }) - }); - - let registration_token = config.registration_token_file.as_ref().map_or( - config.registration_token.clone(), - |path| { - let Ok(token) = std::fs::read_to_string(path).inspect_err(|e| { - error!("Failed to read the registration token file: {e}"); - }) else { - return config.registration_token.clone(); - }; - - Some(token) - }, - ); - - Ok(Arc::new(Self { - db, - server: args.server.clone(), - bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &args.server.name)) - .expect("#admins:server_name is valid alias name"), - server_user: UserId::parse_with_server_name( - String::from("conduit"), - &args.server.name, - ) - 
.expect("@conduit:server_name is valid"), - turn_secret, - registration_token, - })) - } - - async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { - let (ber_count, ber_bytes) = self.bad_event_ratelimiter.read()?.iter().fold( - (0_usize, 0_usize), - |(mut count, mut bytes), (event_id, _)| { - bytes = bytes.saturating_add(event_id.capacity()); - bytes = bytes.saturating_add(size_of::()); - count = count.saturating_add(1); - (count, bytes) - }, - ); - - writeln!(out, "bad_event_ratelimiter: {ber_count} ({})", pretty(ber_bytes))?; - - Ok(()) - } - - async fn clear_cache(&self) { - self.bad_event_ratelimiter - .write() - .expect("locked for writing") - .clear(); - } - - fn name(&self) -> &str { service::make_name(std::module_path!()) } + pub shutdown: AtomicBool, + pub argon: Argon2<'a>, } -impl Service { - #[inline] +/// Handles "rotation" of long-polling requests. "Rotation" in this context is +/// similar to "rotation" of log files and the like. +/// +/// This is utilized to have sync workers return early and release read locks on +/// the database. +pub(crate) struct RotationHandler(broadcast::Sender<()>, ()); + +impl RotationHandler { + pub fn new() -> Self { + let (s, _r) = broadcast::channel(1); + Self(s, ()) + } + + pub fn watch(&self) -> impl Future { + let mut r = self.0.subscribe(); + + async move { + _ = r.recv().await; + } + } + + pub fn fire(&self) { _ = self.0.send(()); } +} + +impl Default for RotationHandler { + fn default() -> Self { Self::new() } +} + +impl Service<'_> { + pub fn load( + db: &'static dyn Data, config: &Config, + tracing_reload_handle: tracing_subscriber::reload::Handle, + ) -> Result { + let keypair = db.load_keypair(); + + let keypair = match keypair { + Ok(k) => k, + Err(e) => { + error!("Keypair invalid. 
Deleting..."); + db.remove_keypair()?; + return Err(e); + }, + }; + + let jwt_decoding_key = config + .jwt_secret + .as_ref() + .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); + + let resolver = Arc::new(resolver::Resolver::new(config)); + + // Supported and stable room versions + let stable_room_versions = vec![ + RoomVersionId::V6, + RoomVersionId::V7, + RoomVersionId::V8, + RoomVersionId::V9, + RoomVersionId::V10, + RoomVersionId::V11, + ]; + // Experimental, partially supported room versions + let unstable_room_versions = vec![RoomVersionId::V2, RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; + + // 19456 Kib blocks, iterations = 2, parallelism = 1 for more info https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#argon2id + let argon = Argon2::new( + argon2::Algorithm::Argon2id, + argon2::Version::default(), + argon2::Params::new(19456, 2, 1, None).expect("valid parameters"), + ); + + let mut s = Self { + tracing_reload_handle, + db, + config: config.clone(), + keypair: Arc::new(keypair), + resolver: resolver.clone(), + client: client::Client::new(config, &resolver), + jwt_decoding_key, + stable_room_versions, + unstable_room_versions, + bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + roomid_mutex_state: RwLock::new(HashMap::new()), + roomid_mutex_insert: RwLock::new(HashMap::new()), + roomid_mutex_federation: RwLock::new(HashMap::new()), + roomid_federationhandletime: RwLock::new(HashMap::new()), + stateres_mutex: Arc::new(Mutex::new(())), + sync_receivers: RwLock::new(HashMap::new()), + rotate: RotationHandler::new(), + shutdown: AtomicBool::new(false), + argon, + }; + + fs::create_dir_all(s.get_media_folder())?; + + if !s + .supported_room_versions() + 
.contains(&s.config.default_room_version) + { + error!(config=?s.config.default_room_version, fallback=?crate::config::default_default_room_version(), "Room version in config isn't supported, falling back to default version"); + s.config.default_room_version = crate::config::default_default_room_version(); + }; + + Ok(s) + } + + /// Returns this server's keypair. + pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { &self.keypair } + + #[tracing::instrument(skip(self))] pub fn next_count(&self) -> Result { self.db.next_count() } - #[inline] - pub fn current_count(&self) -> Result { Ok(self.db.current_count()) } + #[tracing::instrument(skip(self))] + pub fn current_count(&self) -> Result { self.db.current_count() } - #[inline] - pub fn server_name(&self) -> &ServerName { self.server.name.as_ref() } + #[tracing::instrument(skip(self))] + pub fn last_check_for_updates_id(&self) -> Result { self.db.last_check_for_updates_id() } + + #[tracing::instrument(skip(self))] + pub fn update_check_for_updates_id(&self, id: u64) -> Result<()> { self.db.update_check_for_updates_id(id) } + + pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + self.db.watch(user_id, device_id).await + } + + pub fn cleanup(&self) -> Result<()> { self.db.cleanup() } + + pub fn flush(&self) -> Result<()> { self.db.flush() } + + pub fn server_name(&self) -> &ServerName { self.config.server_name.as_ref() } + + pub fn max_request_size(&self) -> u32 { self.config.max_request_size } + + pub fn max_fetch_prev_events(&self) -> u16 { self.config.max_fetch_prev_events } + + pub fn allow_registration(&self) -> bool { self.config.allow_registration } + + pub fn allow_guest_registration(&self) -> bool { self.config.allow_guest_registration } + + pub fn allow_guests_auto_join_rooms(&self) -> bool { self.config.allow_guests_auto_join_rooms } + + pub fn log_guest_registrations(&self) -> bool { self.config.log_guest_registrations } + + pub fn allow_encryption(&self) -> bool { 
self.config.allow_encryption } + + pub fn allow_federation(&self) -> bool { self.config.allow_federation } pub fn allow_public_room_directory_over_federation(&self) -> bool { - self.server - .config - .allow_public_room_directory_over_federation + self.config.allow_public_room_directory_over_federation } - pub fn allow_device_name_federation(&self) -> bool { - self.server.config.allow_device_name_federation + pub fn allow_public_room_directory_without_auth(&self) -> bool { + self.config.allow_public_room_directory_without_auth } - pub fn allow_room_creation(&self) -> bool { self.server.config.allow_room_creation } + pub fn allow_device_name_federation(&self) -> bool { self.config.allow_device_name_federation } - pub fn new_user_displayname_suffix(&self) -> &String { - &self.server.config.new_user_displayname_suffix + pub fn allow_room_creation(&self) -> bool { self.config.allow_room_creation } + + pub fn allow_unstable_room_versions(&self) -> bool { self.config.allow_unstable_room_versions } + + pub fn default_room_version(&self) -> RoomVersionId { self.config.default_room_version.clone() } + + pub fn new_user_displayname_suffix(&self) -> &String { &self.config.new_user_displayname_suffix } + + pub fn allow_check_for_updates(&self) -> bool { self.config.allow_check_for_updates } + + pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } + + pub fn query_trusted_key_servers_first(&self) -> bool { self.config.query_trusted_key_servers_first } + + pub fn dns_resolver(&self) -> &TokioAsyncResolver { &self.resolver.resolver } + + pub fn query_all_nameservers(&self) -> bool { self.config.query_all_nameservers } + + pub fn actual_destinations(&self) -> &Arc> { &self.resolver.destinations } + + pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> { self.jwt_decoding_key.as_ref() } + + pub fn turn_password(&self) -> &String { &self.config.turn_password } + + pub fn turn_ttl(&self) -> u64 { self.config.turn_ttl } + + pub fn 
turn_uris(&self) -> &[String] { &self.config.turn_uris } + + pub fn turn_username(&self) -> &String { &self.config.turn_username } + + pub fn turn_secret(&self) -> &String { &self.config.turn_secret } + + pub fn auto_join_rooms(&self) -> &[OwnedRoomId] { &self.config.auto_join_rooms } + + pub fn allow_profile_lookup_federation_requests(&self) -> bool { + self.config.allow_profile_lookup_federation_requests } - pub fn allow_announcements_check(&self) -> bool { - self.server.config.allow_announcements_check - } + pub fn notification_push_path(&self) -> &String { &self.config.notification_push_path } - pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.server.config.trusted_servers } - - pub fn turn_password(&self) -> &String { &self.server.config.turn_password } - - pub fn turn_ttl(&self) -> u64 { self.server.config.turn_ttl } - - pub fn turn_uris(&self) -> &[String] { &self.server.config.turn_uris } - - pub fn turn_username(&self) -> &String { &self.server.config.turn_username } - - pub fn notification_push_path(&self) -> &String { &self.server.config.notification_push_path } + pub fn emergency_password(&self) -> &Option { &self.config.emergency_password } pub fn url_preview_domain_contains_allowlist(&self) -> &Vec { - &self.server.config.url_preview_domain_contains_allowlist + &self.config.url_preview_domain_contains_allowlist } pub fn url_preview_domain_explicit_allowlist(&self) -> &Vec { - &self.server.config.url_preview_domain_explicit_allowlist + &self.config.url_preview_domain_explicit_allowlist } pub fn url_preview_domain_explicit_denylist(&self) -> &Vec { - &self.server.config.url_preview_domain_explicit_denylist + &self.config.url_preview_domain_explicit_denylist } - pub fn url_preview_url_contains_allowlist(&self) -> &Vec { - &self.server.config.url_preview_url_contains_allowlist + pub fn url_preview_url_contains_allowlist(&self) -> &Vec { &self.config.url_preview_url_contains_allowlist } + + pub fn url_preview_max_spider_size(&self) -> usize { 
self.config.url_preview_max_spider_size } + + pub fn url_preview_check_root_domain(&self) -> bool { self.config.url_preview_check_root_domain } + + pub fn forbidden_alias_names(&self) -> &RegexSet { &self.config.forbidden_alias_names } + + pub fn forbidden_usernames(&self) -> &RegexSet { &self.config.forbidden_usernames } + + pub fn allow_local_presence(&self) -> bool { self.config.allow_local_presence } + + pub fn allow_incoming_presence(&self) -> bool { self.config.allow_incoming_presence } + + pub fn allow_outgoing_presence(&self) -> bool { self.config.allow_outgoing_presence } + + pub fn presence_idle_timeout_s(&self) -> u64 { self.config.presence_idle_timeout_s } + + pub fn presence_offline_timeout_s(&self) -> u64 { self.config.presence_offline_timeout_s } + + pub fn allow_incoming_read_receipts(&self) -> bool { self.config.allow_incoming_read_receipts } + + pub fn allow_outgoing_read_receipts(&self) -> bool { self.config.allow_outgoing_read_receipts } + + pub fn rocksdb_log_level(&self) -> &String { &self.config.rocksdb_log_level } + + pub fn rocksdb_max_log_file_size(&self) -> usize { self.config.rocksdb_max_log_file_size } + + pub fn rocksdb_log_time_to_roll(&self) -> usize { self.config.rocksdb_log_time_to_roll } + + pub fn rocksdb_optimize_for_spinning_disks(&self) -> bool { self.config.rocksdb_optimize_for_spinning_disks } + + pub fn rocksdb_parallelism_threads(&self) -> usize { self.config.rocksdb_parallelism_threads } + + pub fn rocksdb_compression_algo(&self) -> &String { &self.config.rocksdb_compression_algo } + + pub fn rocksdb_compression_level(&self) -> i32 { self.config.rocksdb_compression_level } + + pub fn rocksdb_bottommost_compression_level(&self) -> i32 { self.config.rocksdb_bottommost_compression_level } + + pub fn prevent_media_downloads_from(&self) -> &[OwnedServerName] { &self.config.prevent_media_downloads_from } + + pub fn forbidden_remote_server_names(&self) -> &[OwnedServerName] { &self.config.forbidden_remote_server_names } + + pub 
fn forbidden_remote_room_directory_server_names(&self) -> &[OwnedServerName] { + &self.config.forbidden_remote_room_directory_server_names } - pub fn url_preview_max_spider_size(&self) -> usize { - self.server.config.url_preview_max_spider_size + pub fn ip_range_denylist(&self) -> &[String] { &self.config.ip_range_denylist } + + pub fn well_known_support_page(&self) -> &Option { &self.config.well_known.support_page } + + pub fn well_known_support_role(&self) -> &Option { &self.config.well_known.support_role } + + pub fn well_known_support_email(&self) -> &Option { &self.config.well_known.support_email } + + pub fn well_known_support_mxid(&self) -> &Option { &self.config.well_known.support_mxid } + + pub fn block_non_admin_invites(&self) -> bool { self.config.block_non_admin_invites } + + pub fn supported_room_versions(&self) -> Vec { + let mut room_versions: Vec = vec![]; + room_versions.extend(self.stable_room_versions.clone()); + if self.allow_unstable_room_versions() { + room_versions.extend(self.unstable_room_versions.clone()); + }; + room_versions } - pub fn url_preview_check_root_domain(&self) -> bool { - self.server.config.url_preview_check_root_domain + /// TODO: the key valid until timestamp (`valid_until_ts`) is only honored + /// in room version > 4 + /// + /// Remove the outdated keys and insert the new ones. + /// + /// This doesn't actually check that the keys provided are newer than the + /// old set. + pub fn add_signing_key( + &self, origin: &ServerName, new_keys: ServerSigningKeys, + ) -> Result> { + self.db.add_signing_key(origin, new_keys) } - pub fn forbidden_alias_names(&self) -> &RegexSet { &self.server.config.forbidden_alias_names } + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found + /// for the server. 
+ pub fn signing_keys_for(&self, origin: &ServerName) -> Result> { + let mut keys = self.db.signing_keys_for(origin)?; + if origin == self.server_name() { + keys.insert( + format!("ed25519:{}", services().globals.keypair().version()) + .try_into() + .expect("found invalid server signing keys in DB"), + VerifyKey { + key: Base64::new(self.keypair.public_key().to_vec()), + }, + ); + } - pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames } - - /// checks if `user_id` is local to us via server_name comparison - #[inline] - pub fn user_is_local(&self, user_id: &UserId) -> bool { - self.server_is_ours(user_id.server_name()) + Ok(keys) } - #[inline] - pub fn server_is_ours(&self, server_name: &ServerName) -> bool { - server_name == self.server_name() + pub fn database_version(&self) -> Result { self.db.database_version() } + + pub fn bump_database_version(&self, new_version: u64) -> Result<()> { self.db.bump_database_version(new_version) } + + pub fn get_media_folder(&self) -> PathBuf { + let mut r = PathBuf::new(); + r.push(self.config.database_path.clone()); + r.push("media"); + r } - #[inline] - pub fn is_read_only(&self) -> bool { self.db.db.is_read_only() } + /// new SHA256 file name media function, requires "sha256_media" feature + /// flag enabled and database migrated uses SHA256 hash of the base64 key as + /// the file name + #[cfg(feature = "sha256_media")] + pub fn get_media_file_new(&self, key: &[u8]) -> PathBuf { + let mut r = PathBuf::new(); + r.push(self.config.database_path.clone()); + r.push("media"); + // Using the hash of the base64 key as the filename + // This is to prevent the total length of the path from exceeding the maximum + // length in most filesystems + r.push(general_purpose::URL_SAFE_NO_PAD.encode(::digest(key))); + r + } + + /// old base64 file name media function + /// This is the old version of `get_media_file` that uses the full base64 + /// key as the filename. 
+ pub fn get_media_file(&self, key: &[u8]) -> PathBuf { + let mut r = PathBuf::new(); + r.push(self.config.database_path.clone()); + r.push("media"); + r.push(general_purpose::URL_SAFE_NO_PAD.encode(key)); + r + } + + pub fn well_known_client(&self) -> &Option { &self.config.well_known.client } + + pub fn well_known_server(&self) -> &Option { &self.config.well_known.server } + + pub fn unix_socket_path(&self) -> &Option { &self.config.unix_socket_path } + + pub fn shutdown(&self) { + self.shutdown.store(true, atomic::Ordering::Relaxed); + // On shutdown + + if self.unix_socket_path().is_some() { + match &self.unix_socket_path() { + Some(path) => { + fs::remove_file(path).unwrap(); + }, + None => error!( + "Unable to remove socket file at {:?} during shutdown.", + &self.unix_socket_path() + ), + }; + }; + + info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + services().globals.rotate.fire(); + } } diff --git a/src/service/globals/resolver.rs b/src/service/globals/resolver.rs new file mode 100644 index 00000000..d958309d --- /dev/null +++ b/src/service/globals/resolver.rs @@ -0,0 +1,124 @@ +use std::{ + collections::HashMap, + future, iter, + net::{IpAddr, SocketAddr}, + sync::{Arc, RwLock as StdRwLock}, + time::Duration, +}; + +use hickory_resolver::TokioAsyncResolver; +use hyper::client::connect::dns::Name; +use reqwest::dns::{Addrs, Resolve, Resolving}; +use ruma::OwnedServerName; +use tokio::sync::RwLock; +use tracing::error; + +use crate::{service::sending::FedDest, Config, Error}; + +pub type WellKnownMap = HashMap; +pub type TlsNameMap = HashMap, u16)>; + +pub struct Resolver { + pub destinations: Arc>, // actual_destination, host + pub overrides: Arc>, + pub resolver: Arc, + pub hooked: Arc, +} + +pub struct Hooked { + pub overrides: Arc>, + pub resolver: Arc, +} + +impl Resolver { + pub(crate) fn new(config: &Config) -> Self { + let (sys_conf, mut opts) = hickory_resolver::system_conf::read_system_conf() + 
.map_err(|e| { + error!("Failed to set up hickory dns resolver with system config: {}", e); + Error::bad_config("Failed to set up hickory dns resolver with system config.") + }) + .unwrap(); + + let mut conf = hickory_resolver::config::ResolverConfig::new(); + + if let Some(domain) = sys_conf.domain() { + conf.set_domain(domain.clone()); + } + + for sys_conf in sys_conf.search() { + conf.add_search(sys_conf.clone()); + } + + for sys_conf in sys_conf.name_servers() { + let mut ns = sys_conf.clone(); + + ns.trust_negative_responses = !config.query_all_nameservers; + + conf.add_name_server(ns); + } + + opts.cache_size = config.dns_cache_entries as usize; + opts.negative_min_ttl = Some(Duration::from_secs(config.dns_min_ttl_nxdomain)); + opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 30)); + opts.positive_min_ttl = Some(Duration::from_secs(config.dns_min_ttl)); + opts.positive_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 7)); + opts.timeout = Duration::from_secs(config.dns_timeout); + opts.attempts = config.dns_attempts as usize; + opts.try_tcp_on_error = config.dns_tcp_fallback; + opts.num_concurrent_reqs = 1; + opts.shuffle_dns_servers = true; + opts.rotate = true; + + let resolver = Arc::new(TokioAsyncResolver::tokio(conf, opts)); + let overrides = Arc::new(StdRwLock::new(TlsNameMap::new())); + Resolver { + destinations: Arc::new(RwLock::new(WellKnownMap::new())), + overrides: overrides.clone(), + resolver: resolver.clone(), + hooked: Arc::new(Hooked { + overrides, + resolver, + }), + } + } +} + +impl Resolve for Resolver { + fn resolve(&self, name: Name) -> Resolving { resolve_to_reqwest(self.resolver.clone(), name) } +} + +impl Resolve for Hooked { + fn resolve(&self, name: Name) -> Resolving { + self.overrides + .read() + .unwrap() + .get(name.as_str()) + .map_or_else( + || resolve_to_reqwest(self.resolver.clone(), name), + |(override_name, port)| cached_to_reqwest(override_name, *port), + ) + } +} + +fn cached_to_reqwest(override_name: 
&[IpAddr], port: u16) -> Resolving { + override_name + .first() + .map(|first_name| -> Resolving { + let saddr = SocketAddr::new(*first_name, port); + let result: Box + Send> = Box::new(iter::once(saddr)); + Box::pin(future::ready(Ok(result))) + }) + .unwrap() +} + +fn resolve_to_reqwest(resolver: Arc, name: Name) -> Resolving { + Box::pin(async move { + let results = resolver + .lookup_ip(name.as_str()) + .await? + .into_iter() + .map(|ip| SocketAddr::new(ip, 0)); + + Ok(Box::new(results) as Addrs) + }) +} diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs new file mode 100644 index 00000000..ac595a6b --- /dev/null +++ b/src/service/key_backups/data.rs @@ -0,0 +1,47 @@ +use std::collections::BTreeMap; + +use ruma::{ + api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, + serde::Raw, + OwnedRoomId, RoomId, UserId, +}; + +use crate::Result; + +pub trait Data: Send + Sync { + fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw) -> Result; + + fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()>; + + fn update_backup(&self, user_id: &UserId, version: &str, backup_metadata: &Raw) -> Result; + + fn get_latest_backup_version(&self, user_id: &UserId) -> Result>; + + fn get_latest_backup(&self, user_id: &UserId) -> Result)>>; + + fn get_backup(&self, user_id: &UserId, version: &str) -> Result>>; + + fn add_key( + &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw, + ) -> Result<()>; + + fn count_keys(&self, user_id: &UserId, version: &str) -> Result; + + fn get_etag(&self, user_id: &UserId, version: &str) -> Result; + + fn get_all(&self, user_id: &UserId, version: &str) -> Result>; + + fn get_room( + &self, user_id: &UserId, version: &str, room_id: &RoomId, + ) -> Result>>; + + fn get_session( + &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, + ) -> Result>>; + + fn delete_all_keys(&self, user_id: &UserId, version: &str) -> 
Result<()>; + + fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()>; + + fn delete_room_key(&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str) -> Result<()>; +} diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 1bf048ef..f4bb5c3b 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,298 +1,83 @@ -use std::{collections::BTreeMap, sync::Arc}; +mod data; +use std::collections::BTreeMap; -use conduwuit::{ - Err, Result, err, implement, - utils::stream::{ReadyExt, TryIgnore}, -}; -use database::{Deserialized, Ignore, Interfix, Json, Map}; -use futures::StreamExt; +pub(crate) use data::Data; use ruma::{ - OwnedRoomId, RoomId, UserId, api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, + OwnedRoomId, RoomId, UserId, }; -use crate::{Dep, globals}; +use crate::Result; pub struct Service { - db: Data, - services: Services, + pub db: &'static dyn Data, } -struct Data { - backupid_algorithm: Arc, - backupid_etag: Arc, - backupkeyid_backup: Arc, -} - -struct Services { - globals: Dep, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - backupid_algorithm: args.db["backupid_algorithm"].clone(), - backupid_etag: args.db["backupid_etag"].clone(), - backupkeyid_backup: args.db["backupkeyid_backup"].clone(), - }, - services: Services { - globals: args.depend::("globals"), - }, - })) +impl Service { + pub fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw) -> Result { + self.db.create_backup(user_id, backup_metadata) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -#[implement(Service)] -pub fn create_backup( - &self, - user_id: &UserId, - backup_metadata: &Raw, -) -> Result { - let version = self.services.globals.next_count()?.to_string(); - let count = self.services.globals.next_count()?; - - let key = 
(user_id, &version); - self.db.backupid_algorithm.put(key, Json(backup_metadata)); - - self.db.backupid_etag.put(key, count); - - Ok(version) -} - -#[implement(Service)] -pub async fn delete_backup(&self, user_id: &UserId, version: &str) { - let key = (user_id, version); - self.db.backupid_algorithm.del(key); - self.db.backupid_etag.del(key); - - let key = (user_id, version, Interfix); - self.db - .backupkeyid_backup - .keys_prefix_raw(&key) - .ignore_err() - .ready_for_each(|outdated_key| { - self.db.backupkeyid_backup.remove(outdated_key); - }) - .await; -} - -#[implement(Service)] -pub async fn update_backup<'a>( - &self, - user_id: &UserId, - version: &'a str, - backup_metadata: &Raw, -) -> Result<&'a str> { - let key = (user_id, version); - if self.db.backupid_algorithm.qry(&key).await.is_err() { - return Err!(Request(NotFound("Tried to update nonexistent backup."))); + pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { + self.db.delete_backup(user_id, version) } - let count = self.services.globals.next_count().unwrap(); - self.db.backupid_etag.put(key, count); - self.db - .backupid_algorithm - .put_raw(key, backup_metadata.json().get()); - - Ok(version) -} - -#[implement(Service)] -pub async fn get_latest_backup_version(&self, user_id: &UserId) -> Result { - type Key<'a> = (&'a UserId, &'a str); - - let last_possible_key = (user_id, u64::MAX); - self.db - .backupid_algorithm - .rev_keys_from(&last_possible_key) - .ignore_err() - .ready_take_while(|(user_id_, _): &Key<'_>| *user_id_ == user_id) - .map(|(_, version): Key<'_>| version.to_owned()) - .next() - .await - .ok_or_else(|| err!(Request(NotFound("No backup versions found")))) -} - -#[implement(Service)] -pub async fn get_latest_backup( - &self, - user_id: &UserId, -) -> Result<(String, Raw)> { - type Key<'a> = (&'a UserId, &'a str); - type KeyVal<'a> = (Key<'a>, Raw); - - let last_possible_key = (user_id, u64::MAX); - self.db - .backupid_algorithm - 
.rev_stream_from(&last_possible_key) - .ignore_err() - .ready_take_while(|((user_id_, _), _): &KeyVal<'_>| *user_id_ == user_id) - .map(|((_, version), algorithm): KeyVal<'_>| (version.to_owned(), algorithm)) - .next() - .await - .ok_or_else(|| err!(Request(NotFound("No backup found")))) -} - -#[implement(Service)] -pub async fn get_backup(&self, user_id: &UserId, version: &str) -> Result> { - let key = (user_id, version); - self.db.backupid_algorithm.qry(&key).await.deserialized() -} - -#[implement(Service)] -pub async fn add_key( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - session_id: &str, - key_data: &Raw, -) -> Result<()> { - let key = (user_id, version); - if self.db.backupid_algorithm.qry(&key).await.is_err() { - return Err!(Request(NotFound("Tried to update nonexistent backup."))); + pub fn update_backup( + &self, user_id: &UserId, version: &str, backup_metadata: &Raw, + ) -> Result { + self.db.update_backup(user_id, version, backup_metadata) } - let count = self.services.globals.next_count().unwrap(); - self.db.backupid_etag.put(key, count); + pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { + self.db.get_latest_backup_version(user_id) + } - let key = (user_id, version, room_id, session_id); - self.db - .backupkeyid_backup - .put_raw(key, key_data.json().get()); + pub fn get_latest_backup(&self, user_id: &UserId) -> Result)>> { + self.db.get_latest_backup(user_id) + } - Ok(()) -} - -#[implement(Service)] -pub async fn count_keys(&self, user_id: &UserId, version: &str) -> usize { - let prefix = (user_id, version); - self.db - .backupkeyid_backup - .keys_prefix_raw(&prefix) - .count() - .await -} - -#[implement(Service)] -pub async fn get_etag(&self, user_id: &UserId, version: &str) -> String { - let key = (user_id, version); - self.db - .backupid_etag - .qry(&key) - .await - .deserialized::() - .as_ref() - .map(ToString::to_string) - .expect("Backup has no etag.") -} - -#[implement(Service)] -pub async fn 
get_all( - &self, - user_id: &UserId, - version: &str, -) -> BTreeMap { - type Key<'a> = (Ignore, Ignore, &'a RoomId, &'a str); - type KeyVal<'a> = (Key<'a>, Raw); - - let mut rooms = BTreeMap::::new(); - let default = || RoomKeyBackup { sessions: BTreeMap::new() }; - - let prefix = (user_id, version, Interfix); - self.db - .backupkeyid_backup - .stream_prefix(&prefix) - .ignore_err() - .ready_for_each(|((_, _, room_id, session_id), key_backup_data): KeyVal<'_>| { - rooms - .entry(room_id.into()) - .or_insert_with(default) - .sessions - .insert(session_id.into(), key_backup_data); - }) - .await; - - rooms -} - -#[implement(Service)] -pub async fn get_room( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, -) -> BTreeMap> { - type KeyVal<'a> = ((Ignore, Ignore, Ignore, &'a str), Raw); - - let prefix = (user_id, version, room_id, Interfix); - self.db - .backupkeyid_backup - .stream_prefix(&prefix) - .ignore_err() - .map(|((.., session_id), key_backup_data): KeyVal<'_>| { - (session_id.to_owned(), key_backup_data) - }) - .collect() - .await -} - -#[implement(Service)] -pub async fn get_session( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - session_id: &str, -) -> Result> { - let key = (user_id, version, room_id, session_id); - - self.db.backupkeyid_backup.qry(&key).await.deserialized() -} - -#[implement(Service)] -pub async fn delete_all_keys(&self, user_id: &UserId, version: &str) { - let key = (user_id, version, Interfix); - self.db - .backupkeyid_backup - .keys_prefix_raw(&key) - .ignore_err() - .ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key)) - .await; -} - -#[implement(Service)] -pub async fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) { - let key = (user_id, version, room_id, Interfix); - self.db - .backupkeyid_backup - .keys_prefix_raw(&key) - .ignore_err() - .ready_for_each(|outdated_key| { - self.db.backupkeyid_backup.remove(outdated_key); - }) - .await; -} - 
-#[implement(Service)] -pub async fn delete_room_key( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - session_id: &str, -) { - let key = (user_id, version, room_id, session_id); - self.db - .backupkeyid_backup - .keys_prefix_raw(&key) - .ignore_err() - .ready_for_each(|outdated_key| { - self.db.backupkeyid_backup.remove(outdated_key); - }) - .await; + pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result>> { + self.db.get_backup(user_id, version) + } + + pub fn add_key( + &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw, + ) -> Result<()> { + self.db + .add_key(user_id, version, room_id, session_id, key_data) + } + + pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { self.db.count_keys(user_id, version) } + + pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { self.db.get_etag(user_id, version) } + + pub fn get_all(&self, user_id: &UserId, version: &str) -> Result> { + self.db.get_all(user_id, version) + } + + pub fn get_room( + &self, user_id: &UserId, version: &str, room_id: &RoomId, + ) -> Result>> { + self.db.get_room(user_id, version, room_id) + } + + pub fn get_session( + &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, + ) -> Result>> { + self.db.get_session(user_id, version, room_id, session_id) + } + + pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { + self.db.delete_all_keys(user_id, version) + } + + pub fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> { + self.db.delete_room_keys(user_id, version, room_id) + } + + pub fn delete_room_key(&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str) -> Result<()> { + self.db + .delete_room_key(user_id, version, room_id, session_id) + } } diff --git a/src/service/manager.rs b/src/service/manager.rs deleted file mode 100644 index 3cdf5945..00000000 --- a/src/service/manager.rs +++ 
/dev/null @@ -1,196 +0,0 @@ -use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; - -use conduwuit::{Err, Error, Result, Server, debug, debug_warn, error, trace, utils::time, warn}; -use futures::{FutureExt, TryFutureExt}; -use tokio::{ - sync::{Mutex, MutexGuard}, - task::{JoinHandle, JoinSet}, - time::sleep, -}; - -use crate::{Services, service, service::Service}; - -pub(crate) struct Manager { - manager: Mutex>>>, - workers: Mutex, - server: Arc, - service: Arc, -} - -type Workers = JoinSet; -type WorkerResult = (Arc, Result<()>); -type WorkersLocked<'a> = MutexGuard<'a, Workers>; - -const RESTART_DELAY_MS: u64 = 2500; - -impl Manager { - pub(super) fn new(services: &Services) -> Arc { - Arc::new(Self { - manager: Mutex::new(None), - workers: Mutex::new(JoinSet::new()), - server: services.server.clone(), - service: services.service.clone(), - }) - } - - pub(super) async fn poll(&self) -> Result<()> { - if let Some(manager) = &mut *self.manager.lock().await { - trace!("Polling service manager..."); - return manager.await?; - } - - Ok(()) - } - - pub(super) async fn start(self: Arc) -> Result<()> { - let mut workers = self.workers.lock().await; - - debug!("Starting service manager..."); - let self_ = self.clone(); - _ = self.manager.lock().await.insert( - self.server - .runtime() - .spawn(async move { self_.worker().await }), - ); - - // we can't hold the lock during the iteration with start_worker so the values - // are snapshotted here - let services: Vec> = self - .service - .read() - .expect("locked for reading") - .values() - .map(|val| val.0.upgrade()) - .map(|arc| arc.expect("services available for manager startup")) - .collect(); - - debug!("Starting service workers..."); - for service in services { - self.start_worker(&mut workers, &service).await?; - } - - Ok(()) - } - - pub(super) async fn stop(&self) { - if let Some(manager) = self.manager.lock().await.take() { - debug!("Waiting for service manager..."); - if let Err(e) = manager.await { - 
error!("Manager shutdown error: {e:?}"); - } - } - } - - async fn worker(&self) -> Result<()> { - loop { - let mut workers = self.workers.lock().await; - tokio::select! { - result = workers.join_next() => match result { - Some(Ok(result)) => self.handle_result(&mut workers, result).await?, - Some(Err(error)) => self.handle_abort(&mut workers, Error::from(error)).await?, - None => break, - } - } - } - - debug!("Worker manager finished"); - Ok(()) - } - - async fn handle_abort(&self, _workers: &mut WorkersLocked<'_>, error: Error) -> Result<()> { - // not supported until service can be associated with abort - unimplemented!("unexpected worker task abort {error:?}"); - } - - async fn handle_result( - &self, - workers: &mut WorkersLocked<'_>, - result: WorkerResult, - ) -> Result<()> { - let (service, result) = result; - match result { - | Ok(()) => self.handle_finished(workers, &service).await, - | Err(error) => self.handle_error(workers, &service, error).await, - } - } - - async fn handle_finished( - &self, - _workers: &mut WorkersLocked<'_>, - service: &Arc, - ) -> Result<()> { - debug!("service {:?} worker finished", service.name()); - Ok(()) - } - - async fn handle_error( - &self, - workers: &mut WorkersLocked<'_>, - service: &Arc, - error: Error, - ) -> Result<()> { - let name = service.name(); - error!("service {name:?} aborted: {error}"); - - if !self.server.running() { - debug_warn!("service {name:?} error ignored on shutdown."); - return Ok(()); - } - - if !error.is_panic() { - return Err(error); - } - - let delay = Duration::from_millis(RESTART_DELAY_MS); - warn!("service {name:?} worker restarting after {} delay", time::pretty(delay)); - sleep(delay).await; - - self.start_worker(workers, service).await - } - - /// Start the worker in a task for the service. 
- async fn start_worker( - &self, - workers: &mut WorkersLocked<'_>, - service: &Arc, - ) -> Result<()> { - if !self.server.running() { - return Err!( - "Service {:?} worker not starting during server shutdown.", - service.name() - ); - } - - debug!("Service {:?} worker starting...", service.name()); - workers.spawn_on(worker(service.clone()), self.server.runtime()); - - Ok(()) - } -} - -/// Base frame for service worker. This runs in a tokio::task. All errors and -/// panics from the worker are caught and returned cleanly. The JoinHandle -/// should never error with a panic, and if so it should propagate, but it may -/// error with an Abort which the manager should handle along with results to -/// determine if the worker should be restarted. -#[tracing::instrument( - parent = None, - level = "trace", - skip_all, - fields(service = %service.name()), -)] -async fn worker(service: Arc) -> WorkerResult { - let service_ = Arc::clone(&service); - let result = AssertUnwindSafe(service_.worker()) - .catch_unwind() - .map_err(Error::from_panic); - - let result = if service.unconstrained() { - tokio::task::unconstrained(result).await - } else { - result.await - }; - - // flattens JoinError for panic into worker's Error - (service, result.unwrap_or_else(Err)) -} diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs deleted file mode 100644 index 91e00228..00000000 --- a/src/service/media/blurhash.rs +++ /dev/null @@ -1,179 +0,0 @@ -#[cfg(feature = "blurhashing")] -use conduwuit::config::BlurhashConfig as CoreBlurhashConfig; -use conduwuit::{Result, implement}; - -use super::Service; - -#[implement(Service)] -#[cfg(not(feature = "blurhashing"))] -pub fn create_blurhash( - &self, - _file: &[u8], - _content_type: Option<&str>, - _file_name: Option<&str>, -) -> Result> { - conduwuit::debug_warn!("blurhashing on upload support was not compiled"); - - Ok(None) -} - -#[implement(Service)] -#[cfg(feature = "blurhashing")] -pub fn create_blurhash( - &self, - 
file: &[u8], - content_type: Option<&str>, - file_name: Option<&str>, -) -> Result> { - let config = BlurhashConfig::from(self.services.server.config.blurhashing); - - // since 0 means disabled blurhashing, skipped blurhashing - if config.size_limit == 0 { - return Ok(None); - } - - get_blurhash_from_request(file, content_type, file_name, config) - .map_err(|e| conduwuit::err!(debug_error!("blurhashing error: {e}"))) - .map(Some) -} - -/// Returns the blurhash or a blurhash error which implements Display. -#[tracing::instrument( - name = "blurhash", - level = "debug", - skip(data), - fields( - bytes = data.len(), - ), -)] -#[cfg(feature = "blurhashing")] -fn get_blurhash_from_request( - data: &[u8], - mime: Option<&str>, - filename: Option<&str>, - config: BlurhashConfig, -) -> Result { - // Get format image is supposed to be in - let format = get_format_from_data_mime_and_filename(data, mime, filename)?; - - // Get the image reader for said image format - let decoder = get_image_decoder_with_format_and_data(format, data)?; - - // Check image size makes sense before unpacking whole image - if is_image_above_size_limit(&decoder, config) { - return Err(BlurhashingError::ImageTooLarge); - } - - let image = image::DynamicImage::from_decoder(decoder)?; - - blurhash_an_image(&image, config) -} - -/// Gets the Image Format value from the data,mime, and filename -/// It first checks if the mime is a valid image format -/// Then it checks if the filename has a format, otherwise just guess based on -/// the binary data Assumes that mime and filename extension won't be for a -/// different file format than file. 
-#[cfg(feature = "blurhashing")] -fn get_format_from_data_mime_and_filename( - data: &[u8], - mime: Option<&str>, - filename: Option<&str>, -) -> Result { - let extension = filename - .map(std::path::Path::new) - .and_then(std::path::Path::extension) - .map(std::ffi::OsStr::to_string_lossy); - - mime.or(extension.as_deref()) - .and_then(image::ImageFormat::from_mime_type) - .map_or_else(|| image::guess_format(data).map_err(Into::into), Ok) -} - -#[cfg(feature = "blurhashing")] -fn get_image_decoder_with_format_and_data( - image_format: image::ImageFormat, - data: &[u8], -) -> Result, BlurhashingError> { - let mut image_reader = image::ImageReader::new(std::io::Cursor::new(data)); - image_reader.set_format(image_format); - Ok(Box::new(image_reader.into_decoder()?)) -} - -#[cfg(feature = "blurhashing")] -fn is_image_above_size_limit( - decoder: &T, - blurhash_config: BlurhashConfig, -) -> bool { - decoder.total_bytes() >= blurhash_config.size_limit -} - -#[cfg(feature = "blurhashing")] -#[tracing::instrument(name = "encode", level = "debug", skip_all)] -#[inline] -fn blurhash_an_image( - image: &image::DynamicImage, - blurhash_config: BlurhashConfig, -) -> Result { - Ok(blurhash::encode_image( - blurhash_config.components_x, - blurhash_config.components_y, - &image.to_rgba8(), - )?) 
-} - -#[derive(Clone, Copy, Debug)] -pub struct BlurhashConfig { - pub components_x: u32, - pub components_y: u32, - - /// size limit in bytes - pub size_limit: u64, -} - -#[cfg(feature = "blurhashing")] -impl From for BlurhashConfig { - fn from(value: CoreBlurhashConfig) -> Self { - Self { - components_x: value.components_x, - components_y: value.components_y, - size_limit: value.blurhash_max_raw_size, - } - } -} - -#[derive(Debug)] -#[cfg(feature = "blurhashing")] -pub enum BlurhashingError { - HashingLibError(Box), - #[cfg(feature = "blurhashing")] - ImageError(Box), - ImageTooLarge, -} - -#[cfg(feature = "blurhashing")] -impl From for BlurhashingError { - fn from(value: image::ImageError) -> Self { Self::ImageError(Box::new(value)) } -} - -#[cfg(feature = "blurhashing")] -impl From for BlurhashingError { - fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } -} - -#[cfg(feature = "blurhashing")] -impl std::fmt::Display for BlurhashingError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Blurhash Error:")?; - match &self { - | Self::ImageTooLarge => write!(f, "Image was too large to blurhash")?, - | Self::HashingLibError(e) => - write!(f, "There was an error with the blurhashing library => {e}")?, - #[cfg(feature = "blurhashing")] - | Self::ImageError(e) => - write!(f, "There was an error with the image loading library => {e}")?, - } - - Ok(()) - } -} diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 0ccd844f..7cbde755 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,278 +1,25 @@ -use std::{sync::Arc, time::Duration}; +use crate::Result; -use conduwuit::{ - Err, Result, debug, debug_info, err, - utils::{ReadyExt, str_from_bytes, stream::TryIgnore, string_from_bytes}, -}; -use database::{Database, Interfix, Map}; -use futures::StreamExt; -use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; - -use 
super::{preview::UrlPreviewData, thumbnail::Dim}; - -pub(crate) struct Data { - mediaid_file: Arc, - mediaid_user: Arc, - url_previews: Arc, -} - -#[derive(Debug)] -pub(super) struct Metadata { - pub(super) content_disposition: Option, - pub(super) content_type: Option, - pub(super) key: Vec, -} - -impl Data { - pub(super) fn new(db: &Arc) -> Self { - Self { - mediaid_file: db["mediaid_file"].clone(), - mediaid_user: db["mediaid_user"].clone(), - url_previews: db["url_previews"].clone(), - } - } - - pub(super) fn create_file_metadata( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - dim: &Dim, - content_disposition: Option<&ContentDisposition>, +pub trait Data: Send + Sync { + fn create_file_metadata( + &self, sender_user: Option<&str>, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>, - ) -> Result> { - let dim: &[u32] = &[dim.width, dim.height]; - let key = (mxc, dim, content_disposition, content_type); - let key = database::serialize_key(key)?; - self.mediaid_file.insert(&key, []); - if let Some(user) = user { - let key = (mxc, user); - self.mediaid_user.put_raw(key, user); - } + ) -> Result>; - Ok(key.to_vec()) - } + fn delete_file_mxc(&self, mxc: String) -> Result<()>; - pub(super) async fn delete_file_mxc(&self, mxc: &Mxc<'_>) { - debug!("MXC URI: {mxc}"); + /// Returns content_disposition, content_type and the metadata key. 
+ fn search_file_metadata( + &self, mxc: String, width: u32, height: u32, + ) -> Result<(Option, Option, Vec)>; - let prefix = (mxc, Interfix); - self.mediaid_file - .keys_prefix_raw(&prefix) - .ignore_err() - .ready_for_each(|key| self.mediaid_file.remove(key)) - .await; + fn search_mxc_metadata_prefix(&self, mxc: String) -> Result>>; - self.mediaid_user - .stream_prefix_raw(&prefix) - .ignore_err() - .ready_for_each(|(key, val)| { - debug_assert!( - key.starts_with(mxc.to_string().as_bytes()), - "key should start with the mxc" - ); + fn get_all_media_keys(&self) -> Result>>; - let user = str_from_bytes(val).unwrap_or_default(); - debug_info!("Deleting key {key:?} which was uploaded by user {user}"); + fn remove_url_preview(&self, url: &str) -> Result<()>; - self.mediaid_user.remove(key); - }) - .await; - } + fn set_url_preview(&self, url: &str, data: &super::UrlPreviewData, timestamp: std::time::Duration) -> Result<()>; - /// Searches for all files with the given MXC - pub(super) async fn search_mxc_metadata_prefix(&self, mxc: &Mxc<'_>) -> Result>> { - debug!("MXC URI: {mxc}"); - - let prefix = (mxc, Interfix); - let keys: Vec> = self - .mediaid_file - .keys_prefix_raw(&prefix) - .ignore_err() - .map(<[u8]>::to_vec) - .collect() - .await; - - if keys.is_empty() { - return Err!(Database("Failed to find any keys in database for `{mxc}`",)); - } - - debug!("Got the following keys: {keys:?}"); - - Ok(keys) - } - - pub(super) async fn search_file_metadata( - &self, - mxc: &Mxc<'_>, - dim: &Dim, - ) -> Result { - let dim: &[u32] = &[dim.width, dim.height]; - let prefix = (mxc, dim, Interfix); - - let key = self - .mediaid_file - .keys_prefix_raw(&prefix) - .ignore_err() - .map(ToOwned::to_owned) - .next() - .await - .ok_or_else(|| err!(Request(NotFound("Media not found"))))?; - - let mut parts = key.rsplit(|&b| b == 0xFF); - - let content_type = parts - .next() - .map(string_from_bytes) - .transpose() - .map_err(|e| err!(Database(error!(?mxc, "Content-type is invalid: 
{e}"))))?; - - let content_disposition = parts - .next() - .map(Some) - .ok_or_else(|| err!(Database(error!(?mxc, "Media ID in db is invalid."))))? - .filter(|bytes| !bytes.is_empty()) - .map(string_from_bytes) - .transpose() - .map_err(|e| err!(Database(error!(?mxc, "Content-type is invalid: {e}"))))? - .as_deref() - .map(str::parse) - .transpose()?; - - Ok(Metadata { content_disposition, content_type, key }) - } - - /// Gets all the MXCs associated with a user - pub(super) async fn get_all_user_mxcs(&self, user_id: &UserId) -> Vec { - self.mediaid_user - .stream() - .ignore_err() - .ready_filter_map(|(key, user): (&str, &UserId)| { - (user == user_id).then(|| key.into()) - }) - .collect() - .await - } - - /// Gets all the media keys in our database (this includes all the metadata - /// associated with it such as width, height, content-type, etc) - pub(crate) async fn get_all_media_keys(&self) -> Vec> { - self.mediaid_file - .raw_keys() - .ignore_err() - .map(<[u8]>::to_vec) - .collect() - .await - } - - #[inline] - pub(super) fn remove_url_preview(&self, url: &str) -> Result<()> { - self.url_previews.remove(url.as_bytes()); - Ok(()) - } - - pub(super) fn set_url_preview( - &self, - url: &str, - data: &UrlPreviewData, - timestamp: Duration, - ) -> Result<()> { - let mut value = Vec::::new(); - value.extend_from_slice(×tamp.as_secs().to_be_bytes()); - value.push(0xFF); - value.extend_from_slice( - data.title - .as_ref() - .map(String::as_bytes) - .unwrap_or_default(), - ); - value.push(0xFF); - value.extend_from_slice( - data.description - .as_ref() - .map(String::as_bytes) - .unwrap_or_default(), - ); - value.push(0xFF); - value.extend_from_slice( - data.image - .as_ref() - .map(String::as_bytes) - .unwrap_or_default(), - ); - value.push(0xFF); - value.extend_from_slice(&data.image_size.unwrap_or(0).to_be_bytes()); - value.push(0xFF); - value.extend_from_slice(&data.image_width.unwrap_or(0).to_be_bytes()); - value.push(0xFF); - 
value.extend_from_slice(&data.image_height.unwrap_or(0).to_be_bytes()); - - self.url_previews.insert(url.as_bytes(), &value); - - Ok(()) - } - - pub(super) async fn get_url_preview(&self, url: &str) -> Result { - let values = self.url_previews.get(url).await?; - - let mut values = values.split(|&b| b == 0xFF); - - let _ts = values.next(); - /* if we ever decide to use timestamp, this is here. - match values.next().map(|b| u64::from_be_bytes(b.try_into().expect("valid BE array"))) { - Some(0) => None, - x => x, - };*/ - - let title = match values - .next() - .and_then(|b| String::from_utf8(b.to_vec()).ok()) - { - | Some(s) if s.is_empty() => None, - | x => x, - }; - let description = match values - .next() - .and_then(|b| String::from_utf8(b.to_vec()).ok()) - { - | Some(s) if s.is_empty() => None, - | x => x, - }; - let image = match values - .next() - .and_then(|b| String::from_utf8(b.to_vec()).ok()) - { - | Some(s) if s.is_empty() => None, - | x => x, - }; - let image_size = match values - .next() - .map(|b| usize::from_be_bytes(b.try_into().unwrap_or_default())) - { - | Some(0) => None, - | x => x, - }; - let image_width = match values - .next() - .map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default())) - { - | Some(0) => None, - | x => x, - }; - let image_height = match values - .next() - .map(|b| u32::from_be_bytes(b.try_into().unwrap_or_default())) - { - | Some(0) => None, - | x => x, - }; - - Ok(UrlPreviewData { - title, - description, - image, - image_size, - image_width, - image_height, - }) - } + fn get_url_preview(&self, url: &str) -> Option; } diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs deleted file mode 100644 index 5fd628cd..00000000 --- a/src/service/media/migrations.rs +++ /dev/null @@ -1,166 +0,0 @@ -use std::{ - collections::HashSet, - ffi::{OsStr, OsString}, - fs::{self}, - path::PathBuf, - sync::Arc, - time::Instant, -}; - -use conduwuit::{ - Config, Result, debug, debug_info, debug_warn, error, info, - 
utils::{ReadyExt, stream::TryIgnore}, - warn, -}; - -use crate::Services; - -/// Migrates a media directory from legacy base64 file names to sha2 file names. -/// All errors are fatal. Upon success the database is keyed to not perform this -/// again. -pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { - let db = &services.db; - let config = &services.server.config; - - warn!("Migrating legacy base64 file names to sha256 file names"); - let mediaid_file = &db["mediaid_file"]; - - // Move old media files to new names - let mut changes = Vec::<(PathBuf, PathBuf)>::new(); - mediaid_file - .raw_keys() - .ignore_err() - .ready_for_each(|key| { - let old = services.media.get_media_file_b64(key); - let new = services.media.get_media_file_sha256(key); - debug!(?key, ?old, ?new, num = changes.len(), "change"); - changes.push((old, new)); - }) - .await; - - // move the file to the new location - for (old_path, path) in changes { - if old_path.exists() { - tokio::fs::rename(&old_path, &path).await?; - if config.media_compat_file_link { - tokio::fs::symlink(&path, &old_path).await?; - } - } - } - - db["global"].insert(b"feat_sha256_media", []); - info!("Finished applying sha256_media"); - Ok(()) -} - -/// Check is run on startup for prior-migrated media directories. This handles: -/// - Going back and forth to non-sha256 legacy binaries (e.g. upstream). -/// - Deletion of artifacts in the media directory which will then fall out of -/// sync with the database. -pub(crate) async fn checkup_sha256_media(services: &Services) -> Result<()> { - use crate::media::encode_key; - - debug!("Checking integrity of media directory"); - let db = &services.db; - let media = &services.media; - let config = &services.server.config; - let mediaid_file = &db["mediaid_file"]; - let mediaid_user = &db["mediaid_user"]; - let dbs = (mediaid_file, mediaid_user); - let timer = Instant::now(); - - let dir = media.get_media_dir(); - let files: HashSet = fs::read_dir(dir)? 
- .filter_map(|ent| ent.map_or(None, |ent| Some(ent.path().into_os_string()))) - .collect(); - - for key in media.db.get_all_media_keys().await { - let new_path = media.get_media_file_sha256(&key).into_os_string(); - let old_path = media.get_media_file_b64(&key).into_os_string(); - if let Err(e) = handle_media_check(&dbs, config, &files, &key, &new_path, &old_path).await - { - error!( - media_id = ?encode_key(&key), ?new_path, ?old_path, - "Failed to resolve media check failure: {e}" - ); - } - } - - debug_info!( - elapsed = ?timer.elapsed(), - "Finished checking media directory" - ); - - Ok(()) -} - -async fn handle_media_check( - dbs: &(&Arc, &Arc), - config: &Config, - files: &HashSet, - key: &[u8], - new_path: &OsStr, - old_path: &OsStr, -) -> Result<()> { - use crate::media::encode_key; - - let (mediaid_file, mediaid_user) = dbs; - - let new_exists = files.contains(new_path); - let old_exists = files.contains(old_path); - let old_is_symlink = || async { - tokio::fs::symlink_metadata(old_path) - .await - .is_ok_and(|md| md.is_symlink()) - }; - - if config.prune_missing_media && !old_exists && !new_exists { - error!( - media_id = ?encode_key(key), ?new_path, ?old_path, - "Media is missing at all paths. Removing from database..." - ); - - mediaid_file.remove(key); - mediaid_user.remove(key); - } - - if config.media_compat_file_link && !old_exists && new_exists { - debug_warn!( - media_id = ?encode_key(key), ?new_path, ?old_path, - "Media found but missing legacy link. Fixing..." - ); - - tokio::fs::symlink(&new_path, &old_path).await?; - } - - if config.media_compat_file_link && !new_exists && old_exists { - debug_warn!( - media_id = ?encode_key(key), ?new_path, ?old_path, - "Legacy media found without sha256 migration. Fixing..." - ); - - debug_assert!( - old_is_symlink().await, - "Legacy media not expected to be a symlink without an existing sha256 migration." 
- ); - - tokio::fs::rename(&old_path, &new_path).await?; - tokio::fs::symlink(&new_path, &old_path).await?; - } - - if !config.media_compat_file_link && old_exists && old_is_symlink().await { - debug_warn!( - media_id = ?encode_key(key), ?new_path, ?old_path, - "Legacy link found but compat disabled. Cleansing symlink..." - ); - - debug_assert!( - new_exists, - "sha256 migration into new file expected prior to cleaning legacy symlink here." - ); - - tokio::fs::remove_file(&old_path).await?; - } - - Ok(()) -} diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index d053ba54..6d3ef196 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,435 +1,568 @@ -pub mod blurhash; mod data; -pub(super) mod migrations; -mod preview; -mod remote; -mod tests; -mod thumbnail; -use std::{path::PathBuf, sync::Arc, time::SystemTime}; +use std::{collections::HashMap, io::Cursor, sync::Arc, time::SystemTime}; -use async_trait::async_trait; -use base64::{Engine as _, engine::general_purpose}; -use conduwuit::{ - Err, Result, Server, debug, debug_error, debug_info, debug_warn, err, error, trace, - utils::{self, MutexMap}, - warn, -}; -use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; +pub(crate) use data::Data; +use image::imageops::FilterType; +use ruma::{OwnedMxcUri, OwnedUserId}; +use serde::Serialize; use tokio::{ - fs, + fs::{self, File}, io::{AsyncReadExt, AsyncWriteExt, BufReader}, + sync::{Mutex, RwLock}, }; +use tracing::{debug, error}; -use self::data::{Data, Metadata}; -pub use self::thumbnail::Dim; -use crate::{Dep, client, globals, moderation, sending}; +use crate::{services, utils, Error, Result}; #[derive(Debug)] pub struct FileMeta { - pub content: Option>, + pub content_disposition: Option, pub content_type: Option, - pub content_disposition: Option, + pub file: Vec, +} + +#[derive(Serialize, Default)] +pub struct UrlPreviewData { + #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:title"))] + 
pub title: Option, + #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:description"))] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:image"))] + pub image: Option, + #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "matrix:image:size"))] + pub image_size: Option, + #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:image:width"))] + pub image_width: Option, + #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:image:height"))] + pub image_height: Option, } pub struct Service { - url_preview_mutex: MutexMap, - pub(super) db: Data, - services: Services, -} - -struct Services { - server: Arc, - client: Dep, - globals: Dep, - sending: Dep, - moderation: Dep, -} - -/// generated MXC ID (`media-id`) length -pub const MXC_LENGTH: usize = 32; - -/// Cache control for immutable objects. -pub const CACHE_CONTROL_IMMUTABLE: &str = "public,max-age=31536000,immutable"; - -/// Default cross-origin resource policy. -pub const CORP_CROSS_ORIGIN: &str = "cross-origin"; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - url_preview_mutex: MutexMap::new(), - db: Data::new(args.db), - services: Services { - server: args.server.clone(), - client: args.depend::("client"), - globals: args.depend::("globals"), - sending: args.depend::("sending"), - moderation: args.depend::("moderation"), - }, - })) - } - - async fn worker(self: Arc) -> Result<()> { - self.create_media_dir().await?; - - Ok(()) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, + pub url_preview_mutex: RwLock>>>, } impl Service { /// Uploads a file. 
pub async fn create( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - content_disposition: Option<&ContentDisposition>, - content_type: Option<&str>, - file: &[u8], + &self, sender_user: Option, mxc: String, content_disposition: Option<&str>, + content_type: Option<&str>, file: &[u8], ) -> Result<()> { // Width, Height = 0 if it's not a thumbnail - let key = self.db.create_file_metadata( - mxc, - user, - &Dim::default(), - content_disposition, - content_type, - )?; + let key = if let Some(user) = sender_user { + self.db + .create_file_metadata(Some(user.as_str()), mxc, 0, 0, content_disposition, content_type)? + } else { + self.db + .create_file_metadata(None, mxc, 0, 0, content_disposition, content_type)? + }; - //TODO: Dangling metadata in database if creation fails - let mut f = self.create_media_file(&key).await?; + let path; + + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "sha256_media")] + { + path = services().globals.get_media_file_new(&key); + }; + + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(not(feature = "sha256_media"))] + { + path = services().globals.get_media_file(&key); + }; + + let mut f = File::create(path).await?; f.write_all(file).await?; Ok(()) } /// Deletes a file in the database and from the media directory via an MXC - pub async fn delete(&self, mxc: &Mxc<'_>) -> Result<()> { - match self.db.search_mxc_metadata_prefix(mxc).await { - | Ok(keys) => { - for key in keys { - trace!(?mxc, "MXC Key: {key:?}"); - debug_info!(?mxc, "Deleting from filesystem"); + pub async fn delete(&self, mxc: String) -> Result<()> { + if let Ok(keys) = self.db.search_mxc_metadata_prefix(mxc.clone()) { + for key in keys { + let file_path; - if let Err(e) = self.remove_media_file(&key).await { - debug_error!(?mxc, "Failed to remove media file: {e}"); - } + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on 
expressions are experimental + #[cfg(feature = "sha256_media")] + { + file_path = services().globals.get_media_file_new(&key); + }; - debug_info!(?mxc, "Deleting from database"); - self.db.delete_file_mxc(mxc).await; - } + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(not(feature = "sha256_media"))] + { + file_path = services().globals.get_media_file(&key); + }; - Ok(()) - }, - | _ => { - Err!(Database(error!( - "Failed to find any media keys for MXC {mxc} in our database." - ))) - }, + debug!("Got local file path: {:?}", file_path); + + debug!("Deleting local file {:?} from filesystem, original MXC: {}", file_path, mxc); + fs::remove_file(file_path).await?; + + debug!("Deleting MXC {mxc} from database"); + self.db.delete_file_mxc(mxc.clone())?; + } + + Ok(()) + } else { + error!("Failed to find any media keys for MXC \"{mxc}\" in our database (MXC does not exist)"); + Err(Error::bad_database( + "Failed to find any media keys for the provided MXC in our database (MXC does not exist)", + )) } } - /// Deletes all media by the specified user - /// - /// currently, this is only practical for local users - pub async fn delete_from_user(&self, user: &UserId) -> Result { - let mxcs = self.db.get_all_user_mxcs(user).await; - let mut deletion_count: usize = 0; + /// Uploads or replaces a file thumbnail. + #[allow(clippy::too_many_arguments)] + pub async fn upload_thumbnail( + &self, sender_user: Option, mxc: String, content_disposition: Option<&str>, + content_type: Option<&str>, width: u32, height: u32, file: &[u8], + ) -> Result<()> { + let key = if let Some(user) = sender_user { + self.db + .create_file_metadata(Some(user.as_str()), mxc, width, height, content_disposition, content_type)? + } else { + self.db + .create_file_metadata(None, mxc, width, height, content_disposition, content_type)? 
+ }; - for mxc in mxcs { - let Ok(mxc) = mxc.as_str().try_into().inspect_err(|e| { - debug_error!(?mxc, "Failed to parse MXC URI from database: {e}"); - }) else { - continue; - }; + let path; - debug_info!(%deletion_count, "Deleting MXC {mxc} by user {user} from database and filesystem"); - match self.delete(&mxc).await { - | Ok(()) => { - deletion_count = deletion_count.saturating_add(1); - }, - | Err(e) => { - debug_error!(%deletion_count, "Failed to delete {mxc} from user {user}, ignoring error: {e}"); - }, - } - } + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "sha256_media")] + { + path = services().globals.get_media_file_new(&key); + }; - Ok(deletion_count) + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(not(feature = "sha256_media"))] + { + path = services().globals.get_media_file(&key); + }; + + let mut f = File::create(path).await?; + f.write_all(file).await?; + + Ok(()) } /// Downloads a file. - pub async fn get(&self, mxc: &Mxc<'_>) -> Result> { - match self.db.search_file_metadata(mxc, &Dim::default()).await { - | Ok(Metadata { content_disposition, content_type, key }) => { - let mut content = Vec::with_capacity(8192); - let path = self.get_media_file(&key); - BufReader::new(fs::File::open(path).await?) 
- .read_to_end(&mut content) - .await?; + pub async fn get(&self, mxc: String) -> Result> { + if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, 0, 0) { + let path; - Ok(Some(FileMeta { - content: Some(content), - content_type, - content_disposition, - })) - }, - | _ => Ok(None), - } - } - - /// Gets all the MXC URIs in our media database - pub async fn get_all_mxcs(&self) -> Result> { - let all_keys = self.db.get_all_media_keys().await; - - let mut mxcs = Vec::with_capacity(all_keys.len()); - - for key in all_keys { - trace!("Full MXC key from database: {key:?}"); - - let mut parts = key.split(|&b| b == 0xFF); - let mxc = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|e| { - err!(Database(error!( - "Failed to parse MXC unicode bytes from our database: {e}" - ))) - }) - }) - .transpose()?; - - let Some(mxc_s) = mxc else { - debug_warn!( - ?mxc, - "Parsed MXC URL unicode bytes from database but is still invalid" - ); - continue; + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "sha256_media")] + { + path = services().globals.get_media_file_new(&key); }; - trace!("Parsed MXC key to URL: {mxc_s}"); - let mxc = OwnedMxcUri::from(mxc_s); + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(not(feature = "sha256_media"))] + { + path = services().globals.get_media_file(&key); + }; - if mxc.is_valid() { - mxcs.push(mxc); - } else { - debug_warn!("{mxc:?} from database was found to not be valid"); - } + let mut file = Vec::new(); + BufReader::new(File::open(path).await?) + .read_to_end(&mut file) + .await?; + + Ok(Some(FileMeta { + content_disposition, + content_type, + file, + })) + } else { + Ok(None) } - - Ok(mxcs) } /// Deletes all remote only media files in the given at or after - /// time/duration. Returns a usize with the amount of media files deleted. 
- pub async fn delete_all_remote_media_at_after_time( - &self, - time: SystemTime, - before: bool, - after: bool, - yes_i_want_to_delete_local_media: bool, - ) -> Result { - let all_keys = self.db.get_all_media_keys().await; - let mut remote_mxcs = Vec::with_capacity(all_keys.len()); + /// time/duration. Returns a u32 with the amount of media files deleted. + pub async fn delete_all_remote_media_at_after_time(&self, time: String) -> Result { + if let Ok(all_keys) = self.db.get_all_media_keys() { + let user_duration: SystemTime = match cyborgtime::parse_duration(&time) { + Ok(duration) => { + debug!("Parsed duration: {:?}", duration); + debug!("System time now: {:?}", SystemTime::now()); + SystemTime::now() - duration + }, + Err(e) => { + error!("Failed to parse user-specified time duration: {}", e); + return Err(Error::bad_database("Failed to parse user-specified time duration.")); + }, + }; - for key in all_keys { - trace!("Full MXC key from database: {key:?}"); - let mut parts = key.split(|&b| b == 0xFF); - let mxc = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|e| { - err!(Database(error!( - "Failed to parse MXC unicode bytes from our database: {e}" - ))) + let mut remote_mxcs: Vec = vec![]; + + for key in all_keys { + debug!("Full MXC key from database: {:?}", key); + + // we need to get the MXC URL from the first part of the key (the first 0xff / + // 255 push). 
this is all necessary because of conduit using magic keys for + // media + let mut parts = key.split(|&b| b == 0xFF); + let mxc = parts + .next() + .map(|bytes| { + utils::string_from_bytes(bytes).map_err(|e| { + error!("Failed to parse MXC unicode bytes from our database: {}", e); + Error::bad_database("Failed to parse MXC unicode bytes from our database") + }) }) - }) - .transpose()?; + .transpose()?; - let Some(mxc_s) = mxc else { - debug_warn!( - ?mxc, - "Parsed MXC URL unicode bytes from database but is still invalid" - ); - continue; - }; + let Some(mxc_s) = mxc else { + return Err(Error::bad_database( + "Parsed MXC URL unicode bytes from database but still is None", + )); + }; - trace!("Parsed MXC key to URL: {mxc_s}"); - let mxc = OwnedMxcUri::from(mxc_s); - if (mxc.server_name() == Ok(self.services.globals.server_name()) - && !yes_i_want_to_delete_local_media) - || !mxc.is_valid() + debug!("Parsed MXC key to URL: {}", mxc_s); + + let mxc = OwnedMxcUri::from(mxc_s); + if mxc.server_name() == Ok(services().globals.server_name()) { + debug!("Ignoring local media MXC: {}", mxc); + // ignore our own MXC URLs as this would be local media. 
+ continue; + } + + let path; + + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "sha256_media")] + { + path = services().globals.get_media_file_new(&key); + }; + + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(not(feature = "sha256_media"))] + { + path = services().globals.get_media_file(&key); + }; + + debug!("MXC path: {:?}", path); + + let file_metadata = fs::metadata(path.clone()).await?; + debug!("File metadata: {:?}", file_metadata); + + let file_created_at = file_metadata.created()?; + debug!("File created at: {:?}", file_created_at); + + if file_created_at >= user_duration { + debug!("File is within user duration, pushing to list of file paths and keys to delete."); + remote_mxcs.push(mxc.to_string()); + } + } + + debug!( + "Finished going through all our media in database for eligible keys to delete, checking if these are \ + empty" + ); + + if remote_mxcs.is_empty() { + return Err(Error::bad_database("Did not found any eligible MXCs to delete.")); + } + + debug!("Deleting media now in the past \"{:?}\".", user_duration); + + let mut deletion_count = 0; + + for mxc in remote_mxcs { + debug!("Deleting MXC {mxc} from database and filesystem"); + self.delete(mxc).await?; + deletion_count += 1; + } + + Ok(deletion_count) + } else { + Err(Error::bad_database( + "Failed to get all our media keys (filesystem or database issue?).", + )) + } + } + + /// Returns width, height of the thumbnail and whether it should be cropped. + /// Returns None when the server should send the original file. 
+ pub fn thumbnail_properties(&self, width: u32, height: u32) -> Option<(u32, u32, bool)> { + match (width, height) { + (0..=32, 0..=32) => Some((32, 32, true)), + (0..=96, 0..=96) => Some((96, 96, true)), + (0..=320, 0..=240) => Some((320, 240, false)), + (0..=640, 0..=480) => Some((640, 480, false)), + (0..=800, 0..=600) => Some((800, 600, false)), + _ => None, + } + } + + /// Downloads a file's thumbnail. + /// + /// Here's an example on how it works: + /// + /// - Client requests an image with width=567, height=567 + /// - Server rounds that up to (800, 600), so it doesn't have to save too + /// many thumbnails + /// - Server rounds that up again to (958, 600) to fix the aspect ratio + /// (only for width,height>96) + /// - Server creates the thumbnail and sends it to the user + /// + /// For width,height <= 96 the server uses another thumbnailing algorithm + /// which crops the image afterwards. + pub async fn get_thumbnail(&self, mxc: String, width: u32, height: u32) -> Result> { + let (width, height, crop) = self + .thumbnail_properties(width, height) + .unwrap_or((0, 0, false)); // 0, 0 because that's the original file + + if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), width, height) { + // Using saved thumbnail + let path; + + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "sha256_media")] { - debug!("Ignoring local or broken media MXC: {mxc}"); - continue; - } - - let path = self.get_media_file(&key); - - let file_metadata = match fs::metadata(path.clone()).await { - | Ok(file_metadata) => file_metadata, - | Err(e) => { - error!( - "Failed to obtain file metadata for MXC {mxc} at file path \ - \"{path:?}\", skipping: {e}" - ); - continue; - }, + path = services().globals.get_media_file_new(&key); }; - trace!(%mxc, ?path, "File metadata: {file_metadata:?}"); - - let file_created_at = match file_metadata.created() { - | Ok(value) => value, 
- | Err(err) if err.kind() == std::io::ErrorKind::Unsupported => { - debug!("btime is unsupported, using mtime instead"); - file_metadata.modified()? - }, - | Err(err) => { - error!("Could not delete MXC {mxc} at path {path:?}: {err:?}. Skipping..."); - continue; - }, + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(not(feature = "sha256_media"))] + { + path = services().globals.get_media_file(&key); }; - debug!("File created at: {file_created_at:?}"); + let mut file = Vec::new(); + File::open(path).await?.read_to_end(&mut file).await?; - if file_created_at >= time && before { - debug!( - "File is within (before) user duration, pushing to list of file paths and \ - keys to delete." - ); - remote_mxcs.push(mxc.to_string()); - } else if file_created_at <= time && after { - debug!( - "File is not within (after) user duration, pushing to list of file paths \ - and keys to delete." - ); - remote_mxcs.push(mxc.to_string()); - } - } + Ok(Some(FileMeta { + content_disposition, + content_type, + file: file.clone(), + })) + } else if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), 0, 0) { + // Generate a thumbnail + let path; - if remote_mxcs.is_empty() { - return Err!(Database("Did not found any eligible MXCs to delete.")); - } - - debug_info!("Deleting media now in the past {time:?}"); - - let mut deletion_count: usize = 0; - - for mxc in remote_mxcs { - let Ok(mxc) = mxc.as_str().try_into() else { - debug_warn!("Invalid MXC in database, skipping"); - continue; + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "sha256_media")] + { + path = services().globals.get_media_file_new(&key); }; - debug_info!("Deleting MXC {mxc} from database and filesystem"); + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(not(feature = "sha256_media"))] + { + 
path = services().globals.get_media_file(&key); + }; - match self.delete(&mxc).await { - | Ok(()) => { - deletion_count = deletion_count.saturating_add(1); - }, - | Err(e) => { - warn!("Failed to delete {mxc}, ignoring error and skipping: {e}"); - continue; - }, + let mut file = Vec::new(); + File::open(path).await?.read_to_end(&mut file).await?; + + if let Ok(image) = image::load_from_memory(&file) { + let original_width = image.width(); + let original_height = image.height(); + if width > original_width || height > original_height { + return Ok(Some(FileMeta { + content_disposition, + content_type, + file: file.clone(), + })); + } + + let thumbnail = if crop { + image.resize_to_fill(width, height, FilterType::CatmullRom) + } else { + let (exact_width, exact_height) = { + // Copied from image::dynimage::resize_dimensions + // + // https://github.com/image-rs/image/blob/6edf8ae492c4bb1dacb41da88681ea74dab1bab3/src/math/utils.rs#L5-L11 + // Calculates the width and height an image should be + // resized to. This preserves aspect ratio, and based + // on the `fill` parameter will either fill the + // dimensions to fit inside the smaller constraint + // (will overflow the specified bounds on one axis to + // preserve aspect ratio), or will shrink so that both + // dimensions are completely contained within the given + // `width` and `height`, with empty space on one axis. 
+ let ratio = u64::from(original_width) * u64::from(height); + let nratio = u64::from(width) * u64::from(original_height); + + let use_width = nratio <= ratio; + let intermediate = if use_width { + u64::from(original_height) * u64::from(width) / u64::from(original_width) + } else { + u64::from(original_width) * u64::from(height) / u64::from(original_height) + }; + if use_width { + if u32::try_from(intermediate).is_ok() { + (width, intermediate as u32) + } else { + ((u64::from(width) * u64::from(u32::MAX) / intermediate) as u32, u32::MAX) + } + } else if u32::try_from(intermediate).is_ok() { + (intermediate as u32, height) + } else { + (u32::MAX, (u64::from(height) * u64::from(u32::MAX) / intermediate) as u32) + } + }; + + image.thumbnail_exact(exact_width, exact_height) + }; + + let mut thumbnail_bytes = Vec::new(); + thumbnail.write_to(&mut Cursor::new(&mut thumbnail_bytes), image::ImageFormat::Png)?; + + // Save thumbnail in database so we don't have to generate it again next time + let thumbnail_key = self.db.create_file_metadata( + None, + mxc, + width, + height, + content_disposition.as_deref(), + content_type.as_deref(), + )?; + + let path; + + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(feature = "sha256_media")] + { + path = services().globals.get_media_file_new(&thumbnail_key); + }; + + #[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental + #[cfg(not(feature = "sha256_media"))] + { + path = services().globals.get_media_file(&thumbnail_key); + }; + + let mut f = File::create(path).await?; + f.write_all(&thumbnail_bytes).await?; + + Ok(Some(FileMeta { + content_disposition, + content_type, + file: thumbnail_bytes.clone(), + })) + } else { + // Couldn't parse file to generate thumbnail, send original + Ok(Some(FileMeta { + content_disposition, + content_type, + file: file.clone(), + })) } + } else { + Ok(None) } - - Ok(deletion_count) } - pub async 
fn create_media_dir(&self) -> Result<()> { - let dir = self.get_media_dir(); - Ok(fs::create_dir_all(dir).await?) + pub async fn get_url_preview(&self, url: &str) -> Option { self.db.get_url_preview(url) } + + pub async fn remove_url_preview(&self, url: &str) -> Result<()> { + // TODO: also remove the downloaded image + self.db.remove_url_preview(url) } - async fn remove_media_file(&self, key: &[u8]) -> Result<()> { - let path = self.get_media_file(key); - let legacy = self.get_media_file_b64(key); - debug!(?key, ?path, ?legacy, "Removing media file"); - - let file_rm = fs::remove_file(&path); - let legacy_rm = fs::remove_file(&legacy); - let (file_rm, legacy_rm) = tokio::join!(file_rm, legacy_rm); - if let Err(e) = legacy_rm { - if self.services.server.config.media_compat_file_link { - debug_error!(?key, ?legacy, "Failed to remove legacy media symlink: {e}"); - } - } - - Ok(file_rm?) - } - - async fn create_media_file(&self, key: &[u8]) -> Result { - let path = self.get_media_file(key); - debug!(?key, ?path, "Creating media file"); - - let file = fs::File::create(&path).await?; - if self.services.server.config.media_compat_file_link { - let legacy = self.get_media_file_b64(key); - if let Err(e) = fs::symlink(&path, &legacy).await { - debug_error!( - key = ?encode_key(key), ?path, ?legacy, - "Failed to create legacy media symlink: {e}" - ); - } - } - - Ok(file) - } - - #[inline] - pub async fn get_metadata(&self, mxc: &Mxc<'_>) -> Option { - self.db - .search_file_metadata(mxc, &Dim::default()) - .await - .map(|metadata| FileMeta { - content_disposition: metadata.content_disposition, - content_type: metadata.content_type, - content: None, - }) - .ok() - } - - #[inline] - #[must_use] - pub fn get_media_file(&self, key: &[u8]) -> PathBuf { self.get_media_file_sha256(key) } - - /// new SHA256 file name media function. requires database migrated. 
uses - /// SHA256 hash of the base64 key as the file name - #[must_use] - pub fn get_media_file_sha256(&self, key: &[u8]) -> PathBuf { - let mut r = self.get_media_dir(); - // Using the hash of the base64 key as the filename - // This is to prevent the total length of the path from exceeding the maximum - // length in most filesystems - let digest = ::digest(key); - let encoded = encode_key(&digest); - r.push(encoded); - r - } - - /// old base64 file name media function - /// This is the old version of `get_media_file` that uses the full base64 - /// key as the filename. - #[must_use] - pub fn get_media_file_b64(&self, key: &[u8]) -> PathBuf { - let mut r = self.get_media_dir(); - let encoded = encode_key(key); - r.push(encoded); - r - } - - #[must_use] - pub fn get_media_dir(&self) -> PathBuf { - let mut r = PathBuf::new(); - r.push(self.services.server.config.database_path.clone()); - r.push("media"); - r + pub async fn set_url_preview(&self, url: &str, data: &UrlPreviewData) -> Result<()> { + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("valid system time"); + self.db.set_url_preview(url, data, now) } } -#[inline] -#[must_use] -pub fn encode_key(key: &[u8]) -> String { general_purpose::URL_SAFE_NO_PAD.encode(key) } +#[cfg(test)] +mod tests { + use std::path::PathBuf; + + use base64::{engine::general_purpose, Engine as _}; + + use super::*; + + struct MockedKVDatabase; + + impl Data for MockedKVDatabase { + fn create_file_metadata( + &self, _sender_user: Option<&str>, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, + content_type: Option<&str>, + ) -> Result> { + // copied from src/database/key_value/media.rs + let mut key = mxc.as_bytes().to_vec(); + key.push(0xFF); + key.extend_from_slice(&width.to_be_bytes()); + key.extend_from_slice(&height.to_be_bytes()); + key.push(0xFF); + key.extend_from_slice( + content_disposition + .as_ref() + .map(|f| f.as_bytes()) + .unwrap_or_default(), + ); + 
key.push(0xFF); + key.extend_from_slice( + content_type + .as_ref() + .map(|c| c.as_bytes()) + .unwrap_or_default(), + ); + + Ok(key) + } + + fn delete_file_mxc(&self, _mxc: String) -> Result<()> { todo!() } + + fn search_mxc_metadata_prefix(&self, _mxc: String) -> Result>> { todo!() } + + fn get_all_media_keys(&self) -> Result>> { todo!() } + + fn search_file_metadata( + &self, _mxc: String, _width: u32, _height: u32, + ) -> Result<(Option, Option, Vec)> { + todo!() + } + + fn remove_url_preview(&self, _url: &str) -> Result<()> { todo!() } + + fn set_url_preview(&self, _url: &str, _data: &UrlPreviewData, _timestamp: std::time::Duration) -> Result<()> { + todo!() + } + + fn get_url_preview(&self, _url: &str) -> Option { todo!() } + } + + #[tokio::test] + #[cfg(feature = "sha256_media")] + async fn long_file_names_works() { + static DB: MockedKVDatabase = MockedKVDatabase; + let media = Service { + db: &DB, + url_preview_mutex: RwLock::new(HashMap::new()), + }; + + let mxc = "mxc://example.com/ascERGshawAWawugaAcauga".to_owned(); + let width = 100; + let height = 100; + let content_disposition = "attachment; filename=\"this is a very long file name with spaces and special \ + characters like äöüß and even emoji like 🦀.png\""; + let content_type = "image/png"; + let key = media + .db + .create_file_metadata(None, mxc, width, height, Some(content_disposition), Some(content_type)) + .unwrap(); + let mut r = PathBuf::new(); + r.push("/tmp"); + r.push("media"); + // r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); + // use the sha256 hash of the key as the file name instead of the key itself + // this is because the base64 encoded key can be longer than 255 characters. 
+ r.push(general_purpose::URL_SAFE_NO_PAD.encode(::digest(key))); + // Check that the file path is not longer than 255 characters + // (255 is the maximum length of a file path on most file systems) + assert!( + r.to_str().unwrap().len() <= 255, + "File path is too long: {}", + r.to_str().unwrap().len() + ); + } +} diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs deleted file mode 100644 index 91660a58..00000000 --- a/src/service/media/preview.rs +++ /dev/null @@ -1,306 +0,0 @@ -//! URL Previews -//! -//! This functionality is gated by 'url_preview', but not at the unit level for -//! historical and simplicity reasons. Instead the feature gates the inclusion -//! of dependencies and nulls out results through the existing interface when -//! not featured. - -use std::time::SystemTime; - -use conduwuit::{Err, Result, debug, err}; -use conduwuit_core::implement; -use ipaddress::IPAddress; -use serde::Serialize; -use url::Url; - -use super::Service; - -#[derive(Serialize, Default)] -pub struct UrlPreviewData { - #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:title"))] - pub title: Option, - #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:description"))] - pub description: Option, - #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:image"))] - pub image: Option, - #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "matrix:image:size"))] - pub image_size: Option, - #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:image:width"))] - pub image_width: Option, - #[serde(skip_serializing_if = "Option::is_none", rename(serialize = "og:image:height"))] - pub image_height: Option, -} - -#[implement(Service)] -pub async fn remove_url_preview(&self, url: &str) -> Result<()> { - // TODO: also remove the downloaded image - self.db.remove_url_preview(url) -} - -#[implement(Service)] -pub async fn set_url_preview(&self, url: &str, data: &UrlPreviewData) 
-> Result<()> { - let now = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("valid system time"); - self.db.set_url_preview(url, data, now) -} - -#[implement(Service)] -pub async fn get_url_preview(&self, url: &Url) -> Result { - if let Ok(preview) = self.db.get_url_preview(url.as_str()).await { - return Ok(preview); - } - - // ensure that only one request is made per URL - let _request_lock = self.url_preview_mutex.lock(url.as_str()).await; - - match self.db.get_url_preview(url.as_str()).await { - | Ok(preview) => Ok(preview), - | Err(_) => self.request_url_preview(url).await, - } -} - -#[implement(Service)] -async fn request_url_preview(&self, url: &Url) -> Result { - if let Ok(ip) = IPAddress::parse(url.host_str().expect("URL previously validated")) { - if !self.services.client.valid_cidr_range(&ip) { - return Err!(Request(Forbidden("Requesting from this address is forbidden"))); - } - } - - let client = &self.services.client.url_preview; - let response = client.head(url.as_str()).send().await?; - - debug!(?url, "URL preview response headers: {:?}", response.headers()); - - if let Some(remote_addr) = response.remote_addr() { - debug!(?url, "URL preview response remote address: {:?}", remote_addr); - - if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { - if !self.services.client.valid_cidr_range(&ip) { - return Err!(Request(Forbidden("Requesting from this address is forbidden"))); - } - } - } - - let Some(content_type) = response.headers().get(reqwest::header::CONTENT_TYPE) else { - return Err!(Request(Unknown("Unknown or invalid Content-Type header"))); - }; - - let content_type = content_type - .to_str() - .map_err(|e| err!(Request(Unknown("Unknown or invalid Content-Type header: {e}"))))?; - - let data = match content_type { - | html if html.starts_with("text/html") => self.download_html(url.as_str()).await?, - | img if img.starts_with("image/") => self.download_image(url.as_str()).await?, - | _ => return 
Err!(Request(Unknown("Unsupported Content-Type"))), - }; - - self.set_url_preview(url.as_str(), &data).await?; - - Ok(data) -} - -#[cfg(feature = "url_preview")] -#[implement(Service)] -pub async fn download_image(&self, url: &str) -> Result { - use conduwuit::utils::random_string; - use image::ImageReader; - use ruma::Mxc; - - let image = self.services.client.url_preview.get(url).send().await?; - let image = image.bytes().await?; - let mxc = Mxc { - server_name: self.services.globals.server_name(), - media_id: &random_string(super::MXC_LENGTH), - }; - - self.create(&mxc, None, None, None, &image).await?; - - let cursor = std::io::Cursor::new(&image); - let (width, height) = match ImageReader::new(cursor).with_guessed_format() { - | Err(_) => (None, None), - | Ok(reader) => match reader.into_dimensions() { - | Err(_) => (None, None), - | Ok((width, height)) => (Some(width), Some(height)), - }, - }; - - Ok(UrlPreviewData { - image: Some(mxc.to_string()), - image_size: Some(image.len()), - image_width: width, - image_height: height, - ..Default::default() - }) -} - -#[cfg(not(feature = "url_preview"))] -#[implement(Service)] -pub async fn download_image(&self, _url: &str) -> Result { - Err!(FeatureDisabled("url_preview")) -} - -#[cfg(feature = "url_preview")] -#[implement(Service)] -async fn download_html(&self, url: &str) -> Result { - use webpage::HTML; - - let client = &self.services.client.url_preview; - let mut response = client.get(url).send().await?; - - let mut bytes: Vec = Vec::new(); - while let Some(chunk) = response.chunk().await? 
{ - bytes.extend_from_slice(&chunk); - if bytes.len() > self.services.globals.url_preview_max_spider_size() { - debug!( - "Response body from URL {} exceeds url_preview_max_spider_size ({}), not \ - processing the rest of the response body and assuming our necessary data is in \ - this range.", - url, - self.services.globals.url_preview_max_spider_size() - ); - break; - } - } - let body = String::from_utf8_lossy(&bytes); - let Ok(html) = HTML::from_string(body.to_string(), Some(url.to_owned())) else { - return Err!(Request(Unknown("Failed to parse HTML"))); - }; - - let mut data = match html.opengraph.images.first() { - | None => UrlPreviewData::default(), - | Some(obj) => self.download_image(&obj.url).await?, - }; - - let props = html.opengraph.properties; - - /* use OpenGraph title/description, but fall back to HTML if not available */ - data.title = props.get("title").cloned().or(html.title); - data.description = props.get("description").cloned().or(html.description); - - Ok(data) -} - -#[cfg(not(feature = "url_preview"))] -#[implement(Service)] -async fn download_html(&self, _url: &str) -> Result { - Err!(FeatureDisabled("url_preview")) -} - -#[implement(Service)] -pub fn url_preview_allowed(&self, url: &Url) -> bool { - if ["http", "https"] - .iter() - .all(|&scheme| scheme != url.scheme().to_lowercase()) - { - debug!("Ignoring non-HTTP/HTTPS URL to preview: {}", url); - return false; - } - - let host = match url.host_str() { - | None => { - debug!("Ignoring URL preview for a URL that does not have a host (?): {}", url); - return false; - }, - | Some(h) => h.to_owned(), - }; - - let allowlist_domain_contains = self - .services - .globals - .url_preview_domain_contains_allowlist(); - let allowlist_domain_explicit = self - .services - .globals - .url_preview_domain_explicit_allowlist(); - let denylist_domain_explicit = self.services.globals.url_preview_domain_explicit_denylist(); - let allowlist_url_contains = 
self.services.globals.url_preview_url_contains_allowlist(); - - if allowlist_domain_contains.contains(&"*".to_owned()) - || allowlist_domain_explicit.contains(&"*".to_owned()) - || allowlist_url_contains.contains(&"*".to_owned()) - { - debug!("Config key contains * which is allowing all URL previews. Allowing URL {}", url); - return true; - } - - if !host.is_empty() { - if denylist_domain_explicit.contains(&host) { - debug!( - "Host {} is not allowed by url_preview_domain_explicit_denylist (check 1/4)", - &host - ); - return false; - } - - if allowlist_domain_explicit.contains(&host) { - debug!( - "Host {} is allowed by url_preview_domain_explicit_allowlist (check 2/4)", - &host - ); - return true; - } - - if allowlist_domain_contains - .iter() - .any(|domain_s| domain_s.contains(&host.clone())) - { - debug!( - "Host {} is allowed by url_preview_domain_contains_allowlist (check 3/4)", - &host - ); - return true; - } - - if allowlist_url_contains - .iter() - .any(|url_s| url.to_string().contains(url_s)) - { - debug!("URL {} is allowed by url_preview_url_contains_allowlist (check 4/4)", &host); - return true; - } - - // check root domain if available and if user has root domain checks - if self.services.globals.url_preview_check_root_domain() { - debug!("Checking root domain"); - match host.split_once('.') { - | None => return false, - | Some((_, root_domain)) => { - if denylist_domain_explicit.contains(&root_domain.to_owned()) { - debug!( - "Root domain {} is not allowed by \ - url_preview_domain_explicit_denylist (check 1/3)", - &root_domain - ); - return true; - } - - if allowlist_domain_explicit.contains(&root_domain.to_owned()) { - debug!( - "Root domain {} is allowed by url_preview_domain_explicit_allowlist \ - (check 2/3)", - &root_domain - ); - return true; - } - - if allowlist_domain_contains - .iter() - .any(|domain_s| domain_s.contains(&root_domain.to_owned())) - { - debug!( - "Root domain {} is allowed by url_preview_domain_contains_allowlist \ - (check 
3/3)", - &root_domain - ); - return true; - } - }, - } - } - } - - false -} diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs deleted file mode 100644 index a1e874d8..00000000 --- a/src/service/media/remote.rs +++ /dev/null @@ -1,446 +0,0 @@ -use std::{fmt::Debug, time::Duration}; - -use conduwuit::{ - Err, Error, Result, debug_warn, err, implement, - utils::content_disposition::make_content_disposition, -}; -use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE, HeaderValue}; -use ruma::{ - Mxc, ServerName, UserId, - api::{ - OutgoingRequest, - client::{ - error::ErrorKind::{NotFound, Unrecognized}, - media, - }, - federation, - federation::authenticated_media::{Content, FileOrLocation}, - }, -}; - -use super::{Dim, FileMeta}; - -#[implement(super::Service)] -pub async fn fetch_remote_thumbnail( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - server: Option<&ServerName>, - timeout_ms: Duration, - dim: &Dim, -) -> Result { - self.check_fetch_authorized(mxc)?; - - let result = self - .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) - .await; - - if let Err(Error::Request(NotFound, ..)) = &result { - return self - .fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim) - .await; - } - - result -} - -#[implement(super::Service)] -pub async fn fetch_remote_content( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - server: Option<&ServerName>, - timeout_ms: Duration, -) -> Result { - self.check_fetch_authorized(mxc)?; - - let result = self - .fetch_content_authenticated(mxc, user, server, timeout_ms) - .await; - - if let Err(Error::Request(NotFound, ..)) = &result { - return self - .fetch_content_unauthenticated(mxc, user, server, timeout_ms) - .await; - } - - result -} - -#[implement(super::Service)] -async fn fetch_thumbnail_authenticated( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - server: Option<&ServerName>, - timeout_ms: Duration, - dim: &Dim, -) -> Result { - use 
federation::authenticated_media::get_content_thumbnail::v1::{Request, Response}; - - let request = Request { - media_id: mxc.media_id.into(), - method: dim.method.clone().into(), - width: dim.width.into(), - height: dim.height.into(), - animated: true.into(), - timeout_ms, - }; - - let Response { content, .. } = self.federation_request(mxc, user, server, request).await?; - - match content { - | FileOrLocation::File(content) => - self.handle_thumbnail_file(mxc, user, dim, content).await, - | FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await, - } -} - -#[implement(super::Service)] -async fn fetch_content_authenticated( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - server: Option<&ServerName>, - timeout_ms: Duration, -) -> Result { - use federation::authenticated_media::get_content::v1::{Request, Response}; - - let request = Request { - media_id: mxc.media_id.into(), - timeout_ms, - }; - - let Response { content, .. } = self.federation_request(mxc, user, server, request).await?; - - match content { - | FileOrLocation::File(content) => self.handle_content_file(mxc, user, content).await, - | FileOrLocation::Location(location) => self.handle_location(mxc, user, &location).await, - } -} - -#[allow(deprecated)] -#[implement(super::Service)] -async fn fetch_thumbnail_unauthenticated( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - server: Option<&ServerName>, - timeout_ms: Duration, - dim: &Dim, -) -> Result { - use media::get_content_thumbnail::v3::{Request, Response}; - - let request = Request { - allow_remote: true, - allow_redirect: true, - animated: true.into(), - method: dim.method.clone().into(), - width: dim.width.into(), - height: dim.height.into(), - server_name: mxc.server_name.into(), - media_id: mxc.media_id.into(), - timeout_ms, - }; - - let Response { - file, content_type, content_disposition, .. 
- } = self.federation_request(mxc, user, server, request).await?; - - let content = Content { file, content_type, content_disposition }; - - self.handle_thumbnail_file(mxc, user, dim, content).await -} - -#[allow(deprecated)] -#[implement(super::Service)] -async fn fetch_content_unauthenticated( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - server: Option<&ServerName>, - timeout_ms: Duration, -) -> Result { - use media::get_content::v3::{Request, Response}; - - let request = Request { - allow_remote: true, - allow_redirect: true, - server_name: mxc.server_name.into(), - media_id: mxc.media_id.into(), - timeout_ms, - }; - - let Response { - file, content_type, content_disposition, .. - } = self.federation_request(mxc, user, server, request).await?; - - let content = Content { file, content_type, content_disposition }; - - self.handle_content_file(mxc, user, content).await -} - -#[implement(super::Service)] -async fn handle_thumbnail_file( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - dim: &Dim, - content: Content, -) -> Result { - let content_disposition = make_content_disposition( - content.content_disposition.as_ref(), - content.content_type.as_deref(), - None, - ); - - self.upload_thumbnail( - mxc, - user, - Some(&content_disposition), - content.content_type.as_deref(), - dim, - &content.file, - ) - .await - .map(|()| FileMeta { - content: Some(content.file), - content_type: content.content_type.map(Into::into), - content_disposition: Some(content_disposition), - }) -} - -#[implement(super::Service)] -async fn handle_content_file( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - content: Content, -) -> Result { - let content_disposition = make_content_disposition( - content.content_disposition.as_ref(), - content.content_type.as_deref(), - None, - ); - - self.create( - mxc, - user, - Some(&content_disposition), - content.content_type.as_deref(), - &content.file, - ) - .await - .map(|()| FileMeta { - content: Some(content.file), - content_type: 
content.content_type.map(Into::into), - content_disposition: Some(content_disposition), - }) -} - -#[implement(super::Service)] -async fn handle_location( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - location: &str, -) -> Result { - self.location_request(location).await.map_err(|error| { - err!(Request(NotFound( - debug_warn!(%mxc, ?user, ?location, ?error, "Fetching media from location failed") - ))) - }) -} - -#[implement(super::Service)] -async fn location_request(&self, location: &str) -> Result { - let response = self - .services - .client - .extern_media - .get(location) - .send() - .await?; - - let content_type = response - .headers() - .get(CONTENT_TYPE) - .map(HeaderValue::to_str) - .and_then(Result::ok) - .map(str::to_owned); - - let content_disposition = response - .headers() - .get(CONTENT_DISPOSITION) - .map(HeaderValue::as_bytes) - .map(TryFrom::try_from) - .and_then(Result::ok); - - response - .bytes() - .await - .map(Vec::from) - .map_err(Into::into) - .map(|content| FileMeta { - content: Some(content), - content_type: content_type.clone(), - content_disposition: Some(make_content_disposition( - content_disposition.as_ref(), - content_type.as_deref(), - None, - )), - }) -} - -#[implement(super::Service)] -async fn federation_request( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - server: Option<&ServerName>, - request: Request, -) -> Result -where - Request: OutgoingRequest + Send + Debug, -{ - self.services - .sending - .send_federation_request(server.unwrap_or(mxc.server_name), request) - .await - .map_err(|error| handle_federation_error(mxc, user, server, error)) -} - -// Handles and adjusts the error for the caller to determine if they should -// request the fallback endpoint or give up. 
-fn handle_federation_error( - mxc: &Mxc<'_>, - user: Option<&UserId>, - server: Option<&ServerName>, - error: Error, -) -> Error { - let fallback = || { - err!(Request(NotFound( - debug_error!(%mxc, ?user, ?server, ?error, "Remote media not found") - ))) - }; - - // Matrix server responses for fallback always taken. - if error.kind() == NotFound || error.kind() == Unrecognized { - return fallback(); - } - - // If we get these from any middleware we'll try the other endpoint rather than - // giving up too early. - if error.status_code().is_redirection() - || error.status_code().is_client_error() - || error.status_code().is_server_error() - { - return fallback(); - } - - // Reached for 5xx errors. This is where we don't fallback given the likelyhood - // the other endpoint will also be a 5xx and we're wasting time. - error -} - -#[implement(super::Service)] -#[allow(deprecated)] -pub async fn fetch_remote_thumbnail_legacy( - &self, - body: &media::get_content_thumbnail::v3::Request, -) -> Result { - let mxc = Mxc { - server_name: &body.server_name, - media_id: &body.media_id, - }; - - self.check_legacy_freeze()?; - self.check_fetch_authorized(&mxc)?; - let reponse = self - .services - .sending - .send_federation_request(mxc.server_name, media::get_content_thumbnail::v3::Request { - allow_remote: body.allow_remote, - height: body.height, - width: body.width, - method: body.method.clone(), - server_name: body.server_name.clone(), - media_id: body.media_id.clone(), - timeout_ms: body.timeout_ms, - allow_redirect: body.allow_redirect, - animated: body.animated, - }) - .await?; - - let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; - self.upload_thumbnail(&mxc, None, None, reponse.content_type.as_deref(), &dim, &reponse.file) - .await?; - - Ok(reponse) -} - -#[implement(super::Service)] -#[allow(deprecated)] -pub async fn fetch_remote_content_legacy( - &self, - mxc: &Mxc<'_>, - allow_redirect: bool, - timeout_ms: Duration, -) -> Result { - 
self.check_legacy_freeze()?; - self.check_fetch_authorized(mxc)?; - let response = self - .services - .sending - .send_federation_request(mxc.server_name, media::get_content::v3::Request { - allow_remote: true, - server_name: mxc.server_name.into(), - media_id: mxc.media_id.into(), - timeout_ms, - allow_redirect, - }) - .await?; - - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); - - self.create( - mxc, - None, - Some(&content_disposition), - response.content_type.as_deref(), - &response.file, - ) - .await?; - - Ok(response) -} - -#[implement(super::Service)] -fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> { - if self - .services - .moderation - .is_remote_server_media_downloads_forbidden(mxc.server_name) - { - // we'll lie to the client and say the blocked server's media was not found and - // log. the client has no way of telling anyways so this is a security bonus. - debug_warn!(%mxc, "Received request for media on blocklisted server"); - return Err!(Request(NotFound("Media not found."))); - } - - Ok(()) -} - -#[implement(super::Service)] -fn check_legacy_freeze(&self) -> Result<()> { - self.services - .server - .config - .freeze_legacy_media - .then_some(()) - .ok_or(err!(Request(NotFound("Remote media is frozen.")))) -} diff --git a/src/service/media/tests.rs b/src/service/media/tests.rs deleted file mode 100644 index 651e0ade..00000000 --- a/src/service/media/tests.rs +++ /dev/null @@ -1,105 +0,0 @@ -#![cfg(test)] - -#[tokio::test] -#[cfg(disable)] //TODO: fixme -async fn long_file_names_works() { - use std::path::PathBuf; - - use base64::{Engine as _, engine::general_purpose}; - - use super::*; - - struct MockedKVDatabase; - - impl Data for MockedKVDatabase { - fn create_file_metadata( - &self, - _sender_user: Option<&str>, - mxc: String, - width: u32, - height: u32, - content_disposition: Option<&str>, - content_type: Option<&str>, - ) -> Result> { - 
// copied from src/database/key_value/media.rs - let mut key = mxc.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(&width.to_be_bytes()); - key.extend_from_slice(&height.to_be_bytes()); - key.push(0xFF); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); - key.push(0xFF); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); - - Ok(key) - } - - fn delete_file_mxc(&self, _mxc: String) -> Result<()> { todo!() } - - fn search_mxc_metadata_prefix(&self, _mxc: String) -> Result>> { todo!() } - - fn get_all_media_keys(&self) -> Vec> { todo!() } - - fn search_file_metadata( - &self, - _mxc: String, - _width: u32, - _height: u32, - ) -> Result<(Option, Option, Vec)> { - todo!() - } - - fn remove_url_preview(&self, _url: &str) -> Result<()> { todo!() } - - fn set_url_preview( - &self, - _url: &str, - _data: &UrlPreviewData, - _timestamp: std::time::Duration, - ) -> Result<()> { - todo!() - } - - fn get_url_preview(&self, _url: &str) -> Option { todo!() } - } - - let db: Arc = Arc::new(MockedKVDatabase); - let mxc = "mxc://example.com/ascERGshawAWawugaAcauga".to_owned(); - let width = 100; - let height = 100; - let content_disposition = "attachment; filename=\"this is a very long file name with spaces \ - and special characters like äöüß and even emoji like 🦀.png\""; - let content_type = "image/png"; - let key = db - .create_file_metadata( - None, - mxc, - width, - height, - Some(content_disposition), - Some(content_type), - ) - .unwrap(); - let mut r = PathBuf::from("/tmp/media"); - // r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); - // use the sha256 hash of the key as the file name instead of the key itself - // this is because the base64 encoded key can be longer than 255 characters. 
- r.push(general_purpose::URL_SAFE_NO_PAD.encode(::digest(key))); - // Check that the file path is not longer than 255 characters - // (255 is the maximum length of a file path on most file systems) - assert!( - r.to_str().unwrap().len() <= 255, - "File path is too long: {}", - r.to_str().unwrap().len() - ); -} diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs deleted file mode 100644 index e5a98774..00000000 --- a/src/service/media/thumbnail.rs +++ /dev/null @@ -1,265 +0,0 @@ -//! Media Thumbnails -//! -//! This functionality is gated by 'media_thumbnail', but not at the unit level -//! for historical and simplicity reasons. Instead the feature gates the -//! inclusion of dependencies and nulls out results using the existing interface -//! when not featured. - -use std::{cmp, num::Saturating as Sat}; - -use conduwuit::{Result, checked, err, implement}; -use ruma::{Mxc, UInt, UserId, http_headers::ContentDisposition, media::Method}; -use tokio::{ - fs, - io::{AsyncReadExt, AsyncWriteExt}, -}; - -use super::{FileMeta, data::Metadata}; - -/// Dimension specification for a thumbnail. -#[derive(Debug)] -pub struct Dim { - pub width: u32, - pub height: u32, - pub method: Method, -} - -impl super::Service { - /// Uploads or replaces a file thumbnail. - #[allow(clippy::too_many_arguments)] - pub async fn upload_thumbnail( - &self, - mxc: &Mxc<'_>, - user: Option<&UserId>, - content_disposition: Option<&ContentDisposition>, - content_type: Option<&str>, - dim: &Dim, - file: &[u8], - ) -> Result<()> { - let key = - self.db - .create_file_metadata(mxc, user, dim, content_disposition, content_type)?; - - //TODO: Dangling metadata in database if creation fails - let mut f = self.create_media_file(&key).await?; - f.write_all(file).await?; - - Ok(()) - } - - /// Downloads a file's thumbnail. 
- /// - /// Here's an example on how it works: - /// - /// - Client requests an image with width=567, height=567 - /// - Server rounds that up to (800, 600), so it doesn't have to save too - /// many thumbnails - /// - Server rounds that up again to (958, 600) to fix the aspect ratio - /// (only for width,height>96) - /// - Server creates the thumbnail and sends it to the user - /// - /// For width,height <= 96 the server uses another thumbnailing algorithm - /// which crops the image afterwards. - #[tracing::instrument(skip(self), name = "thumbnail", level = "debug")] - pub async fn get_thumbnail(&self, mxc: &Mxc<'_>, dim: &Dim) -> Result> { - // 0, 0 because that's the original file - let dim = dim.normalized(); - - match self.db.search_file_metadata(mxc, &dim).await { - | Ok(metadata) => self.get_thumbnail_saved(metadata).await, - | _ => match self.db.search_file_metadata(mxc, &Dim::default()).await { - | Ok(metadata) => self.get_thumbnail_generate(mxc, &dim, metadata).await, - | _ => Ok(None), - }, - } - } -} - -/// Using saved thumbnail -#[implement(super::Service)] -#[tracing::instrument(name = "saved", level = "debug", skip(self, data))] -async fn get_thumbnail_saved(&self, data: Metadata) -> Result> { - let mut content = Vec::new(); - let path = self.get_media_file(&data.key); - fs::File::open(path) - .await? - .read_to_end(&mut content) - .await?; - - Ok(Some(into_filemeta(data, content))) -} - -/// Generate a thumbnail -#[cfg(feature = "media_thumbnail")] -#[implement(super::Service)] -#[tracing::instrument(name = "generate", level = "debug", skip(self, data))] -async fn get_thumbnail_generate( - &self, - mxc: &Mxc<'_>, - dim: &Dim, - data: Metadata, -) -> Result> { - let mut content = Vec::new(); - let path = self.get_media_file(&data.key); - fs::File::open(path) - .await? 
- .read_to_end(&mut content) - .await?; - - let Ok(image) = image::load_from_memory(&content) else { - // Couldn't parse file to generate thumbnail, send original - return Ok(Some(into_filemeta(data, content))); - }; - - if dim.width > image.width() || dim.height > image.height() { - return Ok(Some(into_filemeta(data, content))); - } - - let mut thumbnail_bytes = Vec::new(); - let thumbnail = thumbnail_generate(&image, dim)?; - let mut cursor = std::io::Cursor::new(&mut thumbnail_bytes); - thumbnail - .write_to(&mut cursor, image::ImageFormat::Png) - .map_err(|error| err!(error!(?error, "Error writing PNG thumbnail.")))?; - - // Save thumbnail in database so we don't have to generate it again next time - let thumbnail_key = self.db.create_file_metadata( - mxc, - None, - dim, - data.content_disposition.as_ref(), - data.content_type.as_deref(), - )?; - - let mut f = self.create_media_file(&thumbnail_key).await?; - f.write_all(&thumbnail_bytes).await?; - - Ok(Some(into_filemeta(data, thumbnail_bytes))) -} - -#[cfg(not(feature = "media_thumbnail"))] -#[implement(super::Service)] -#[tracing::instrument(name = "fallback", level = "debug", skip_all)] -async fn get_thumbnail_generate( - &self, - _mxc: &Mxc<'_>, - _dim: &Dim, - data: Metadata, -) -> Result> { - self.get_thumbnail_saved(data).await -} - -#[cfg(feature = "media_thumbnail")] -fn thumbnail_generate( - image: &image::DynamicImage, - requested: &Dim, -) -> Result { - use image::imageops::FilterType; - - let thumbnail = if !requested.crop() { - let Dim { width, height, .. 
} = requested.scaled(&Dim { - width: image.width(), - height: image.height(), - ..Dim::default() - })?; - image.thumbnail_exact(width, height) - } else { - image.resize_to_fill(requested.width, requested.height, FilterType::CatmullRom) - }; - - Ok(thumbnail) -} - -fn into_filemeta(data: Metadata, content: Vec) -> FileMeta { - FileMeta { - content: Some(content), - content_type: data.content_type, - content_disposition: data.content_disposition, - } -} - -impl Dim { - /// Instantiate a Dim from Ruma integers with optional method. - pub fn from_ruma(width: UInt, height: UInt, method: Option) -> Result { - let width = width - .try_into() - .map_err(|e| err!(Request(InvalidParam("Width is invalid: {e:?}"))))?; - let height = height - .try_into() - .map_err(|e| err!(Request(InvalidParam("Height is invalid: {e:?}"))))?; - - Ok(Self::new(width, height, method)) - } - - /// Instantiate a Dim with optional method - #[inline] - #[must_use] - pub fn new(width: u32, height: u32, method: Option) -> Self { - Self { - width, - height, - method: method.unwrap_or(Method::Scale), - } - } - - pub fn scaled(&self, image: &Self) -> Result { - let image_width = image.width; - let image_height = image.height; - - let width = cmp::min(self.width, image_width); - let height = cmp::min(self.height, image_height); - - let use_width = Sat(width) * Sat(image_height) < Sat(height) * Sat(image_width); - - let x = if use_width { - let dividend = (Sat(height) * Sat(image_width)).0; - checked!(dividend / image_height)? - } else { - width - }; - - let y = if !use_width { - let dividend = (Sat(width) * Sat(image_height)).0; - checked!(dividend / image_width)? - } else { - height - }; - - Ok(Self { - width: x, - height: y, - method: Method::Scale, - }) - } - - /// Returns width, height of the thumbnail and whether it should be cropped. - /// Returns None when the server should send the original file. - /// Ignores the input Method. 
- #[must_use] - pub fn normalized(&self) -> Self { - match (self.width, self.height) { - | (0..=32, 0..=32) => Self::new(32, 32, Some(Method::Crop)), - | (0..=96, 0..=96) => Self::new(96, 96, Some(Method::Crop)), - | (0..=320, 0..=240) => Self::new(320, 240, Some(Method::Scale)), - | (0..=640, 0..=480) => Self::new(640, 480, Some(Method::Scale)), - | (0..=800, 0..=600) => Self::new(800, 600, Some(Method::Scale)), - | _ => Self::default(), - } - } - - /// Returns true if the method is Crop. - #[inline] - #[must_use] - pub fn crop(&self) -> bool { self.method == Method::Crop } -} - -impl Default for Dim { - #[inline] - fn default() -> Self { - Self { - width: 0, - height: 0, - method: Method::Scale, - } - } -} diff --git a/src/service/migrations.rs b/src/service/migrations.rs deleted file mode 100644 index 512a7867..00000000 --- a/src/service/migrations.rs +++ /dev/null @@ -1,559 +0,0 @@ -use std::cmp; - -use conduwuit::{ - Err, Result, debug, debug_info, debug_warn, error, info, - result::NotFound, - utils::{ - IterStream, ReadyExt, - stream::{TryExpect, TryIgnore}, - }, - warn, -}; -use futures::{FutureExt, StreamExt}; -use itertools::Itertools; -use ruma::{ - OwnedUserId, RoomId, UserId, - events::{ - GlobalAccountDataEventType, push_rules::PushRulesEvent, room::member::MembershipState, - }, - push::Ruleset, -}; - -use crate::{Services, media}; - -/// The current schema version. -/// - If database is opened at greater version we reject with error. The -/// software must be updated for backward-incompatible changes. -/// - If database is opened at lesser version we apply migrations up to this. -/// Note that named-feature migrations may also be performed when opening at -/// equal or lesser version. These are expected to be backward-compatible. 
-pub(crate) const DATABASE_VERSION: u64 = 17; - -pub(crate) async fn migrations(services: &Services) -> Result<()> { - let users_count = services.users.count().await; - - // Matrix resource ownership is based on the server name; changing it - // requires recreating the database from scratch. - if users_count > 0 { - let server_user = &services.globals.server_user; - if !services.users.exists(server_user).await { - error!("The {server_user} server user does not exist, and the database is not new."); - return Err!(Database( - "Cannot reuse an existing database after changing the server name, please \ - delete the old one first.", - )); - } - } - - if users_count > 0 { - migrate(services).await - } else { - fresh(services).await - } -} - -async fn fresh(services: &Services) -> Result<()> { - let db = &services.db; - - services.globals.db.bump_database_version(DATABASE_VERSION); - - db["global"].insert(b"feat_sha256_media", []); - db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); - db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []); - db["global"].insert(b"fix_referencedevents_missing_sep", []); - db["global"].insert(b"fix_readreceiptid_readreceipt_duplicates", []); - - // Create the admin room and server user on first run - crate::admin::create_admin_room(services).boxed().await?; - - warn!("Created new RocksDB database with version {DATABASE_VERSION}"); - - Ok(()) -} - -/// Apply any migrations -async fn migrate(services: &Services) -> Result<()> { - let db = &services.db; - let config = &services.server.config; - - if services.globals.db.database_version().await < 11 { - return Err!(Database( - "Database schema version {} is no longer supported", - services.globals.db.database_version().await - )); - } - - if services.globals.db.database_version().await < 12 { - db_lt_12(services).await?; - } - - // This migration can be reused as-is anytime the server-default rules are - // updated. 
- if services.globals.db.database_version().await < 13 { - db_lt_13(services).await?; - } - - if db["global"].get(b"feat_sha256_media").await.is_not_found() { - media::migrations::migrate_sha256_media(services).await?; - } else if config.media_startup_check { - media::migrations::checkup_sha256_media(services).await?; - } - - if db["global"] - .get(b"fix_bad_double_separator_in_state_cache") - .await - .is_not_found() - { - fix_bad_double_separator_in_state_cache(services).await?; - } - - if db["global"] - .get(b"retroactively_fix_bad_data_from_roomuserid_joined") - .await - .is_not_found() - { - retroactively_fix_bad_data_from_roomuserid_joined(services).await?; - } - - if db["global"] - .get(b"fix_referencedevents_missing_sep") - .await - .is_not_found() - || services.globals.db.database_version().await < 17 - { - fix_referencedevents_missing_sep(services).await?; - } - - if db["global"] - .get(b"fix_readreceiptid_readreceipt_duplicates") - .await - .is_not_found() - || services.globals.db.database_version().await < 17 - { - fix_readreceiptid_readreceipt_duplicates(services).await?; - } - - if services.globals.db.database_version().await < 17 { - services.globals.db.bump_database_version(17); - info!("Migration: Bumped database version to 17"); - } - - assert_eq!( - services.globals.db.database_version().await, - DATABASE_VERSION, - "Failed asserting local database version {} is equal to known latest conduwuit database \ - version {}", - services.globals.db.database_version().await, - DATABASE_VERSION, - ); - - { - let patterns = services.globals.forbidden_usernames(); - if !patterns.is_empty() { - services - .users - .stream() - .filter(|user_id| services.users.is_active_local(user_id)) - .ready_for_each(|user_id| { - let matches = patterns.matches(user_id.localpart()); - if matches.matched_any() { - warn!( - "User {} matches the following forbidden username patterns: {}", - user_id.to_string(), - matches - .into_iter() - .map(|x| &patterns.patterns()[x]) - 
.join(", ") - ); - } - }) - .await; - } - } - - { - let patterns = services.globals.forbidden_alias_names(); - if !patterns.is_empty() { - for room_id in services - .rooms - .metadata - .iter_ids() - .map(ToOwned::to_owned) - .collect::>() - .await - { - services - .rooms - .alias - .local_aliases_for_room(&room_id) - .ready_for_each(|room_alias| { - let matches = patterns.matches(room_alias.alias()); - if matches.matched_any() { - warn!( - "Room with alias {} ({}) matches the following forbidden room \ - name patterns: {}", - room_alias, - &room_id, - matches - .into_iter() - .map(|x| &patterns.patterns()[x]) - .join(", ") - ); - } - }) - .await; - } - } - } - - info!("Loaded RocksDB database with schema version {DATABASE_VERSION}"); - - Ok(()) -} - -async fn db_lt_12(services: &Services) -> Result<()> { - for username in &services - .users - .list_local_users() - .map(UserId::to_owned) - .collect::>() - .await - { - let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name) - { - | Ok(u) => u, - | Err(e) => { - warn!("Invalid username {username}: {e}"); - continue; - }, - }; - - let mut account_data: PushRulesEvent = services - .account_data - .get_global(&user, GlobalAccountDataEventType::PushRules) - .await - .expect("Username is invalid"); - - let rules_list = &mut account_data.content.global; - - //content rule - { - let content_rule_transformation = - [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; - - let rule = rules_list.content.get(content_rule_transformation[0]); - if rule.is_some() { - let mut rule = rule.unwrap().clone(); - content_rule_transformation[1].clone_into(&mut rule.rule_id); - rules_list - .content - .shift_remove(content_rule_transformation[0]); - rules_list.content.insert(rule); - } - } - - //underride rules - { - let underride_rule_transformation = [ - [".m.rules.call", ".m.rule.call"], - [".m.rules.room_one_to_one", ".m.rule.room_one_to_one"], - [".m.rules.encrypted_room_one_to_one", 
".m.rule.encrypted_room_one_to_one"], - [".m.rules.message", ".m.rule.message"], - [".m.rules.encrypted", ".m.rule.encrypted"], - ]; - - for transformation in underride_rule_transformation { - let rule = rules_list.underride.get(transformation[0]); - if let Some(rule) = rule { - let mut rule = rule.clone(); - transformation[1].clone_into(&mut rule.rule_id); - rules_list.underride.shift_remove(transformation[0]); - rules_list.underride.insert(rule); - } - } - } - - services - .account_data - .update( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - ) - .await?; - } - - services.globals.db.bump_database_version(12); - info!("Migration: 11 -> 12 finished"); - Ok(()) -} - -async fn db_lt_13(services: &Services) -> Result<()> { - for username in &services - .users - .list_local_users() - .map(UserId::to_owned) - .collect::>() - .await - { - let user = match UserId::parse_with_server_name(username.as_str(), &services.server.name) - { - | Ok(u) => u, - | Err(e) => { - warn!("Invalid username {username}: {e}"); - continue; - }, - }; - - let mut account_data: PushRulesEvent = services - .account_data - .get_global(&user, GlobalAccountDataEventType::PushRules) - .await - .expect("Username is invalid"); - - let user_default_rules = Ruleset::server_default(&user); - account_data - .content - .global - .update_with_server_default(user_default_rules); - - services - .account_data - .update( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - ) - .await?; - } - - services.globals.db.bump_database_version(13); - info!("Migration: 12 -> 13 finished"); - Ok(()) -} - -async fn fix_bad_double_separator_in_state_cache(services: &Services) -> Result<()> { - warn!("Fixing bad double separator in state_cache roomuserid_joined"); - - let db = &services.db; - let roomuserid_joined = 
&db["roomuserid_joined"]; - let _cork = db.cork_and_sync(); - - let mut iter_count: usize = 0; - roomuserid_joined - .raw_stream() - .ignore_err() - .ready_for_each(|(key, value)| { - let mut key = key.to_vec(); - iter_count = iter_count.saturating_add(1); - debug_info!(%iter_count); - let first_sep_index = key - .iter() - .position(|&i| i == 0xFF) - .expect("found 0xFF delim"); - - if key - .iter() - .get(first_sep_index..=first_sep_index.saturating_add(1)) - .copied() - .collect_vec() - == vec![0xFF, 0xFF] - { - debug_warn!("Found bad key: {key:?}"); - roomuserid_joined.remove(&key); - - key.remove(first_sep_index); - debug_warn!("Fixed key: {key:?}"); - roomuserid_joined.insert(&key, value); - } - }) - .await; - - db.db.sort()?; - db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); - - info!("Finished fixing"); - Ok(()) -} - -async fn retroactively_fix_bad_data_from_roomuserid_joined(services: &Services) -> Result<()> { - warn!("Retroactively fixing bad data from broken roomuserid_joined"); - - let db = &services.db; - let _cork = db.cork_and_sync(); - - let room_ids = services - .rooms - .metadata - .iter_ids() - .map(ToOwned::to_owned) - .collect::>() - .await; - - for room_id in &room_ids { - debug_info!("Fixing room {room_id}"); - - let users_in_room: Vec = services - .rooms - .state_cache - .room_members(room_id) - .map(ToOwned::to_owned) - .collect() - .await; - - let joined_members = users_in_room - .iter() - .stream() - .filter(|user_id| { - services - .rooms - .state_accessor - .get_member(room_id, user_id) - .map(|member| { - member.is_ok_and(|member| member.membership == MembershipState::Join) - }) - }) - .collect::>() - .await; - - let non_joined_members = users_in_room - .iter() - .stream() - .filter(|user_id| { - services - .rooms - .state_accessor - .get_member(room_id, user_id) - .map(|member| { - member.is_ok_and(|member| member.membership == MembershipState::Join) - }) - }) - .collect::>() - .await; - - for user_id in 
&joined_members { - debug_info!("User is joined, marking as joined"); - services.rooms.state_cache.mark_as_joined(user_id, room_id); - } - - for user_id in &non_joined_members { - debug_info!("User is left or banned, marking as left"); - services.rooms.state_cache.mark_as_left(user_id, room_id); - } - } - - for room_id in &room_ids { - debug_info!( - "Updating joined count for room {room_id} to fix servers in room after correcting \ - membership states" - ); - - services - .rooms - .state_cache - .update_joined_count(room_id) - .await; - } - - db.db.sort()?; - db["global"].insert(b"retroactively_fix_bad_data_from_roomuserid_joined", []); - - info!("Finished fixing"); - Ok(()) -} - -async fn fix_referencedevents_missing_sep(services: &Services) -> Result { - warn!("Fixing missing record separator between room_id and event_id in referencedevents"); - - let db = &services.db; - let cork = db.cork_and_sync(); - - let referencedevents = db["referencedevents"].clone(); - - let totals: (usize, usize) = (0, 0); - let (total, fixed) = referencedevents - .raw_stream() - .expect_ok() - .enumerate() - .ready_fold(totals, |mut a, (i, (key, val))| { - debug_assert!(val.is_empty(), "expected no value"); - - let has_sep = key.contains(&database::SEP); - - if !has_sep { - let key_str = std::str::from_utf8(key).expect("key not utf-8"); - let room_id_len = key_str.find('$').expect("missing '$' in key"); - let (room_id, event_id) = key_str.split_at(room_id_len); - debug!(?a, "fixing {room_id}, {event_id}"); - - let new_key = (room_id, event_id); - referencedevents.put_raw(new_key, val); - referencedevents.remove(key); - } - - a.0 = cmp::max(i, a.0); - a.1 = a.1.saturating_add((!has_sep).into()); - a - }) - .await; - - drop(cork); - info!(?total, ?fixed, "Fixed missing record separators in 'referencedevents'."); - - db["global"].insert(b"fix_referencedevents_missing_sep", []); - db.db.sort() -} - -async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result { - use 
conduwuit::arrayvec::ArrayString; - use ruma::identifiers_validation::MAX_BYTES; - - type ArrayId = ArrayString; - type Key<'a> = (&'a RoomId, u64, &'a UserId); - - warn!("Fixing undeleted entries in readreceiptid_readreceipt..."); - - let db = &services.db; - let cork = db.cork_and_sync(); - let readreceiptid_readreceipt = db["readreceiptid_readreceipt"].clone(); - - let mut cur_room: Option = None; - let mut cur_user: Option = None; - let (mut total, mut fixed): (usize, usize) = (0, 0); - readreceiptid_readreceipt - .keys() - .expect_ok() - .ready_for_each(|key: Key<'_>| { - let (room_id, _, user_id) = key; - let last_room = cur_room.replace( - room_id - .as_str() - .try_into() - .expect("invalid room_id in database"), - ); - - let last_user = cur_user.replace( - user_id - .as_str() - .try_into() - .expect("invalid user_id in database"), - ); - - let is_dup = cur_room == last_room && cur_user == last_user; - if is_dup { - readreceiptid_readreceipt.del(key); - } - - fixed = fixed.saturating_add(is_dup.into()); - total = total.saturating_add(1); - }) - .await; - - drop(cork); - info!(?total, ?fixed, "Fixed undeleted entries in readreceiptid_readreceipt."); - - db["global"].insert(b"fix_readreceiptid_readreceipt_duplicates", []); - db.db.sort() -} diff --git a/src/service/mod.rs b/src/service/mod.rs index eb15e5ec..4199b423 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,41 +1,306 @@ -#![type_length_limit = "8192"] -#![allow(refining_impl_trait)] +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, Mutex as StdMutex}, +}; -mod manager; -mod migrations; -mod service; -pub mod services; +use lru_cache::LruCache; +use tokio::sync::{broadcast, Mutex, RwLock}; -pub mod account_data; -pub mod admin; -pub mod announcements; -pub mod appservice; -pub mod client; -pub mod config; -pub mod emergency; -pub mod federation; -pub mod globals; -pub mod key_backups; -pub mod media; -pub mod moderation; -pub mod presence; -pub mod pusher; -pub mod resolver; 
-pub mod rooms; -pub mod sending; -pub mod server_keys; -pub mod sync; -pub mod transaction_ids; -pub mod uiaa; -pub mod users; +use crate::{Config, Result}; -extern crate conduwuit_core as conduwuit; -extern crate conduwuit_database as database; +pub(crate) mod account_data; +pub(crate) mod admin; +pub(crate) mod appservice; +pub(crate) mod globals; +pub(crate) mod key_backups; +pub(crate) mod media; +pub(crate) mod pdu; +pub(crate) mod presence; +pub(crate) mod pusher; +pub(crate) mod rooms; +pub(crate) mod sending; +pub(crate) mod transaction_ids; +pub(crate) mod uiaa; +pub(crate) mod users; -pub(crate) use service::{Args, Dep, Service}; +pub struct Services<'a> { + pub appservice: appservice::Service, + pub pusher: pusher::Service, + pub rooms: rooms::Service, + pub transaction_ids: transaction_ids::Service, + pub uiaa: uiaa::Service, + pub users: users::Service, + pub account_data: account_data::Service, + pub presence: Arc, + pub admin: Arc, + pub globals: globals::Service<'a>, + pub key_backups: key_backups::Service, + pub media: media::Service, + pub sending: Arc, +} -pub use crate::services::Services; +impl Services<'_> { + pub fn build< + D: appservice::Data + + pusher::Data + + rooms::Data + + transaction_ids::Data + + uiaa::Data + + users::Data + + account_data::Data + + presence::Data + + globals::Data + + key_backups::Data + + media::Data + + sending::Data + + 'static, + >( + db: &'static D, config: &Config, + tracing_reload_handle: tracing_subscriber::reload::Handle< + tracing_subscriber::EnvFilter, + tracing_subscriber::Registry, + >, + ) -> Result { + Ok(Self { + appservice: appservice::Service::build(db)?, + pusher: pusher::Service { + db, + }, + rooms: rooms::Service { + alias: rooms::alias::Service { + db, + }, + auth_chain: rooms::auth_chain::Service { + db, + }, + directory: rooms::directory::Service { + db, + }, + event_handler: rooms::event_handler::Service, + lazy_loading: rooms::lazy_loading::Service { + db, + lazy_load_waiting: 
Mutex::new(HashMap::new()), + }, + metadata: rooms::metadata::Service { + db, + }, + outlier: rooms::outlier::Service { + db, + }, + pdu_metadata: rooms::pdu_metadata::Service { + db, + }, + read_receipt: rooms::read_receipt::Service { + db, + }, + search: rooms::search::Service { + db, + }, + short: rooms::short::Service { + db, + }, + state: rooms::state::Service { + db, + }, + state_accessor: rooms::state_accessor::Service { + db, + server_visibility_cache: StdMutex::new(LruCache::new( + (f64::from(config.server_visibility_cache_capacity) * config.conduit_cache_capacity_modifier) + as usize, + )), + user_visibility_cache: StdMutex::new(LruCache::new( + (f64::from(config.user_visibility_cache_capacity) * config.conduit_cache_capacity_modifier) + as usize, + )), + }, + state_cache: rooms::state_cache::Service { + db, + }, + state_compressor: rooms::state_compressor::Service { + db, + stateinfo_cache: StdMutex::new(LruCache::new( + (f64::from(config.stateinfo_cache_capacity) * config.conduit_cache_capacity_modifier) as usize, + )), + }, + timeline: rooms::timeline::Service { + db, + lasttimelinecount_cache: Mutex::new(HashMap::new()), + }, + threads: rooms::threads::Service { + db, + }, + typing: rooms::typing::Service { + typing: RwLock::new(BTreeMap::new()), + last_typing_update: RwLock::new(BTreeMap::new()), + typing_update_sender: broadcast::channel(100).0, + }, + spaces: rooms::spaces::Service { + roomid_spacehierarchy_cache: Mutex::new(LruCache::new( + (f64::from(config.roomid_spacehierarchy_cache_capacity) + * config.conduit_cache_capacity_modifier) as usize, + )), + }, + user: rooms::user::Service { + db, + }, + }, + transaction_ids: transaction_ids::Service { + db, + }, + uiaa: uiaa::Service { + db, + }, + users: users::Service { + db, + connections: StdMutex::new(BTreeMap::new()), + }, + account_data: account_data::Service { + db, + }, + presence: presence::Service::build(db, config), + admin: admin::Service::build(), + key_backups: key_backups::Service { 
+ db, + }, + media: media::Service { + db, + url_preview_mutex: RwLock::new(HashMap::new()), + }, + sending: sending::Service::build(db, config), -conduwuit::mod_ctor! {} -conduwuit::mod_dtor! {} -conduwuit::rustc_flags_capture! {} + globals: globals::Service::load(db, config, tracing_reload_handle)?, + }) + } + + async fn memory_usage(&self) -> String { + let lazy_load_waiting = self.rooms.lazy_loading.lazy_load_waiting.lock().await.len(); + let server_visibility_cache = self + .rooms + .state_accessor + .server_visibility_cache + .lock() + .unwrap() + .len(); + let user_visibility_cache = self + .rooms + .state_accessor + .user_visibility_cache + .lock() + .unwrap() + .len(); + let stateinfo_cache = self + .rooms + .state_compressor + .stateinfo_cache + .lock() + .unwrap() + .len(); + let lasttimelinecount_cache = self + .rooms + .timeline + .lasttimelinecount_cache + .lock() + .await + .len(); + let roomid_spacehierarchy_cache = self + .rooms + .spaces + .roomid_spacehierarchy_cache + .lock() + .await + .len(); + let resolver_overrides_cache = self.globals.resolver.overrides.read().unwrap().len(); + let resolver_destinations_cache = self.globals.resolver.destinations.read().await.len(); + let servername_ratelimiter = self.globals.servername_ratelimiter.read().await.len(); + let bad_event_ratelimiter = self.globals.bad_event_ratelimiter.read().await.len(); + let bad_query_ratelimiter = self.globals.bad_query_ratelimiter.read().await.len(); + let bad_signature_ratelimiter = self.globals.bad_signature_ratelimiter.read().await.len(); + + format!( + "\ +lazy_load_waiting: {lazy_load_waiting} +server_visibility_cache: {server_visibility_cache} +user_visibility_cache: {user_visibility_cache} +stateinfo_cache: {stateinfo_cache} +lasttimelinecount_cache: {lasttimelinecount_cache} +roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache} +resolver_overrides_cache: {resolver_overrides_cache} +resolver_destinations_cache: {resolver_destinations_cache} 
+servername_ratelimiter: {servername_ratelimiter} +bad_event_ratelimiter: {bad_event_ratelimiter} +bad_query_ratelimiter: {bad_query_ratelimiter} +bad_signature_ratelimiter: {bad_signature_ratelimiter} +" + ) + } + + async fn clear_caches(&self, amount: u32) { + if amount > 0 { + self.rooms + .lazy_loading + .lazy_load_waiting + .lock() + .await + .clear(); + } + if amount > 1 { + self.rooms + .state_accessor + .server_visibility_cache + .lock() + .unwrap() + .clear(); + } + if amount > 2 { + self.rooms + .state_accessor + .user_visibility_cache + .lock() + .unwrap() + .clear(); + } + if amount > 3 { + self.rooms + .state_compressor + .stateinfo_cache + .lock() + .unwrap() + .clear(); + } + if amount > 4 { + self.rooms + .timeline + .lasttimelinecount_cache + .lock() + .await + .clear(); + } + if amount > 5 { + self.rooms + .spaces + .roomid_spacehierarchy_cache + .lock() + .await + .clear(); + } + if amount > 6 { + self.globals.resolver.overrides.write().unwrap().clear(); + self.globals.resolver.destinations.write().await.clear(); + } + if amount > 7 { + self.globals.resolver.resolver.clear_cache(); + } + if amount > 8 { + self.globals.servername_ratelimiter.write().await.clear(); + } + if amount > 9 { + self.globals.bad_event_ratelimiter.write().await.clear(); + } + if amount > 10 { + self.globals.bad_query_ratelimiter.write().await.clear(); + } + if amount > 11 { + self.globals.bad_signature_ratelimiter.write().await.clear(); + } + } +} diff --git a/src/service/moderation.rs b/src/service/moderation.rs deleted file mode 100644 index c3e55a1d..00000000 --- a/src/service/moderation.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::sync::Arc; - -use conduwuit::{Result, implement}; -use ruma::ServerName; - -use crate::{Dep, config}; - -pub struct Service { - services: Services, -} - -struct Services { - // pub server: Arc, - pub config: Dep, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - 
// server: args.server.clone(), - config: args.depend::("config"), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -#[implement(Service)] -#[must_use] -pub fn is_remote_server_ignored(&self, server_name: &ServerName) -> bool { - // We must never block federating with ourselves - if server_name == self.services.config.server_name { - return false; - } - - self.services - .config - .ignore_messages_from_server_names - .is_match(server_name.host()) -} - -#[implement(Service)] -#[must_use] -pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { - // We must never block federating with ourselves - if server_name == self.services.config.server_name { - return false; - } - - // Check if server is explicitly allowed - if self - .services - .config - .allowed_remote_server_names - .is_match(server_name.host()) - { - return false; - } - - // Check if server is explicitly forbidden - self.services - .config - .forbidden_remote_server_names - .is_match(server_name.host()) -} - -#[implement(Service)] -#[must_use] -pub fn is_remote_server_room_directory_forbidden(&self, server_name: &ServerName) -> bool { - // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) - // OR forbidden contains server - self.is_remote_server_forbidden(server_name) - || self - .services - .config - .forbidden_remote_room_directory_server_names - .is_match(server_name.host()) -} - -#[implement(Service)] -#[must_use] -pub fn is_remote_server_media_downloads_forbidden(&self, server_name: &ServerName) -> bool { - // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) - // OR forbidden contains server - self.is_remote_server_forbidden(server_name) - || self - .services - .config - .prevent_media_downloads_from - .is_match(server_name.host()) -} diff --git a/src/service/pdu.rs b/src/service/pdu.rs new file mode 100644 index 00000000..6dc965ff --- /dev/null +++ b/src/service/pdu.rs @@ -0,0 +1,394 
@@ +use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; + +use ruma::{ + canonical_json::redact_content_in_place, + events::{ + room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, + AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, + AnyTimelineEvent, StateEvent, TimelineEventType, + }, + serde::Raw, + state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, + OwnedUserId, RoomId, RoomVersionId, UInt, UserId, +}; +use serde::{Deserialize, Serialize}; +use serde_json::{ + json, + value::{to_raw_value, RawValue as RawJsonValue}, +}; +use tracing::warn; + +use crate::{services, Error}; + +/// Content hashes of a PDU. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct EventHash { + /// The SHA-256 hash. + pub sha256: String, +} + +#[derive(Clone, Deserialize, Serialize, Debug)] +pub struct PduEvent { + pub event_id: Arc, + pub room_id: OwnedRoomId, + pub sender: OwnedUserId, + #[serde(skip_serializing_if = "Option::is_none")] + pub origin: Option, + pub origin_server_ts: UInt, + #[serde(rename = "type")] + pub kind: TimelineEventType, + pub content: Box, + #[serde(skip_serializing_if = "Option::is_none")] + pub state_key: Option, + pub prev_events: Vec>, + pub depth: UInt, + pub auth_events: Vec>, + #[serde(skip_serializing_if = "Option::is_none")] + pub redacts: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub unsigned: Option>, + pub hashes: EventHash, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub signatures: Option>, // BTreeMap, BTreeMap> +} + +impl PduEvent { + #[tracing::instrument(skip(self))] + pub fn redact(&mut self, room_version_id: RoomVersionId, reason: &PduEvent) -> crate::Result<()> { + self.unsigned = None; + + let mut content = serde_json::from_str(self.content.get()) + .map_err(|_| Error::bad_database("PDU in db has invalid 
content."))?; + redact_content_in_place(&mut content, &room_version_id, self.kind.to_string()) + .map_err(|e| Error::RedactionError(self.sender.server_name().to_owned(), e))?; + + self.unsigned = Some( + to_raw_value(&json!({ + "redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works") + })) + .expect("to string always works"), + ); + + self.content = to_raw_value(&content).expect("to string always works"); + + Ok(()) + } + + pub fn remove_transaction_id(&mut self) -> crate::Result<()> { + if let Some(unsigned) = &self.unsigned { + let mut unsigned: BTreeMap> = serde_json::from_str(unsigned.get()) + .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; + unsigned.remove("transaction_id"); + self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); + } + + Ok(()) + } + + pub fn add_age(&mut self) -> crate::Result<()> { + let mut unsigned: BTreeMap> = self + .unsigned + .as_ref() + .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; + + unsigned.insert("age".to_owned(), to_raw_value(&1).unwrap()); + self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn to_sync_room_event(&self) -> Raw { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &self.redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } + + /// This only works for events that are also AnyRoomEvents. 
+ #[tracing::instrument(skip(self))] + pub fn to_any_event(&self) -> Raw { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "room_id": self.room_id, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &self.redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } + + #[tracing::instrument(skip(self))] + pub fn to_room_event(&self) -> Raw { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "room_id": self.room_id, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &self.redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } + + #[tracing::instrument(skip(self))] + pub fn to_message_like_event(&self) -> Raw { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "room_id": self.room_id, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &self.redacts { + json["redacts"] = json!(redacts); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } + + #[tracing::instrument(skip(self))] + pub fn to_state_event(&self) -> Raw { + let mut json = json!({ + "content": self.content, + "type": 
self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "room_id": self.room_id, + "state_key": self.state_key, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } + + #[tracing::instrument(skip(self))] + pub fn to_sync_state_event(&self) -> Raw { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "state_key": self.state_key, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } + + #[tracing::instrument(skip(self))] + pub fn to_stripped_state_event(&self) -> Raw { + let json = json!({ + "content": self.content, + "type": self.kind, + "sender": self.sender, + "state_key": self.state_key, + }); + + serde_json::from_value(json).expect("Raw::from_value always works") + } + + #[tracing::instrument(skip(self))] + pub fn to_stripped_spacechild_state_event(&self) -> Raw { + let json = json!({ + "content": self.content, + "type": self.kind, + "sender": self.sender, + "state_key": self.state_key, + "origin_server_ts": self.origin_server_ts, + }); + + serde_json::from_value(json).expect("Raw::from_value always works") + } + + #[tracing::instrument(skip(self))] + pub fn to_member_event(&self) -> Raw> { + let mut json = json!({ + "content": self.content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + "redacts": self.redacts, + "room_id": self.room_id, + "state_key": self.state_key, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Raw::from_value always works") + } + + /// This does not return a full `Pdu` it is only 
to satisfy ruma's types. + #[tracing::instrument] + pub fn convert_to_outgoing_federation_event(mut pdu_json: CanonicalJsonObject) -> Box { + if let Some(unsigned) = pdu_json + .get_mut("unsigned") + .and_then(|val| val.as_object_mut()) + { + unsigned.remove("transaction_id"); + } + + if let Some(room_id) = pdu_json + .get("room_id") + .and_then(|val| RoomId::parse(val.as_str()?).ok()) + { + if let Ok(room_version_id) = services().rooms.state.get_room_version(&room_id) { + // room v3 and above removed the "event_id" field from remote PDU format + match room_version_id { + RoomVersionId::V1 | RoomVersionId::V2 => {}, + _ => { + pdu_json.remove("event_id"); + }, + }; + } else { + pdu_json.remove("event_id"); + } + } else { + pdu_json.remove("event_id"); + } + + // TODO: another option would be to convert it to a canonical string to validate + // size and return a Result> + // serde_json::from_str::>( + // ruma::serde::to_canonical_json_string(pdu_json).expect("CanonicalJson is + // valid serde_json::Value"), ) + // .expect("Raw::from_value always works") + + to_raw_value(&pdu_json).expect("CanonicalJson is valid serde_json::Value") + } + + pub fn from_id_val(event_id: &EventId, mut json: CanonicalJsonObject) -> Result { + json.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + + serde_json::from_value(serde_json::to_value(json).expect("valid JSON")) + } +} + +impl state_res::Event for PduEvent { + type Id = Arc; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { &self.room_id } + + fn sender(&self) -> &UserId { &self.sender } + + fn event_type(&self) -> &TimelineEventType { &self.kind } + + fn content(&self) -> &RawJsonValue { &self.content } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { MilliSecondsSinceUnixEpoch(self.origin_server_ts) } + + fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } + + fn prev_events(&self) -> Box + '_> { 
Box::new(self.prev_events.iter()) } + + fn auth_events(&self) -> Box + '_> { Box::new(self.auth_events.iter()) } + + fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } +} + +// These impl's allow us to dedup state snapshots when resolving state +// for incoming events (federation/send/{txn}). +impl Eq for PduEvent {} +impl PartialEq for PduEvent { + fn eq(&self, other: &Self) -> bool { self.event_id == other.event_id } +} +impl PartialOrd for PduEvent { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } +} +impl Ord for PduEvent { + fn cmp(&self, other: &Self) -> Ordering { self.event_id.cmp(&other.event_id) } +} + +/// Generates a correct eventId for the incoming pdu. +/// +/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. +pub(crate) fn gen_event_id_canonical_json( + pdu: &RawJsonValue, room_version_id: &RoomVersionId, +) -> crate::Result<(OwnedEventId, CanonicalJsonObject)> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + warn!("Error parsing incoming event {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + let event_id = format!( + "${}", + // Anything higher than version3 behaves the same + ruma::signatures::reference_hash(&value, room_version_id).expect("ruma can calculate reference hashes") + ) + .try_into() + .expect("ruma's reference hashes are valid event ids"); + + Ok((event_id, value)) +} + +/// Build the start of a PDU in order to add it to the Database. 
+#[derive(Debug, Deserialize)] +pub struct PduBuilder { + #[serde(rename = "type")] + pub event_type: TimelineEventType, + pub content: Box, + pub unsigned: Option>, + pub state_key: Option, + pub redacts: Option>, +} diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index d7ef5175..6f0f58f8 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -1,186 +1,21 @@ -use std::sync::Arc; +use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; -use conduwuit::{ - Result, debug_warn, utils, - utils::{ReadyExt, stream::TryIgnore}, -}; -use database::{Deserialized, Json, Map}; -use futures::Stream; -use ruma::{UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; +use crate::Result; -use super::Presence; -use crate::{Dep, globals, users}; +pub trait Data: Send + Sync { + /// Returns the latest presence event for the given user. + fn get_presence(&self, user_id: &UserId) -> Result>; -pub(crate) struct Data { - presenceid_presence: Arc, - userid_presenceid: Arc, - services: Services, -} - -struct Services { - globals: Dep, - users: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - presenceid_presence: db["presenceid_presence"].clone(), - userid_presenceid: db["userid_presenceid"].clone(), - services: Services { - globals: args.depend::("globals"), - users: args.depend::("users"), - }, - } - } - - pub(super) async fn get_presence(&self, user_id: &UserId) -> Result<(u64, PresenceEvent)> { - let count = self - .userid_presenceid - .get(user_id) - .await - .deserialized::()?; - - let key = presenceid_key(count, user_id); - let bytes = self.presenceid_presence.get(&key).await?; - let event = Presence::from_json_bytes(&bytes)? 
- .to_presence_event(user_id, &self.services.users) - .await; - - Ok((count, event)) - } - - pub(super) async fn set_presence( - &self, - user_id: &UserId, - presence_state: &PresenceState, - currently_active: Option, - last_active_ago: Option, - status_msg: Option, - ) -> Result<()> { - let last_presence = self.get_presence(user_id).await; - let state_changed = match last_presence { - | Err(_) => true, - | Ok(ref presence) => presence.1.content.presence != *presence_state, - }; - - let status_msg_changed = match last_presence { - | Err(_) => true, - | Ok(ref last_presence) => { - let old_msg = last_presence - .1 - .content - .status_msg - .clone() - .unwrap_or_default(); - - let new_msg = status_msg.clone().unwrap_or_default(); - - new_msg != old_msg - }, - }; - - let now = utils::millis_since_unix_epoch(); - let last_last_active_ts = match last_presence { - | Err(_) => 0, - | Ok((_, ref presence)) => - now.saturating_sub(presence.content.last_active_ago.unwrap_or_default().into()), - }; - - let last_active_ts = match last_active_ago { - | None => now, - | Some(last_active_ago) => now.saturating_sub(last_active_ago.into()), - }; - - // TODO: tighten for state flicker? 
- if !status_msg_changed && !state_changed && last_active_ts < last_last_active_ts { - debug_warn!( - "presence spam {user_id:?} last_active_ts:{last_active_ts:?} < \ - {last_last_active_ts:?}", - ); - return Ok(()); - } - - let status_msg = if status_msg.as_ref().is_some_and(String::is_empty) { - None - } else { - status_msg - }; - - let presence = Presence::new( - presence_state.to_owned(), - currently_active.unwrap_or(false), - last_active_ts, - status_msg, - ); - - let count = self.services.globals.next_count()?; - let key = presenceid_key(count, user_id); - - self.presenceid_presence.raw_put(key, Json(presence)); - self.userid_presenceid.raw_put(user_id, count); - - if let Ok((last_count, _)) = last_presence { - let key = presenceid_key(last_count, user_id); - self.presenceid_presence.remove(&key); - } - - Ok(()) - } - - pub(super) async fn remove_presence(&self, user_id: &UserId) { - let Ok(count) = self - .userid_presenceid - .get(user_id) - .await - .deserialized::() - else { - return; - }; - - let key = presenceid_key(count, user_id); - self.presenceid_presence.remove(&key); - self.userid_presenceid.remove(user_id); - } - - #[inline] - pub(super) fn presence_since( - &self, - since: u64, - ) -> impl Stream + Send + '_ { - self.presenceid_presence - .raw_stream() - .ignore_err() - .ready_filter_map(move |(key, presence)| { - let (count, user_id) = presenceid_parse(key).ok()?; - (count > since).then_some((user_id, count, presence)) - }) - } -} - -#[inline] -fn presenceid_key(count: u64, user_id: &UserId) -> Vec { - let cap = size_of::().saturating_add(user_id.as_bytes().len()); - let mut key = Vec::with_capacity(cap); - key.extend_from_slice(&count.to_be_bytes()); - key.extend_from_slice(user_id.as_bytes()); - key -} - -#[inline] -fn presenceid_parse(key: &[u8]) -> Result<(u64, &UserId)> { - let (count, user_id) = key.split_at(8); - let user_id = user_id_from_bytes(user_id)?; - let count = utils::u64_from_u8(count); - - Ok((count, user_id)) -} - -/// Parses 
a `UserId` from bytes. -fn user_id_from_bytes(bytes: &[u8]) -> Result<&UserId> { - let str: &str = utils::str_from_bytes(bytes)?; - let user_id: &UserId = str.try_into()?; - - Ok(user_id) + /// Adds a presence event which will be saved until a new event replaces it. + fn set_presence( + &self, user_id: &UserId, presence_state: &PresenceState, currently_active: Option, + last_active_ago: Option, status_msg: Option, + ) -> Result<()>; + + /// Removes the presence record for the given user from the database. + fn remove_presence(&self, user_id: &UserId) -> Result<()>; + + /// Returns the most recent presence updates that happened after the event + /// with id `since`. + fn presence_since<'a>(&'a self, since: u64) -> Box)> + 'a>; } diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index 8f646be6..2be8c981 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -1,117 +1,128 @@ mod data; -mod presence; use std::{sync::Arc, time::Duration}; -use async_trait::async_trait; -use conduwuit::{ - Error, Result, Server, checked, debug, debug_warn, error, result::LogErr, trace, +pub use data::Data; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use ruma::{ + events::presence::{PresenceEvent, PresenceEventContent}, + presence::PresenceState, + OwnedUserId, UInt, UserId, }; -use database::Database; -use futures::{Stream, StreamExt, TryFutureExt, stream::FuturesUnordered}; -use loole::{Receiver, Sender}; -use ruma::{OwnedUserId, UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; -use tokio::time::sleep; +use serde::{Deserialize, Serialize}; +use tokio::{sync::Mutex, time::sleep}; +use tracing::{debug, error}; -use self::{data::Data, presence::Presence}; -use crate::{Dep, globals, users}; +use crate::{services, utils, Config, Error, Result}; + +/// Represents data required to be kept in order to implement the presence +/// specification. 
+#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Presence { + pub state: PresenceState, + pub currently_active: bool, + pub last_active_ts: u64, + pub status_msg: Option, +} + +impl Presence { + pub fn new(state: PresenceState, currently_active: bool, last_active_ts: u64, status_msg: Option) -> Self { + Self { + state, + currently_active, + last_active_ts, + status_msg, + } + } + + pub fn from_json_bytes_to_event(bytes: &[u8], user_id: &UserId) -> Result { + let presence = Self::from_json_bytes(bytes)?; + presence.to_presence_event(user_id) + } + + pub fn from_json_bytes(bytes: &[u8]) -> Result { + serde_json::from_slice(bytes).map_err(|_| Error::bad_database("Invalid presence data in database")) + } + + pub fn to_json_bytes(&self) -> Result> { + serde_json::to_vec(self).map_err(|_| Error::bad_database("Could not serialize Presence to JSON")) + } + + /// Creates a PresenceEvent from available data. + pub fn to_presence_event(&self, user_id: &UserId) -> Result { + let now = utils::millis_since_unix_epoch(); + let last_active_ago = if self.currently_active { + None + } else { + Some(UInt::new_saturating(now.saturating_sub(self.last_active_ts))) + }; + + Ok(PresenceEvent { + sender: user_id.to_owned(), + content: PresenceEventContent { + presence: self.state.clone(), + status_msg: self.status_msg.clone(), + currently_active: Some(self.currently_active), + last_active_ago, + displayname: services().users.displayname(user_id)?, + avatar_url: services().users.avatar_url(user_id)?, + }, + }) + } +} pub struct Service { - timer_channel: (Sender, Receiver), + pub db: &'static dyn Data, + pub timer_sender: loole::Sender<(OwnedUserId, Duration)>, + timer_receiver: Mutex>, timeout_remote_users: bool, - idle_timeout: u64, - offline_timeout: u64, - db: Data, - services: Services, -} - -struct Services { - server: Arc, - db: Arc, - globals: Dep, - users: Dep, -} - -type TimerType = (OwnedUserId, Duration); - -#[async_trait] -impl crate::Service for Service { - fn 
build(args: crate::Args<'_>) -> Result> { - let config = &args.server.config; - let idle_timeout_s = config.presence_idle_timeout_s; - let offline_timeout_s = config.presence_offline_timeout_s; - Ok(Arc::new(Self { - timer_channel: loole::unbounded(), - timeout_remote_users: config.presence_timeout_remote_users, - idle_timeout: checked!(idle_timeout_s * 1_000)?, - offline_timeout: checked!(offline_timeout_s * 1_000)?, - db: Data::new(&args), - services: Services { - server: args.server.clone(), - db: args.db.clone(), - globals: args.depend::("globals"), - users: args.depend::("users"), - }, - })) - } - - async fn worker(self: Arc) -> Result<()> { - let receiver = self.timer_channel.1.clone(); - - let mut presence_timers = FuturesUnordered::new(); - while !receiver.is_closed() { - tokio::select! { - Some(user_id) = presence_timers.next() => { - self.process_presence_timer(&user_id).await.log_err().ok(); - }, - event = receiver.recv_async() => match event { - Err(_) => break, - Ok((user_id, timeout)) => { - debug!("Adding timer {}: {user_id} timeout:{timeout:?}", presence_timers.len()); - presence_timers.push(presence_timer(user_id, timeout)); - }, - }, - } - } - - Ok(()) - } - - fn interrupt(&self) { - let (timer_sender, _) = &self.timer_channel; - if !timer_sender.is_closed() { - timer_sender.close(); - } - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } impl Service { + pub fn build(db: &'static dyn Data, config: &Config) -> Arc { + let (timer_sender, timer_receiver) = loole::unbounded(); + + Arc::new(Self { + db, + timer_sender, + timer_receiver: Mutex::new(timer_receiver), + timeout_remote_users: config.presence_timeout_remote_users, + }) + } + + pub fn start_handler(self: &Arc) { + let self_ = Arc::clone(self); + tokio::spawn(async move { + self_ + .handler() + .await + .expect("Failed to start presence handler"); + }); + } + /// Returns the latest presence event for the given user. 
- #[inline] - pub async fn get_presence(&self, user_id: &UserId) -> Result { - self.db - .get_presence(user_id) - .map_ok(|(_, presence)| presence) - .await + pub fn get_presence(&self, user_id: &UserId) -> Result> { + if let Some((_, presence)) = self.db.get_presence(user_id)? { + Ok(Some(presence)) + } else { + Ok(None) + } } /// Pings the presence of the given user in the given room, setting the /// specified state. - pub async fn ping_presence(&self, user_id: &UserId, new_state: &PresenceState) -> Result<()> { - const REFRESH_TIMEOUT: u64 = 60 * 1000; + pub fn ping_presence(&self, user_id: &UserId, new_state: &PresenceState) -> Result<()> { + const REFRESH_TIMEOUT: u64 = 60 * 25 * 1000; - let last_presence = self.db.get_presence(user_id).await; + let last_presence = self.db.get_presence(user_id)?; let state_changed = match last_presence { - | Err(_) => true, - | Ok((_, ref presence)) => presence.content.presence != *new_state, + None => true, + Some((_, ref presence)) => presence.content.presence != *new_state, }; let last_last_active_ago = match last_presence { - | Err(_) => 0_u64, - | Ok((_, ref presence)) => - presence.content.last_active_ago.unwrap_or_default().into(), + None => 0_u64, + Some((_, ref presence)) => presence.content.last_active_ago.unwrap_or_default().into(), }; if !state_changed && last_last_active_ago < REFRESH_TIMEOUT { @@ -119,44 +130,35 @@ impl Service { } let status_msg = match last_presence { - | Ok((_, ref presence)) => presence.content.status_msg.clone(), - | Err(_) => Some(String::new()), + Some((_, ref presence)) => presence.content.status_msg.clone(), + None => Some(String::new()), }; let last_active_ago = UInt::new(0); let currently_active = *new_state == PresenceState::Online; self.set_presence(user_id, new_state, Some(currently_active), last_active_ago, status_msg) - .await } /// Adds a presence event which will be saved until a new event replaces it. 
- pub async fn set_presence( - &self, - user_id: &UserId, - state: &PresenceState, - currently_active: Option, - last_active_ago: Option, + pub fn set_presence( + &self, user_id: &UserId, state: &PresenceState, currently_active: Option, last_active_ago: Option, status_msg: Option, ) -> Result<()> { let presence_state = match state.as_str() { - | "" => &PresenceState::Offline, // default an empty string to 'offline' - | &_ => state, + "" => &PresenceState::Offline, // default an empty string to 'offline' + &_ => state, }; self.db - .set_presence(user_id, presence_state, currently_active, last_active_ago, status_msg) - .await?; + .set_presence(user_id, presence_state, currently_active, last_active_ago, status_msg)?; - if (self.timeout_remote_users || self.services.globals.user_is_local(user_id)) - && user_id != self.services.globals.server_user - { + if self.timeout_remote_users || user_id.server_name() == services().globals.server_name() { let timeout = match presence_state { - | PresenceState::Online => self.services.server.config.presence_idle_timeout_s, - | _ => self.services.server.config.presence_offline_timeout_s, + PresenceState::Online => services().globals.config.presence_idle_timeout_s, + _ => services().globals.config.presence_offline_timeout_s, }; - self.timer_channel - .0 + self.timer_sender .send((user_id.to_owned(), Duration::from_secs(timeout))) .map_err(|e| { error!("Failed to add presence timer: {}", e); @@ -168,116 +170,38 @@ impl Service { } /// Removes the presence record for the given user from the database. - /// - /// TODO: Why is this not used? 
- #[allow(dead_code)] - pub async fn remove_presence(&self, user_id: &UserId) { - self.db.remove_presence(user_id).await; - } - - // Unset online/unavailable presence to offline on startup - pub async fn unset_all_presence(&self) { - let _cork = self.services.db.cork(); - - for user_id in &self - .services - .users - .list_local_users() - .map(UserId::to_owned) - .collect::>() - .await - { - let presence = self.db.get_presence(user_id).await; - - let presence = match presence { - | Ok((_, ref presence)) => &presence.content, - | _ => continue, - }; - - if !matches!( - presence.presence, - PresenceState::Unavailable | PresenceState::Online | PresenceState::Busy - ) { - trace!(?user_id, ?presence, "Skipping user"); - continue; - } - - trace!(?user_id, ?presence, "Resetting presence to offline"); - - _ = self - .set_presence( - user_id, - &PresenceState::Offline, - Some(false), - presence.last_active_ago, - presence.status_msg.clone(), - ) - .await - .inspect_err(|e| { - debug_warn!( - ?presence, - "{user_id} has invalid presence in database and failed to reset it to \ - offline: {e}" - ); - }); - } - } + pub fn remove_presence(&self, user_id: &UserId) -> Result<()> { self.db.remove_presence(user_id) } /// Returns the most recent presence updates that happened after the event /// with id `since`. - pub fn presence_since( - &self, - since: u64, - ) -> impl Stream + Send + '_ { + pub fn presence_since(&self, since: u64) -> Box)>> { self.db.presence_since(since) } - #[inline] - pub async fn from_json_bytes_to_event( - &self, - bytes: &[u8], - user_id: &UserId, - ) -> Result { - let presence = Presence::from_json_bytes(bytes)?; - let event = presence - .to_presence_event(user_id, &self.services.users) - .await; + async fn handler(&self) -> Result<()> { + let mut presence_timers = FuturesUnordered::new(); + let receiver = self.timer_receiver.lock().await; + loop { + tokio::select! 
{ + event = receiver.recv_async() => { - Ok(event) - } + match event { + Ok((user_id, timeout)) => { + debug!("Adding timer {}: {user_id} timeout:{timeout:?}", presence_timers.len()); + presence_timers.push(presence_timer(user_id, timeout)); + } + Err(e) => { + // generally shouldn't happen + error!("Failed to receive presence timer through channel: {e}"); + } + } + } - async fn process_presence_timer(&self, user_id: &OwnedUserId) -> Result<()> { - let mut presence_state = PresenceState::Offline; - let mut last_active_ago = None; - let mut status_msg = None; - - let presence_event = self.get_presence(user_id).await; - - if let Ok(presence_event) = presence_event { - presence_state = presence_event.content.presence; - last_active_ago = presence_event.content.last_active_ago; - status_msg = presence_event.content.status_msg; + Some(user_id) = presence_timers.next() => { + process_presence_timer(&user_id)?; + } + } } - - let new_state = match (&presence_state, last_active_ago.map(u64::from)) { - | (PresenceState::Online, Some(ago)) if ago >= self.idle_timeout => - Some(PresenceState::Unavailable), - | (PresenceState::Unavailable, Some(ago)) if ago >= self.offline_timeout => - Some(PresenceState::Offline), - | _ => None, - }; - - debug!( - "Processed presence timer for user '{user_id}': Old state = {presence_state}, New \ - state = {new_state:?}" - ); - - if let Some(new_state) = new_state { - self.set_presence(user_id, &new_state, Some(false), last_active_ago, status_msg) - .await?; - } - - Ok(()) } } @@ -286,3 +210,36 @@ async fn presence_timer(user_id: OwnedUserId, timeout: Duration) -> OwnedUserId user_id } + +fn process_presence_timer(user_id: &OwnedUserId) -> Result<()> { + let idle_timeout = services().globals.config.presence_idle_timeout_s * 1_000; + let offline_timeout = services().globals.config.presence_offline_timeout_s * 1_000; + + let mut presence_state = PresenceState::Offline; + let mut last_active_ago = None; + let mut status_msg = None; + + let 
presence_event = services().presence.get_presence(user_id)?; + + if let Some(presence_event) = presence_event { + presence_state = presence_event.content.presence; + last_active_ago = presence_event.content.last_active_ago; + status_msg = presence_event.content.status_msg; + } + + let new_state = match (&presence_state, last_active_ago.map(u64::from)) { + (PresenceState::Online, Some(ago)) if ago >= idle_timeout => Some(PresenceState::Unavailable), + (PresenceState::Unavailable, Some(ago)) if ago >= offline_timeout => Some(PresenceState::Offline), + _ => None, + }; + + debug!("Processed presence timer for user '{user_id}': Old state = {presence_state}, New state = {new_state:?}"); + + if let Some(new_state) = new_state { + services() + .presence + .set_presence(user_id, &new_state, Some(false), last_active_ago, status_msg)?; + } + + Ok(()) +} diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs deleted file mode 100644 index 3357bd61..00000000 --- a/src/service/presence/presence.rs +++ /dev/null @@ -1,63 +0,0 @@ -use conduwuit::{Error, Result, utils}; -use ruma::{ - UInt, UserId, - events::presence::{PresenceEvent, PresenceEventContent}, - presence::PresenceState, -}; -use serde::{Deserialize, Serialize}; - -use crate::users; - -/// Represents data required to be kept in order to implement the presence -/// specification. 
-#[derive(Serialize, Deserialize, Debug, Clone)] -pub(super) struct Presence { - state: PresenceState, - currently_active: bool, - last_active_ts: u64, - status_msg: Option, -} - -impl Presence { - #[must_use] - pub(super) fn new( - state: PresenceState, - currently_active: bool, - last_active_ts: u64, - status_msg: Option, - ) -> Self { - Self { - state, - currently_active, - last_active_ts, - status_msg, - } - } - - pub(super) fn from_json_bytes(bytes: &[u8]) -> Result { - serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence data in database")) - } - - /// Creates a PresenceEvent from available data. - pub(super) async fn to_presence_event( - &self, - user_id: &UserId, - users: &users::Service, - ) -> PresenceEvent { - let now = utils::millis_since_unix_epoch(); - let last_active_ago = Some(UInt::new_saturating(now.saturating_sub(self.last_active_ts))); - - PresenceEvent { - sender: user_id.to_owned(), - content: PresenceEventContent { - presence: self.state.clone(), - status_msg: self.status_msg.clone(), - currently_active: Some(self.currently_active), - last_active_ago, - displayname: users.displayname(user_id).await.ok(), - avatar_url: users.avatar_url(user_id).await.ok(), - }, - } - } -} diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs new file mode 100644 index 00000000..b58cd3fc --- /dev/null +++ b/src/service/pusher/data.rs @@ -0,0 +1,16 @@ +use ruma::{ + api::client::push::{set_pusher, Pusher}, + UserId, +}; + +use crate::Result; + +pub trait Data: Send + Sync { + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()>; + + fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result>; + + fn get_pushers(&self, sender: &UserId) -> Result>; + + fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a>; +} diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 27490fb8..70d303ca 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ 
-1,235 +1,113 @@ -use std::{fmt::Debug, mem, sync::Arc}; +mod data; +use std::{fmt::Debug, mem}; use bytes::BytesMut; -use conduwuit::{ - Err, PduEvent, Result, debug_warn, err, trace, - utils::{stream::TryIgnore, string_from_bytes}, - warn, -}; -use database::{Deserialized, Ignore, Interfix, Json, Map}; -use futures::{Stream, StreamExt}; +pub use data::Data; use ipaddress::IPAddress; use ruma::{ - DeviceId, OwnedDeviceId, RoomId, UInt, UserId, api::{ - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - client::push::{Pusher, PusherKind, set_pusher}, + client::push::{set_pusher, Pusher, PusherKind}, push_gateway::send_event_notification::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ - AnySyncTimelineEvent, StateEventType, TimelineEventType, - room::power_levels::RoomPowerLevelsEventContent, - }, - push::{ - Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, Ruleset, Tweak, + room::power_levels::RoomPowerLevelsEventContent, AnySyncTimelineEvent, StateEventType, TimelineEventType, }, + push::{Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, - uint, + uint, RoomId, UInt, UserId, }; +use tracing::{debug, info, warn}; -use crate::{Dep, client, globals, rooms, sending, users}; +use crate::{services, Error, PduEvent, Result}; pub struct Service { - db: Data, - services: Services, -} - -struct Services { - globals: Dep, - client: Dep, - state_accessor: Dep, - state_cache: Dep, - users: Dep, - sending: Dep, -} - -struct Data { - senderkey_pusher: Arc, - pushkey_deviceid: Arc, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - senderkey_pusher: args.db["senderkey_pusher"].clone(), - pushkey_deviceid: args.db["pushkey_deviceid"].clone(), - }, - services: Services { - globals: args.depend::("globals"), - 
client: args.depend::("client"), - state_accessor: args - .depend::("rooms::state_accessor"), - state_cache: args.depend::("rooms::state_cache"), - users: args.depend::("users"), - sending: args.depend::("sending"), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, } impl Service { - pub async fn set_pusher( - &self, - sender: &UserId, - sender_device: &DeviceId, - pusher: &set_pusher::v3::PusherAction, - ) -> Result { - match pusher { - | set_pusher::v3::PusherAction::Post(data) => { - let pushkey = data.pusher.ids.pushkey.as_str(); - - if pushkey.len() > 512 { - return Err!(Request(InvalidParam( - "Push key length cannot be greater than 512 bytes." - ))); - } - - if data.pusher.ids.app_id.as_str().len() > 64 { - return Err!(Request(InvalidParam( - "App ID length cannot be greater than 64 bytes." - ))); - } - - // add some validation to the pusher URL - let pusher_kind = &data.pusher.kind; - if let PusherKind::Http(http) = pusher_kind { - let url = &http.url; - let url = url::Url::parse(&http.url).map_err(|e| { - err!(Request(InvalidParam( - warn!(%url, "HTTP pusher URL is not a valid URL: {e}") - ))) - })?; - - if ["http", "https"] - .iter() - .all(|&scheme| scheme != url.scheme().to_lowercase()) - { - return Err!(Request(InvalidParam( - warn!(%url, "HTTP pusher URL is not a valid HTTP/HTTPS URL") - ))); - } - - if let Ok(ip) = - IPAddress::parse(url.host_str().expect("URL previously validated")) - { - if !self.services.client.valid_cidr_range(&ip) { - return Err!(Request(InvalidParam( - warn!(%url, "HTTP pusher URL is a forbidden remote address") - ))); - } - } - } - - let pushkey = data.pusher.ids.pushkey.as_str(); - let key = (sender, pushkey); - self.db.senderkey_pusher.put(key, Json(pusher)); - self.db.pushkey_deviceid.insert(pushkey, sender_device); - }, - | set_pusher::v3::PusherAction::Delete(ids) => { - self.delete_pusher(sender, ids.pushkey.as_str()).await; - }, - } - - Ok(()) + pub 
fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> { + self.db.set_pusher(sender, pusher) } - pub async fn delete_pusher(&self, sender: &UserId, pushkey: &str) { - let key = (sender, pushkey); - self.db.senderkey_pusher.del(key); - self.db.pushkey_deviceid.remove(pushkey); - - self.services - .sending - .cleanup_events(None, Some(sender), Some(pushkey)) - .await - .ok(); + pub fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + self.db.get_pusher(sender, pushkey) } - pub async fn get_pusher_device(&self, pushkey: &str) -> Result { - self.db.pushkey_deviceid.get(pushkey).await.deserialized() + pub fn get_pushers(&self, sender: &UserId) -> Result> { self.db.get_pushers(sender) } + + pub fn get_pushkeys(&self, sender: &UserId) -> Box>> { + self.db.get_pushkeys(sender) } - pub async fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result { - let senderkey = (sender, pushkey); - self.db - .senderkey_pusher - .qry(&senderkey) - .await - .deserialized() - } - - pub async fn get_pushers(&self, sender: &UserId) -> Vec { - let prefix = (sender, Interfix); - self.db - .senderkey_pusher - .stream_prefix(&prefix) - .ignore_err() - .map(|(_, pusher): (Ignore, Pusher)| pusher) - .collect() - .await - } - - pub fn get_pushkeys<'a>( - &'a self, - sender: &'a UserId, - ) -> impl Stream + Send + 'a { - let prefix = (sender, Interfix); - self.db - .senderkey_pusher - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, pushkey): (Ignore, &str)| pushkey) - } - - #[tracing::instrument(skip(self, dest, request))] - pub async fn send_request(&self, dest: &str, request: T) -> Result + #[tracing::instrument(skip(self, destination, request))] + pub async fn send_request(&self, destination: &str, request: T) -> Result where - T: OutgoingRequest + Debug + Send, + T: OutgoingRequest + Debug, { - const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_0]; - - let dest = dest.replace(self.services.globals.notification_push_path(), ""); - 
trace!("Push gateway destination: {dest}"); + let destination = destination.replace(services().globals.notification_push_path(), ""); let http_request = request - .try_into_http_request::(&dest, SendAccessToken::IfRequired(""), &VERSIONS) + .try_into_http_request::(&destination, SendAccessToken::IfRequired(""), &[MatrixVersion::V1_0]) .map_err(|e| { - err!(BadServerResponse(warn!( - "Failed to find destination {dest} for push gateway: {e}" - ))) + warn!("Failed to find destination {}: {}", destination, e); + Error::BadServerResponse("Invalid destination") })? .map(BytesMut::freeze); let reqwest_request = reqwest::Request::try_from(http_request)?; - if let Some(url_host) = reqwest_request.url().host_str() { - trace!("Checking request URL for IP"); + // TODO: we could keep this very short and let expo backoff do it's thing... + //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); + + let url = reqwest_request.url().clone(); + + if let Some(url_host) = url.host_str() { + debug!("Checking request URL for IP"); if let Ok(ip) = IPAddress::parse(url_host) { - if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Not allowed to send requests to this IP")); + let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); + let mut cidr_ranges: Vec = Vec::new(); + + for cidr in cidr_ranges_s { + cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); + } + + for cidr in cidr_ranges { + if cidr.includes(&ip) { + return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); + } } } } - let response = self.services.client.pusher.execute(reqwest_request).await; + let response = services() + .globals + .client + .pusher + .execute(reqwest_request) + .await; match response { - | Ok(mut response) => { + Ok(mut response) => { // reqwest::Response -> http::Response conversion - trace!("Checking response destination's IP"); + debug!("Checking response destination's IP"); if let Some(remote_addr) = 
response.remote_addr() { if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { - if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse( - "Not allowed to send requests to this IP" - )); + let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); + let mut cidr_ranges: Vec = Vec::new(); + + for cidr in cidr_ranges_s { + cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); + } + + for cidr in cidr_ranges { + if cidr.includes(&ip) { + return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); + } } } } @@ -245,13 +123,19 @@ impl Service { .expect("http::response::Builder is usable"), ); - let body = response.bytes().await?; // TODO: handle timeout + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout if !status.is_success() { - debug_warn!("Push gateway response body: {:?}", string_from_bytes(&body)); - return Err!(BadServerResponse(warn!( - "Push gateway {dest} returned unsuccessful HTTP response: {status}" - ))); + info!( + "Push gateway returned bad response {} {}\n{}\n{:?}", + destination, + status, + url, + crate::utils::string_from_bytes(&body) + ); } let response = T::IncomingResponse::try_from_http_response( @@ -259,14 +143,13 @@ impl Service { .body(body) .expect("reqwest body is valid http body"), ); - response.map_err(|e| { - err!(BadServerResponse(warn!( - "Push gateway {dest} returned invalid response: {e}" - ))) + response.map_err(|_| { + info!("Push gateway returned invalid response bytes {}\n{}", destination, url); + Error::BadServerResponse("Push gateway returned bad response.") }) }, - | Err(e) => { - warn!("Could not send request to pusher {dest}: {e}"); + Err(e) => { + warn!("Could not send request to pusher {}: {}", destination, e); Err(e.into()) }, } @@ -274,44 +157,35 @@ impl Service { #[tracing::instrument(skip(self, user, unread, pusher, ruleset, pdu))] pub async fn 
send_push_notice( - &self, - user: &UserId, - unread: UInt, - pusher: &Pusher, - ruleset: Ruleset, - pdu: &PduEvent, + &self, user: &UserId, unread: UInt, pusher: &Pusher, ruleset: Ruleset, pdu: &PduEvent, ) -> Result<()> { let mut notify = None; let mut tweaks = Vec::new(); - let power_levels: RoomPowerLevelsEventContent = self - .services + let power_levels: RoomPowerLevelsEventContent = services() + .rooms .state_accessor - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "") - .await - .and_then(|ev| { - serde_json::from_str(ev.content.get()).map_err(|e| { - err!(Database(error!("invalid m.room.power_levels event: {e:?}"))) - }) + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_str(ev.content.get()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) }) + .transpose()? .unwrap_or_default(); - for action in self - .get_actions(user, &ruleset, &power_levels, &pdu.to_sync_room_event(), &pdu.room_id) - .await - { + for action in self.get_actions(user, &ruleset, &power_levels, &pdu.to_sync_room_event(), &pdu.room_id)? 
{ let n = match action { - | Action::Notify => true, - | Action::SetTweak(tweak) => { + Action::Notify => true, + Action::SetTweak(tweak) => { tweaks.push(tweak.clone()); continue; }, - | _ => false, + _ => false, }; if notify.is_some() { - return Err!(Database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"# + return Err(Error::bad_database( + r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, )); } @@ -326,91 +200,51 @@ impl Service { Ok(()) } - #[tracing::instrument(skip(self, user, ruleset, pdu), level = "debug")] - pub async fn get_actions<'a>( - &self, - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - ) -> &'a [Action] { + #[tracing::instrument(skip(self, user, ruleset, pdu))] + pub fn get_actions<'a>( + &self, user: &UserId, ruleset: &'a Ruleset, power_levels: &RoomPowerLevelsEventContent, + pdu: &Raw, room_id: &RoomId, + ) -> Result<&'a [Action]> { let power_levels = PushConditionPowerLevelsCtx { users: power_levels.users.clone(), users_default: power_levels.users_default, notifications: power_levels.notifications.clone(), }; - let room_joined_count = self - .services - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(1) - .try_into() - .unwrap_or_else(|_| uint!(0)); - - let user_display_name = self - .services - .users - .displayname(user) - .await - .unwrap_or_else(|_| user.localpart().to_owned()); - let ctx = PushConditionRoomCtx { room_id: room_id.to_owned(), - member_count: room_joined_count, + member_count: UInt::from( + services() + .rooms + .state_cache + .room_joined_count(room_id)? + .unwrap_or(1) as u32, + ), user_id: user.to_owned(), - user_display_name, + user_display_name: services() + .users + .displayname(user)? 
+ .unwrap_or_else(|| user.localpart().to_owned()), power_levels: Some(power_levels), }; - ruleset.get_actions(pdu, &ctx) + Ok(ruleset.get_actions(pdu, &ctx)) } #[tracing::instrument(skip(self, unread, pusher, tweaks, event))] - async fn send_notice( - &self, - unread: UInt, - pusher: &Pusher, - tweaks: Vec, - event: &PduEvent, - ) -> Result { + async fn send_notice(&self, unread: UInt, pusher: &Pusher, tweaks: Vec, event: &PduEvent) -> Result<()> { // TODO: email match &pusher.kind { - | PusherKind::Http(http) => { - let url = &http.url; - let url = url::Url::parse(&http.url).map_err(|e| { - err!(Request(InvalidParam( - warn!(%url, "HTTP pusher URL is not a valid URL: {e}") - ))) - })?; - - if ["http", "https"] - .iter() - .all(|&scheme| scheme != url.scheme().to_lowercase()) - { - return Err!(Request(InvalidParam( - warn!(%url, "HTTP pusher URL is not a valid HTTP/HTTPS URL") - ))); - } - - if let Ok(ip) = - IPAddress::parse(url.host_str().expect("URL previously validated")) - { - if !self.services.client.valid_cidr_range(&ip) { - return Err!(Request(InvalidParam( - warn!(%url, "HTTP pusher URL is a forbidden remote address") - ))); - } - } - - // TODO (timo): can pusher/devices have conflicting formats + PusherKind::Http(http) => { + // TODO: + // Two problems with this + // 1. if "event_id_only" is the only format kind it seems we should never add + // more info + // 2. 
can pusher/devices have conflicting formats let event_id_only = http.format == Some(PushFormat::EventIdOnly); - let mut device = - Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone()); - device.data.data.clone_from(&http.data); + let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone()); + device.data.default_payload = http.default_payload.clone(); device.data.format.clone_from(&http.format); // Tweaks are only added if the format is NOT event_id_only @@ -421,74 +255,45 @@ impl Service { let d = vec![device]; let mut notifi = Notification::new(d); + notifi.prio = NotificationPriority::Low; notifi.event_id = Some((*event.event_id).to_owned()); notifi.room_id = Some((*event.room_id).to_owned()); - if http - .data - .get("org.matrix.msc4076.disable_badge_count") - .is_none() && http.data.get("disable_badge_count").is_none() + // TODO: missed calls + notifi.counts = NotificationCounts::new(unread, uint!(0)); + + if event.kind == TimelineEventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) { - notifi.counts = NotificationCounts::new(unread, uint!(0)); - } else { - // counts will not be serialised if it's the default (0, 0) - // skip_serializing_if = "NotificationCounts::is_default" - notifi.counts = NotificationCounts::default(); + notifi.prio = NotificationPriority::High; } if event_id_only { - self.send_request( - &http.url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; + self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) + .await?; } else { - if event.kind == TimelineEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High; - } else { - notifi.prio = NotificationPriority::Low; - } notifi.sender = Some(event.sender.clone()); notifi.event_type = Some(event.kind.clone()); notifi.content = 
serde_json::value::to_raw_value(&event.content).ok(); if event.kind == TimelineEventType::RoomMember { - notifi.user_is_target = - event.state_key.as_deref() == Some(event.sender.as_str()); + notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - notifi.sender_display_name = - self.services.users.displayname(&event.sender).await.ok(); + notifi.sender_display_name = services().users.displayname(&event.sender)?; - notifi.room_name = self - .services - .state_accessor - .get_name(&event.room_id) - .await - .ok(); + notifi.room_name = services().rooms.state_accessor.get_name(&event.room_id)?; - notifi.room_alias = self - .services - .state_accessor - .get_canonical_alias(&event.room_id) - .await - .ok(); - - self.send_request( - &http.url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; + self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) + .await?; } Ok(()) }, // TODO: Handle email //PusherKind::Email(_) => Ok(()), - | _ => Ok(()), + _ => Ok(()), } } } diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs deleted file mode 100644 index 0151c4d7..00000000 --- a/src/service/resolver/actual.rs +++ /dev/null @@ -1,398 +0,0 @@ -use std::{ - fmt::Debug, - net::{IpAddr, SocketAddr}, -}; - -use conduwuit::{Err, Result, debug, debug_info, err, error, trace}; -use futures::{FutureExt, TryFutureExt}; -use hickory_resolver::ResolveError; -use ipaddress::IPAddress; -use ruma::ServerName; - -use super::{ - cache::{CachedDest, CachedOverride, MAX_IPS}, - fed::{FedDest, PortString, add_port_to_hostname, get_ip_with_port}, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ActualDest { - pub(crate) dest: FedDest, - pub(crate) host: String, -} - -impl ActualDest { - #[inline] - pub(crate) fn string(&self) -> String { self.dest.https_string() } -} - -impl super::Service { - #[tracing::instrument(skip_all, level = "debug", name = "resolve")] - pub(crate) async fn get_actual_dest(&self, 
server_name: &ServerName) -> Result { - let (CachedDest { dest, host, .. }, _cached) = - self.lookup_actual_dest(server_name).await?; - - Ok(ActualDest { dest, host }) - } - - pub(crate) async fn lookup_actual_dest( - &self, - server_name: &ServerName, - ) -> Result<(CachedDest, bool)> { - if let Ok(result) = self.cache.get_destination(server_name).await { - return Ok((result, true)); - } - - let _dedup = self.resolving.lock(server_name.as_str()); - if let Ok(result) = self.cache.get_destination(server_name).await { - return Ok((result, true)); - } - - self.resolve_actual_dest(server_name, true) - .inspect_ok(|result| self.cache.set_destination(server_name, result)) - .map_ok(|result| (result, false)) - .boxed() - .await - } - - /// Returns: `actual_destination`, host header - /// Implemented according to the specification at - /// Numbers in comments below refer to bullet points in linked section of - /// specification - #[tracing::instrument(name = "actual", level = "debug", skip(self, cache))] - pub async fn resolve_actual_dest( - &self, - dest: &ServerName, - cache: bool, - ) -> Result { - self.validate_dest(dest)?; - let mut host = dest.as_str().to_owned(); - let actual_dest = match get_ip_with_port(dest.as_str()) { - | Some(host_port) => Self::actual_dest_1(host_port)?, - | None => - if let Some(pos) = dest.as_str().find(':') { - self.actual_dest_2(dest, cache, pos).await? - } else { - self.conditional_query_and_cache(dest.as_str(), 8448, true) - .await?; - self.services.server.check_running()?; - match self.request_well_known(dest.as_str()).await? { - | Some(delegated) => - self.actual_dest_3(&mut host, cache, delegated).await?, - | _ => match self.query_srv_record(dest.as_str()).await? 
{ - | Some(overrider) => - self.actual_dest_4(&host, cache, overrider).await?, - | _ => self.actual_dest_5(dest, cache).await?, - }, - } - }, - }; - - // Can't use get_ip_with_port here because we don't want to add a port - // to an IP address if it wasn't specified - let host = if let Ok(addr) = host.parse::() { - FedDest::Literal(addr) - } else if let Ok(addr) = host.parse::() { - FedDest::Named(addr.to_string(), FedDest::default_port()) - } else if let Some(pos) = host.find(':') { - let (host, port) = host.split_at(pos); - FedDest::Named( - host.to_owned(), - port.try_into().unwrap_or_else(|_| FedDest::default_port()), - ) - } else { - FedDest::Named(host, FedDest::default_port()) - }; - - debug!("Actual destination: {actual_dest:?} hostname: {host:?}"); - Ok(CachedDest { - dest: actual_dest, - host: host.uri_string(), - expire: CachedDest::default_expire(), - }) - } - - fn actual_dest_1(host_port: FedDest) -> Result { - debug!("1: IP literal with provided or default port"); - Ok(host_port) - } - - async fn actual_dest_2(&self, dest: &ServerName, cache: bool, pos: usize) -> Result { - debug!("2: Hostname with included port"); - let (host, port) = dest.as_str().split_at(pos); - self.conditional_query_and_cache(host, port.parse::().unwrap_or(8448), cache) - .await?; - - Ok(FedDest::Named( - host.to_owned(), - port.try_into().unwrap_or_else(|_| FedDest::default_port()), - )) - } - - async fn actual_dest_3( - &self, - host: &mut String, - cache: bool, - delegated: String, - ) -> Result { - debug!("3: A .well-known file is available"); - *host = add_port_to_hostname(&delegated).uri_string(); - match get_ip_with_port(&delegated) { - | Some(host_and_port) => Self::actual_dest_3_1(host_and_port), - | None => - if let Some(pos) = delegated.find(':') { - self.actual_dest_3_2(cache, delegated, pos).await - } else { - trace!("Delegated hostname has no port in this branch"); - match self.query_srv_record(&delegated).await? 
{ - | Some(overrider) => - self.actual_dest_3_3(cache, delegated, overrider).await, - | _ => self.actual_dest_3_4(cache, delegated).await, - } - }, - } - } - - fn actual_dest_3_1(host_and_port: FedDest) -> Result { - debug!("3.1: IP literal in .well-known file"); - Ok(host_and_port) - } - - async fn actual_dest_3_2( - &self, - cache: bool, - delegated: String, - pos: usize, - ) -> Result { - debug!("3.2: Hostname with port in .well-known file"); - let (host, port) = delegated.split_at(pos); - self.conditional_query_and_cache(host, port.parse::().unwrap_or(8448), cache) - .await?; - - Ok(FedDest::Named( - host.to_owned(), - port.try_into().unwrap_or_else(|_| FedDest::default_port()), - )) - } - - async fn actual_dest_3_3( - &self, - cache: bool, - delegated: String, - overrider: FedDest, - ) -> Result { - debug!("3.3: SRV lookup successful"); - let force_port = overrider.port(); - self.conditional_query_and_cache_override( - &delegated, - &overrider.hostname(), - force_port.unwrap_or(8448), - cache, - ) - .await?; - - if let Some(port) = force_port { - return Ok(FedDest::Named( - delegated, - format!(":{port}") - .as_str() - .try_into() - .unwrap_or_else(|_| FedDest::default_port()), - )); - } - - Ok(add_port_to_hostname(&delegated)) - } - - async fn actual_dest_3_4(&self, cache: bool, delegated: String) -> Result { - debug!("3.4: No SRV records, just use the hostname from .well-known"); - self.conditional_query_and_cache(&delegated, 8448, cache) - .await?; - Ok(add_port_to_hostname(&delegated)) - } - - async fn actual_dest_4( - &self, - host: &str, - cache: bool, - overrider: FedDest, - ) -> Result { - debug!("4: No .well-known; SRV record found"); - let force_port = overrider.port(); - self.conditional_query_and_cache_override( - host, - &overrider.hostname(), - force_port.unwrap_or(8448), - cache, - ) - .await?; - - if let Some(port) = force_port { - let port = format!(":{port}"); - - return Ok(FedDest::Named( - host.to_owned(), - 
PortString::from(port.as_str()).unwrap_or_else(|_| FedDest::default_port()), - )); - } - - Ok(add_port_to_hostname(host)) - } - - async fn actual_dest_5(&self, dest: &ServerName, cache: bool) -> Result { - debug!("5: No SRV record found"); - self.conditional_query_and_cache(dest.as_str(), 8448, cache) - .await?; - - Ok(add_port_to_hostname(dest.as_str())) - } - - #[inline] - async fn conditional_query_and_cache( - &self, - hostname: &str, - port: u16, - cache: bool, - ) -> Result { - self.conditional_query_and_cache_override(hostname, hostname, port, cache) - .await - } - - #[inline] - async fn conditional_query_and_cache_override( - &self, - untername: &str, - hostname: &str, - port: u16, - cache: bool, - ) -> Result { - if !cache { - return Ok(()); - } - - if self.cache.has_override(untername).await { - return Ok(()); - } - - self.query_and_cache_override(untername, hostname, port) - .await - } - - #[tracing::instrument(name = "ip", level = "debug", skip(self))] - async fn query_and_cache_override( - &self, - untername: &'_ str, - hostname: &'_ str, - port: u16, - ) -> Result { - self.services.server.check_running()?; - - debug!("querying IP for {untername:?} ({hostname:?}:{port})"); - match self.resolver.resolver.lookup_ip(hostname.to_owned()).await { - | Err(e) => Self::handle_resolve_error(&e, hostname), - | Ok(override_ip) => { - self.cache.set_override(untername, &CachedOverride { - ips: override_ip.into_iter().take(MAX_IPS).collect(), - port, - expire: CachedOverride::default_expire(), - overriding: (hostname != untername) - .then_some(hostname.into()) - .inspect(|_| debug_info!("{untername:?} overriden by {hostname:?}")), - }); - - Ok(()) - }, - } - } - - #[tracing::instrument(name = "srv", level = "debug", skip(self))] - async fn query_srv_record(&self, hostname: &'_ str) -> Result> { - let hostnames = - [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; - - for hostname in hostnames { - self.services.server.check_running()?; 
- - debug!("querying SRV for {hostname:?}"); - let hostname = hostname.trim_end_matches('.'); - match self.resolver.resolver.srv_lookup(hostname).await { - | Err(e) => Self::handle_resolve_error(&e, hostname)?, - | Ok(result) => { - return Ok(result.iter().next().map(|result| { - FedDest::Named( - result.target().to_string().trim_end_matches('.').to_owned(), - format!(":{}", result.port()) - .as_str() - .try_into() - .unwrap_or_else(|_| FedDest::default_port()), - ) - })); - }, - } - } - - Ok(None) - } - - fn handle_resolve_error(e: &ResolveError, host: &'_ str) -> Result<()> { - use hickory_resolver::{ResolveErrorKind::Proto, proto::ProtoErrorKind}; - - match e.kind() { - | Proto(e) => match e.kind() { - | ProtoErrorKind::NoRecordsFound { .. } => { - // Raise to debug_warn if we can find out the result wasn't from cache - debug!(%host, "No DNS records found: {e}"); - Ok(()) - }, - | ProtoErrorKind::Timeout => { - Err!(warn!(%host, "DNS {e}")) - }, - | ProtoErrorKind::NoConnections => { - error!( - "Your DNS server is overloaded and has ran out of connections. It is \ - strongly recommended you remediate this issue to ensure proper \ - federation connectivity." - ); - - Err!(error!(%host, "DNS error: {e}")) - }, - | _ => Err!(error!(%host, "DNS error: {e}")), - }, - | _ => Err!(error!(%host, "DNS error: {e}")), - } - } - - fn validate_dest(&self, dest: &ServerName) -> Result<()> { - if dest == self.services.server.name && !self.services.server.config.federation_loopback { - return Err!("Won't send federation request to ourselves"); - } - - if dest.is_ip_literal() || IPAddress::is_valid(dest.host()) { - self.validate_dest_ip_literal(dest)?; - } - - Ok(()) - } - - fn validate_dest_ip_literal(&self, dest: &ServerName) -> Result<()> { - trace!("Destination is an IP literal, checking against IP range denylist.",); - debug_assert!( - dest.is_ip_literal() || !IPAddress::is_valid(dest.host()), - "Destination is not an IP literal." 
- ); - let ip = IPAddress::parse(dest.host()).map_err(|e| { - err!(BadServerResponse(debug_error!("Failed to parse IP literal from string: {e}"))) - })?; - - self.validate_ip(&ip)?; - - Ok(()) - } - - pub(crate) fn validate_ip(&self, ip: &IPAddress) -> Result<()> { - if !self.services.client.valid_cidr_range(ip) { - return Err!(BadServerResponse("Not allowed to send requests to this IP")); - } - - Ok(()) - } -} diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs deleted file mode 100644 index cfea7187..00000000 --- a/src/service/resolver/cache.rs +++ /dev/null @@ -1,158 +0,0 @@ -use std::{net::IpAddr, sync::Arc, time::SystemTime}; - -use conduwuit::{ - Result, - arrayvec::ArrayVec, - at, err, implement, - utils::{math::Expected, rand, stream::TryIgnore}, -}; -use database::{Cbor, Deserialized, Map}; -use futures::{Stream, StreamExt, future::join}; -use ruma::ServerName; -use serde::{Deserialize, Serialize}; - -use super::fed::FedDest; - -pub struct Cache { - destinations: Arc, - overrides: Arc, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct CachedDest { - pub dest: FedDest, - pub host: String, - pub expire: SystemTime, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct CachedOverride { - pub ips: IpAddrs, - pub port: u16, - pub expire: SystemTime, - pub overriding: Option, -} - -pub type IpAddrs = ArrayVec; -pub(crate) const MAX_IPS: usize = 3; - -impl Cache { - pub(super) fn new(args: &crate::Args<'_>) -> Arc { - Arc::new(Self { - destinations: args.db["servername_destination"].clone(), - overrides: args.db["servername_override"].clone(), - }) - } -} - -#[implement(Cache)] -pub async fn clear(&self) { join(self.clear_destinations(), self.clear_overrides()).await; } - -#[implement(Cache)] -pub async fn clear_destinations(&self) { self.destinations.clear().await; } - -#[implement(Cache)] -pub async fn clear_overrides(&self) { self.overrides.clear().await; } - -#[implement(Cache)] -pub fn del_destination(&self, 
name: &ServerName) { self.destinations.remove(name); } - -#[implement(Cache)] -pub fn del_override(&self, name: &ServerName) { self.overrides.remove(name); } - -#[implement(Cache)] -pub fn set_destination(&self, name: &ServerName, dest: &CachedDest) { - self.destinations.raw_put(name, Cbor(dest)); -} - -#[implement(Cache)] -pub fn set_override(&self, name: &str, over: &CachedOverride) { - self.overrides.raw_put(name, Cbor(over)); -} - -#[implement(Cache)] -#[must_use] -pub async fn has_destination(&self, destination: &ServerName) -> bool { - self.get_destination(destination).await.is_ok() -} - -#[implement(Cache)] -#[must_use] -pub async fn has_override(&self, destination: &str) -> bool { - self.get_override(destination) - .await - .iter() - .any(CachedOverride::valid) -} - -#[implement(Cache)] -pub async fn get_destination(&self, name: &ServerName) -> Result { - self.destinations - .get(name) - .await - .deserialized::>() - .map(at!(0)) - .into_iter() - .find(CachedDest::valid) - .ok_or(err!(Request(NotFound("Expired from cache")))) -} - -#[implement(Cache)] -pub async fn get_override(&self, name: &str) -> Result { - self.overrides - .get(name) - .await - .deserialized::>() - .map(at!(0)) -} - -#[implement(Cache)] -pub fn destinations(&self) -> impl Stream + Send + '_ { - self.destinations - .stream() - .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) -} - -#[implement(Cache)] -pub fn overrides(&self) -> impl Stream + Send + '_ { - self.overrides - .stream() - .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) -} - -impl CachedDest { - #[inline] - #[must_use] - pub fn valid(&self) -> bool { self.expire > SystemTime::now() } - - #[must_use] - pub(crate) fn default_expire() -> SystemTime { - rand::time_from_now_secs(60 * 60 * 18..60 * 60 * 36) - } - - #[inline] - #[must_use] - pub fn size(&self) -> usize { - self.dest - .size() - .expected_add(self.host.len()) - .expected_add(size_of_val(&self.expire)) - } -} - -impl 
CachedOverride { - #[inline] - #[must_use] - pub fn valid(&self) -> bool { self.expire > SystemTime::now() } - - #[must_use] - pub(crate) fn default_expire() -> SystemTime { - rand::time_from_now_secs(60 * 60 * 6..60 * 60 * 12) - } - - #[inline] - #[must_use] - pub fn size(&self) -> usize { size_of_val(self) } -} diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs deleted file mode 100644 index 3a0b2551..00000000 --- a/src/service/resolver/dns.rs +++ /dev/null @@ -1,158 +0,0 @@ -use std::{net::SocketAddr, sync::Arc, time::Duration}; - -use conduwuit::{Result, Server, err}; -use futures::FutureExt; -use hickory_resolver::{TokioResolver, lookup_ip::LookupIp}; -use reqwest::dns::{Addrs, Name, Resolve, Resolving}; - -use super::cache::{Cache, CachedOverride}; - -pub struct Resolver { - pub(crate) resolver: Arc, - pub(crate) hooked: Arc, - server: Arc, -} - -pub(crate) struct Hooked { - resolver: Arc, - cache: Arc, - server: Arc, -} - -type ResolvingResult = Result>; - -impl Resolver { - #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] - pub(super) fn build(server: &Arc, cache: Arc) -> Result> { - let config = &server.config; - let (sys_conf, mut opts) = hickory_resolver::system_conf::read_system_conf() - .map_err(|e| err!(error!("Failed to configure DNS resolver from system: {e}")))?; - - let mut conf = hickory_resolver::config::ResolverConfig::new(); - - if let Some(domain) = sys_conf.domain() { - conf.set_domain(domain.clone()); - } - - for sys_conf in sys_conf.search() { - conf.add_search(sys_conf.clone()); - } - - for sys_conf in sys_conf.name_servers() { - let mut ns = sys_conf.clone(); - - if config.query_over_tcp_only { - ns.protocol = hickory_resolver::proto::xfer::Protocol::Tcp; - } - - ns.trust_negative_responses = !config.query_all_nameservers; - - conf.add_name_server(ns); - } - - opts.cache_size = config.dns_cache_entries as usize; - opts.preserve_intermediates = true; - opts.negative_min_ttl = 
Some(Duration::from_secs(config.dns_min_ttl_nxdomain)); - opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 30)); - opts.positive_min_ttl = Some(Duration::from_secs(config.dns_min_ttl)); - opts.positive_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 7)); - opts.timeout = Duration::from_secs(config.dns_timeout); - opts.attempts = config.dns_attempts as usize; - opts.try_tcp_on_error = config.dns_tcp_fallback; - opts.num_concurrent_reqs = 1; - opts.edns0 = true; - opts.case_randomization = true; - opts.ip_strategy = match config.ip_lookup_strategy { - | 1 => hickory_resolver::config::LookupIpStrategy::Ipv4Only, - | 2 => hickory_resolver::config::LookupIpStrategy::Ipv6Only, - | 3 => hickory_resolver::config::LookupIpStrategy::Ipv4AndIpv6, - | 4 => hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4, - | _ => hickory_resolver::config::LookupIpStrategy::Ipv4thenIpv6, - }; - - let rt_prov = hickory_resolver::proto::runtime::TokioRuntimeProvider::new(); - let conn_prov = hickory_resolver::name_server::TokioConnectionProvider::new(rt_prov); - let mut builder = TokioResolver::builder_with_config(conf, conn_prov); - *builder.options_mut() = opts; - let resolver = Arc::new(builder.build()); - - Ok(Arc::new(Self { - resolver: resolver.clone(), - hooked: Arc::new(Hooked { resolver, cache, server: server.clone() }), - server: server.clone(), - })) - } - - /// Clear the in-memory hickory-dns caches - #[inline] - pub fn clear_cache(&self) { self.resolver.clear_cache(); } -} - -impl Resolve for Resolver { - fn resolve(&self, name: Name) -> Resolving { - resolve_to_reqwest(self.server.clone(), self.resolver.clone(), name).boxed() - } -} - -impl Resolve for Hooked { - fn resolve(&self, name: Name) -> Resolving { - hooked_resolve(self.cache.clone(), self.server.clone(), self.resolver.clone(), name) - .boxed() - } -} - -#[tracing::instrument( - level = "debug", - skip_all, - fields(name = ?name.as_str()) -)] -async fn hooked_resolve( - cache: Arc, - server: Arc, - 
resolver: Arc, - name: Name, -) -> Result> { - match cache.get_override(name.as_str()).await { - | Ok(cached) if cached.valid() => cached_to_reqwest(cached).await, - | Ok(CachedOverride { overriding, .. }) if overriding.is_some() => - resolve_to_reqwest( - server, - resolver, - overriding - .as_deref() - .map(str::parse) - .expect("overriding is set for this record") - .expect("overriding is a valid internet name"), - ) - .boxed() - .await, - - | _ => resolve_to_reqwest(server, resolver, name).boxed().await, - } -} - -async fn resolve_to_reqwest( - server: Arc, - resolver: Arc, - name: Name, -) -> ResolvingResult { - use std::{io, io::ErrorKind::Interrupted}; - - let handle_shutdown = || Box::new(io::Error::new(Interrupted, "Server shutting down")); - let handle_results = - |results: LookupIp| Box::new(results.into_iter().map(|ip| SocketAddr::new(ip, 0))); - - tokio::select! { - results = resolver.lookup_ip(name.as_str()) => Ok(handle_results(results?)), - () = server.until_shutdown() => Err(handle_shutdown()), - } -} - -async fn cached_to_reqwest(cached: CachedOverride) -> ResolvingResult { - let addrs = cached - .ips - .into_iter() - .map(move |ip| SocketAddr::new(ip, cached.port)); - - Ok(Box::new(addrs)) -} diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs deleted file mode 100644 index e5bee9ac..00000000 --- a/src/service/resolver/fed.rs +++ /dev/null @@ -1,95 +0,0 @@ -use std::{ - borrow::Cow, - fmt, - net::{IpAddr, SocketAddr}, -}; - -use conduwuit::{arrayvec::ArrayString, utils::math::Expected}; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] -pub enum FedDest { - Literal(SocketAddr), - Named(String, PortString), -} - -/// numeric or service-name -pub type PortString = ArrayString<16>; - -const DEFAULT_PORT: &str = ":8448"; - -pub(crate) fn get_ip_with_port(dest_str: &str) -> Option { - if let Ok(dest) = dest_str.parse::() { - Some(FedDest::Literal(dest)) - } else if let 
Ok(ip_addr) = dest_str.parse::() { - Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) - } else { - None - } -} - -pub(crate) fn add_port_to_hostname(dest: &str) -> FedDest { - let (host, port) = match dest.find(':') { - | None => (dest, DEFAULT_PORT), - | Some(pos) => dest.split_at(pos), - }; - - FedDest::Named( - host.to_owned(), - PortString::from(port).unwrap_or_else(|_| FedDest::default_port()), - ) -} - -impl FedDest { - pub(crate) fn https_string(&self) -> String { - match self { - | Self::Literal(addr) => format!("https://{addr}"), - | Self::Named(host, port) => format!("https://{host}{port}"), - } - } - - pub(crate) fn uri_string(&self) -> String { - match self { - | Self::Literal(addr) => addr.to_string(), - | Self::Named(host, port) => format!("{host}{port}"), - } - } - - #[inline] - pub(crate) fn hostname(&self) -> Cow<'_, str> { - match &self { - | Self::Literal(addr) => addr.ip().to_string().into(), - | Self::Named(host, _) => host.into(), - } - } - - #[inline] - #[allow(clippy::string_slice)] - pub(crate) fn port(&self) -> Option { - match &self { - | Self::Literal(addr) => Some(addr.port()), - | Self::Named(_, port) => port[1..].parse().ok(), - } - } - - #[inline] - #[must_use] - pub fn default_port() -> PortString { - PortString::from(DEFAULT_PORT).expect("default port string") - } - - #[inline] - #[must_use] - pub fn size(&self) -> usize { - match self { - | Self::Literal(saddr) => size_of_val(saddr), - | Self::Named(host, port) => host.len().expected_add(port.capacity()), - } - } -} - -impl fmt::Display for FedDest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.uri_string().as_str()) - } -} diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs deleted file mode 100644 index c513cec9..00000000 --- a/src/service/resolver/mod.rs +++ /dev/null @@ -1,54 +0,0 @@ -pub mod actual; -pub mod cache; -mod dns; -pub mod fed; -#[cfg(test)] -mod tests; -mod well_known; - -use std::sync::Arc; - -use 
async_trait::async_trait; -use conduwuit::{Result, Server, arrayvec::ArrayString, utils::MutexMap}; - -use self::{cache::Cache, dns::Resolver}; -use crate::{Dep, client}; - -pub struct Service { - pub cache: Arc, - pub resolver: Arc, - resolving: Resolving, - services: Services, -} - -struct Services { - server: Arc, - client: Dep, -} - -type Resolving = MutexMap; -type NameBuf = ArrayString<256>; - -#[async_trait] -impl crate::Service for Service { - #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] - fn build(args: crate::Args<'_>) -> Result> { - let cache = Cache::new(&args); - Ok(Arc::new(Self { - cache: cache.clone(), - resolver: Resolver::build(args.server, cache)?, - resolving: MutexMap::new(), - services: Services { - server: args.server.clone(), - client: args.depend::("client"), - }, - })) - } - - async fn clear_cache(&self) { - self.resolver.clear_cache(); - self.cache.clear().await; - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} diff --git a/src/service/resolver/tests.rs b/src/service/resolver/tests.rs deleted file mode 100644 index 068e08bd..00000000 --- a/src/service/resolver/tests.rs +++ /dev/null @@ -1,41 +0,0 @@ -use super::fed::{FedDest, add_port_to_hostname, get_ip_with_port}; - -#[test] -fn ips_get_default_ports() { - assert_eq!( - get_ip_with_port("1.1.1.1"), - Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap())) - ); - assert_eq!( - get_ip_with_port("dead:beef::"), - Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap())) - ); -} - -#[test] -fn ips_keep_custom_ports() { - assert_eq!( - get_ip_with_port("1.1.1.1:1234"), - Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap())) - ); - assert_eq!( - get_ip_with_port("[dead::beef]:8933"), - Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap())) - ); -} - -#[test] -fn hostnames_get_default_ports() { - assert_eq!( - add_port_to_hostname("example.com"), - FedDest::Named(String::from("example.com"), 
":8448".try_into().unwrap()) - ); -} - -#[test] -fn hostnames_keep_custom_ports() { - assert_eq!( - add_port_to_hostname("example.com:1337"), - FedDest::Named(String::from("example.com"), ":1337".try_into().unwrap()) - ); -} diff --git a/src/service/resolver/well_known.rs b/src/service/resolver/well_known.rs deleted file mode 100644 index 68a8e620..00000000 --- a/src/service/resolver/well_known.rs +++ /dev/null @@ -1,49 +0,0 @@ -use conduwuit::{Result, debug, debug_error, debug_info, debug_warn, implement, trace}; - -#[implement(super::Service)] -#[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] -pub(super) async fn request_well_known(&self, dest: &str) -> Result> { - trace!("Requesting well known for {dest}"); - let response = self - .services - .client - .well_known - .get(format!("https://{dest}/.well-known/matrix/server")) - .send() - .await; - - trace!("response: {response:?}"); - if let Err(e) = &response { - debug!("error: {e:?}"); - return Ok(None); - } - - let response = response?; - if !response.status().is_success() { - debug!("response not 2XX"); - return Ok(None); - } - - let text = response.text().await?; - trace!("response text: {text:?}"); - if text.len() >= 12288 { - debug_warn!("response contains junk"); - return Ok(None); - } - - let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); - - let m_server = body - .get("m.server") - .unwrap_or(&serde_json::Value::Null) - .as_str() - .unwrap_or_default(); - - if ruma::identifiers_validation::server_name::validate(m_server).is_err() { - debug_error!("response content missing or invalid"); - return Ok(None); - } - - debug_info!("{dest:?} found at {m_server:?}"); - Ok(Some(m_server.to_owned())) -} diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs new file mode 100644 index 00000000..095d6e66 --- /dev/null +++ b/src/service/rooms/alias/data.rs @@ -0,0 +1,22 @@ +use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; + 
+use crate::Result; + +pub trait Data: Send + Sync { + /// Creates or updates the alias to the given room id. + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>; + + /// Forgets about an alias. Returns an error if the alias did not exist. + fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; + + /// Looks up the roomid for the given alias. + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>; + + /// Returns all local aliases that point to the given room + fn local_aliases_for_room<'a>( + &'a self, room_id: &RoomId, + ) -> Box> + 'a>; + + /// Returns all local aliases on the server + fn all_local_aliases<'a>(&'a self) -> Box> + 'a>; +} diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 866e45a9..a52faefe 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,311 +1,35 @@ -mod remote; +mod data; -use std::sync::Arc; +pub use data::Data; +use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; -use conduwuit::{ - Err, Result, Server, err, - utils::{ReadyExt, stream::TryIgnore}, -}; -use database::{Deserialized, Ignore, Interfix, Map}; -use futures::{Stream, StreamExt, TryFutureExt}; -use ruma::{ - OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId, UserId, - events::{ - StateEventType, - room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - }, -}; - -use crate::{Dep, admin, appservice, appservice::RegistrationInfo, globals, rooms, sending}; +use crate::Result; pub struct Service { - db: Data, - services: Services, -} - -struct Data { - alias_userid: Arc, - alias_roomid: Arc, - aliasid_alias: Arc, -} - -struct Services { - server: Arc, - admin: Dep, - appservice: Dep, - globals: Dep, - sending: Dep, - state_accessor: Dep, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - alias_userid: args.db["alias_userid"].clone(), - alias_roomid: 
args.db["alias_roomid"].clone(), - aliasid_alias: args.db["aliasid_alias"].clone(), - }, - services: Services { - server: args.server.clone(), - admin: args.depend::("admin"), - appservice: args.depend::("appservice"), - globals: args.depend::("globals"), - sending: args.depend::("sending"), - state_accessor: args - .depend::("rooms::state_accessor"), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, } impl Service { #[tracing::instrument(skip(self))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: &RoomId, - user_id: &UserId, - ) -> Result<()> { - if alias == self.services.globals.admin_alias - && user_id != self.services.globals.server_user - { - return Err!(Request(Forbidden("Only the server user can set this alias"))); - } + pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { self.db.set_alias(alias, room_id) } - // Comes first as we don't want a stuck alias - self.db - .alias_userid - .insert(alias.alias().as_bytes(), user_id.as_bytes()); + #[tracing::instrument(skip(self))] + pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { self.db.remove_alias(alias) } - self.db - .alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes()); - - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xFF); - aliasid.extend_from_slice(&self.services.globals.next_count()?.to_be_bytes()); - self.db.aliasid_alias.insert(&aliasid, alias.as_bytes()); - - Ok(()) + #[tracing::instrument(skip(self))] + pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { + self.db.resolve_local_alias(alias) } #[tracing::instrument(skip(self))] - pub async fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result<()> { - if !self.user_can_remove_alias(alias, user_id).await? 
{ - return Err!(Request(Forbidden("User is not permitted to remove this alias."))); - } - - let alias = alias.alias(); - let Ok(room_id) = self.db.alias_roomid.get(&alias).await else { - return Err!(Request(NotFound("Alias does not exist or is invalid."))); - }; - - let prefix = (&room_id, Interfix); - self.db - .aliasid_alias - .keys_prefix_raw(&prefix) - .ignore_err() - .ready_for_each(|key| self.db.aliasid_alias.remove(key)) - .await; - - self.db.alias_roomid.remove(alias.as_bytes()); - self.db.alias_userid.remove(alias.as_bytes()); - - Ok(()) - } - - #[inline] - pub async fn resolve(&self, room: &RoomOrAliasId) -> Result { - self.resolve_with_servers(room, None) - .await - .map(|(room_id, _)| room_id) - } - - pub async fn resolve_with_servers( - &self, - room: &RoomOrAliasId, - servers: Option>, - ) -> Result<(OwnedRoomId, Vec)> { - if room.is_room_id() { - let room_id: &RoomId = room.try_into().expect("valid RoomId"); - Ok((room_id.to_owned(), servers.unwrap_or_default())) - } else { - let alias: &RoomAliasId = room.try_into().expect("valid RoomAliasId"); - self.resolve_alias(alias, servers).await - } - } - - #[tracing::instrument(skip(self), name = "resolve")] - pub async fn resolve_alias( - &self, - room_alias: &RoomAliasId, - servers: Option>, - ) -> Result<(OwnedRoomId, Vec)> { - let server_name = room_alias.server_name(); - let server_is_ours = self.services.globals.server_is_ours(server_name); - let servers_contains_ours = || { - servers - .as_ref() - .is_some_and(|servers| servers.contains(&self.services.server.name)) - }; - - if !server_is_ours && !servers_contains_ours() { - return self - .remote_resolve(room_alias, servers.unwrap_or_default()) - .await; - } - - let room_id = match self.resolve_local_alias(room_alias).await { - | Ok(r) => Some(r), - | Err(_) => self.resolve_appservice_alias(room_alias).await?, - }; - - room_id.map_or_else( - || Err!(Request(NotFound("Room with alias not found."))), - |room_id| Ok((room_id, Vec::new())), - ) - } - - 
#[tracing::instrument(skip(self), level = "debug")] - pub async fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result { - self.db.alias_roomid.get(alias.alias()).await.deserialized() - } - - #[tracing::instrument(skip(self), level = "debug")] pub fn local_aliases_for_room<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .aliasid_alias - .stream_prefix(&prefix) - .ignore_err() - .map(|(_, alias): (Ignore, &RoomAliasId)| alias) + &'a self, room_id: &RoomId, + ) -> Box> + 'a> { + self.db.local_aliases_for_room(room_id) } - #[tracing::instrument(skip(self), level = "debug")] - pub fn all_local_aliases<'a>(&'a self) -> impl Stream + Send + 'a { - self.db - .alias_roomid - .stream() - .ignore_err() - .map(|(alias_localpart, room_id): (&str, &RoomId)| (room_id, alias_localpart)) - } - - async fn user_can_remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Result { - let room_id = self - .resolve_local_alias(alias) - .await - .map_err(|_| err!(Request(NotFound("Alias not found."))))?; - - let server_user = &self.services.globals.server_user; - - // The creator of an alias can remove it - if self - .who_created_alias(alias).await - .is_ok_and(|user| user == user_id) - // Server admins can remove any local alias - || self.services.admin.user_is_admin(user_id).await - // Always allow the server service account to remove the alias, since there may not be an admin room - || server_user == user_id - { - return Ok(true); - } - - // Checking whether the user is able to change canonical aliases of the room - if let Ok(power_levels) = self - .services - .state_accessor - .room_state_get_content::( - &room_id, - &StateEventType::RoomPowerLevels, - "", - ) - .map_ok(RoomPowerLevels::from) - .await - { - return Ok( - power_levels.user_can_send_state(user_id, StateEventType::RoomCanonicalAlias) - ); - } - - // If there is no power levels event, only the room creator can change - // canonical aliases 
- if let Ok(event) = self - .services - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCreate, "") - .await - { - return Ok(event.sender == user_id); - } - - Err!(Database("Room has no m.room.create event")) - } - - async fn who_created_alias(&self, alias: &RoomAliasId) -> Result { - self.db.alias_userid.get(alias.alias()).await.deserialized() - } - - async fn resolve_appservice_alias( - &self, - room_alias: &RoomAliasId, - ) -> Result> { - use ruma::api::appservice::query::query_room_alias; - - for appservice in self.services.appservice.read().await.values() { - if appservice.aliases.is_match(room_alias.as_str()) - && matches!( - self.services - .sending - .send_appservice_request( - appservice.registration.clone(), - query_room_alias::v1::Request { room_alias: room_alias.to_owned() }, - ) - .await, - Ok(Some(_opt_result)) - ) { - return self - .resolve_local_alias(room_alias) - .await - .map_err(|_| err!(Request(NotFound("Room does not exist.")))) - .map(Some); - } - } - - Ok(None) - } - - pub async fn appservice_checks( - &self, - room_alias: &RoomAliasId, - appservice_info: &Option, - ) -> Result<()> { - if !self - .services - .globals - .server_is_ours(room_alias.server_name()) - { - return Err!(Request(InvalidParam("Alias is from another server."))); - } - - if let Some(info) = appservice_info { - if !info.aliases.is_match(room_alias.as_str()) { - return Err!(Request(Exclusive("Room alias is not in namespace."))); - } - } else if self - .services - .appservice - .is_exclusive_alias(room_alias) - .await - { - return Err!(Request(Exclusive("Room alias reserved by appservice."))); - } - - Ok(()) + #[tracing::instrument(skip(self))] + pub fn all_local_aliases<'a>(&'a self) -> Box> + 'a> { + self.db.all_local_aliases() } } diff --git a/src/service/rooms/alias/remote.rs b/src/service/rooms/alias/remote.rs deleted file mode 100644 index 60aed76d..00000000 --- a/src/service/rooms/alias/remote.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::iter::once; - 
-use conduwuit::{Result, debug, debug_error, err, implement}; -use federation::query::get_room_information::v1::Response; -use ruma::{OwnedRoomId, OwnedServerName, RoomAliasId, ServerName, api::federation}; - -#[implement(super::Service)] -pub(super) async fn remote_resolve( - &self, - room_alias: &RoomAliasId, - servers: Vec, -) -> Result<(OwnedRoomId, Vec)> { - debug!(?room_alias, servers = ?servers, "resolve"); - let servers = once(room_alias.server_name()) - .map(ToOwned::to_owned) - .chain(servers.into_iter()); - - let mut resolved_servers = Vec::new(); - let mut resolved_room_id: Option = None; - for server in servers { - match self.remote_request(room_alias, &server).await { - | Err(e) => debug_error!("Failed to query for {room_alias:?} from {server}: {e}"), - | Ok(Response { room_id, servers }) => { - debug!( - "Server {server} answered with {room_id:?} for {room_alias:?} servers: \ - {servers:?}" - ); - - resolved_room_id.get_or_insert(room_id); - add_server(&mut resolved_servers, server); - - if !servers.is_empty() { - add_servers(&mut resolved_servers, servers); - break; - } - }, - } - } - - resolved_room_id - .map(|room_id| (room_id, resolved_servers)) - .ok_or_else(|| { - err!(Request(NotFound("No servers could assist in resolving the room alias"))) - }) -} - -#[implement(super::Service)] -async fn remote_request( - &self, - room_alias: &RoomAliasId, - server: &ServerName, -) -> Result { - use federation::query::get_room_information::v1::Request; - - let request = Request { room_alias: room_alias.to_owned() }; - - self.services - .sending - .send_federation_request(server, request) - .await -} - -fn add_servers(servers: &mut Vec, new: Vec) { - for server in new { - add_server(servers, server); - } -} - -fn add_server(servers: &mut Vec, server: OwnedServerName) { - if !servers.contains(&server) { - servers.push(server); - } -} diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 8c3588cc..f77d2d90 100644 --- 
a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,92 +1,8 @@ -use std::{ - mem::size_of, - sync::{Arc, Mutex}, -}; +use std::sync::Arc; -use conduwuit::{Err, Result, err, utils, utils::math::usize_from_f64}; -use database::Map; -use lru_cache::LruCache; +use crate::Result; -use crate::rooms::short::ShortEventId; - -pub(super) struct Data { - shorteventid_authchain: Arc, - pub(super) auth_chain_cache: Mutex, Arc<[ShortEventId]>>>, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - let config = &args.server.config; - let cache_size = f64::from(config.auth_chain_cache_capacity); - let cache_size = usize_from_f64(cache_size * config.cache_capacity_modifier) - .expect("valid cache size"); - Self { - shorteventid_authchain: db["shorteventid_authchain"].clone(), - auth_chain_cache: Mutex::new(LruCache::new(cache_size)), - } - } - - pub(super) async fn get_cached_eventid_authchain( - &self, - key: &[u64], - ) -> Result> { - debug_assert!(!key.is_empty(), "auth_chain key must not be empty"); - - // Check RAM cache - if let Some(result) = self - .auth_chain_cache - .lock() - .expect("cache locked") - .get_mut(key) - { - return Ok(Arc::clone(result)); - } - - // We only save auth chains for single events in the db - if key.len() != 1 { - return Err!(Request(NotFound("auth_chain not cached"))); - } - - // Check database - let chain = self - .shorteventid_authchain - .qry(&key[0]) - .await - .map_err(|_| err!(Request(NotFound("auth_chain not found"))))?; - - let chain = chain - .chunks_exact(size_of::()) - .map(utils::u64_from_u8) - .collect::>(); - - // Cache in RAM - self.auth_chain_cache - .lock() - .expect("cache locked") - .insert(vec![key[0]], Arc::clone(&chain)); - - Ok(chain) - } - - pub(super) fn cache_auth_chain(&self, key: Vec, auth_chain: Arc<[ShortEventId]>) { - debug_assert!(!key.is_empty(), "auth_chain key must not be empty"); - - // Only persist single events in db - if key.len() 
== 1 { - let key = key[0].to_be_bytes(); - let val = auth_chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(); - - self.shorteventid_authchain.insert(&key, &val); - } - - // Cache in RAM - self.auth_chain_cache - .lock() - .expect("cache locked") - .insert(key, auth_chain); - } +pub trait Data: Send + Sync { + fn get_cached_eventid_authchain(&self, shorteventid: &[u64]) -> Result>>; + fn cache_auth_chain(&self, shorteventid: Vec, auth_chain: Arc<[u64]>) -> Result<()>; } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 0903ea75..f3bf50f8 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -1,257 +1,183 @@ mod data; - use std::{ - collections::{BTreeSet, HashSet, VecDeque}, - fmt::Debug, + collections::{BTreeSet, HashSet}, sync::Arc, - time::Instant, }; -use conduwuit::{ - Err, Result, at, debug, debug_error, implement, trace, - utils::{ - IterStream, - stream::{ReadyExt, TryBroadbandExt}, - }, - validated, warn, -}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; -use ruma::{EventId, OwnedEventId, RoomId}; +pub use data::Data; +use ruma::{api::client::error::ErrorKind, EventId, RoomId}; +use tracing::{debug, error, warn}; -use self::data::Data; -use crate::{Dep, rooms, rooms::short::ShortEventId}; +use crate::{services, Error, Result}; pub struct Service { - services: Services, - db: Data, + pub db: &'static dyn Data, } -struct Services { - short: Dep, - timeline: Dep, -} +impl Service { + pub async fn event_ids_iter<'a>( + &self, room_id: &RoomId, starting_events_: Vec>, + ) -> Result> + 'a> { + let mut starting_events: Vec<&EventId> = Vec::with_capacity(starting_events_.len()); + for starting_event in &starting_events_ { + starting_events.push(starting_event); + } -type Bucket<'a> = BTreeSet<(u64, &'a EventId)>; - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: 
Services { - short: args.depend::("rooms::short"), - timeline: args.depend::("rooms::timeline"), - }, - db: Data::new(&args), - })) + Ok(self + .get_auth_chain(room_id, &starting_events) + .await? + .into_iter() + .filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok())) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} + pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result> { + const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? + const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); -#[implement(Service)] -pub fn event_ids_iter<'a, I>( - &'a self, - room_id: &'a RoomId, - starting_events: I, -) -> impl Stream> + Send + 'a -where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, -{ - self.get_auth_chain(room_id, starting_events) - .map_ok(|chain| { - self.services - .short - .multi_get_eventid_from_short(chain.into_iter().stream()) - .ready_filter(Result::is_ok) - }) - .try_flatten_stream() -} + let started = std::time::Instant::now(); + let mut buckets = [BUCKET; NUM_BUCKETS]; + for (i, short) in services() + .rooms + .short + .multi_get_or_create_shorteventid(starting_events)? + .iter() + .enumerate() + { + let bucket = short % NUM_BUCKETS as u64; + buckets[bucket as usize].insert((*short, starting_events[i])); + } -#[implement(Service)] -#[tracing::instrument(name = "auth_chain", level = "debug", skip_all)] -pub async fn get_auth_chain<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, -) -> Result> -where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, -{ - const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? 
- const BUCKET: Bucket<'_> = BTreeSet::new(); + debug!( + starting_events = ?starting_events.len(), + elapsed = ?started.elapsed(), + "start", + ); - let started = Instant::now(); - let mut starting_ids = self - .services - .short - .multi_get_or_create_shorteventid(starting_events.clone()) - .zip(starting_events.clone().stream()) - .boxed(); - - let mut buckets = [BUCKET; NUM_BUCKETS]; - while let Some((short, starting_event)) = starting_ids.next().await { - let bucket: usize = short.try_into()?; - let bucket: usize = validated!(bucket % NUM_BUCKETS); - buckets[bucket].insert((short, starting_event)); - } - - debug!( - starting_events = ?starting_events.count(), - elapsed = ?started.elapsed(), - "start", - ); - - let full_auth_chain: Vec = buckets - .into_iter() - .try_stream() - .broad_and_then(|chunk| self.get_auth_chain_outer(room_id, started, chunk)) - .try_collect() - .map_ok(|auth_chain: Vec<_>| auth_chain.into_iter().flatten().collect()) - .map_ok(|mut full_auth_chain: Vec<_>| { - full_auth_chain.sort_unstable(); - full_auth_chain.dedup(); - full_auth_chain - }) - .boxed() - .await?; - - debug!( - chain_length = ?full_auth_chain.len(), - elapsed = ?started.elapsed(), - "done", - ); - - Ok(full_auth_chain) -} - -#[implement(Service)] -async fn get_auth_chain_outer( - &self, - room_id: &RoomId, - started: Instant, - chunk: Bucket<'_>, -) -> Result> { - let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); - - if chunk_key.is_empty() { - return Ok(Vec::new()); - } - - if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { - return Ok(cached.to_vec()); - } - - let chunk_cache: Vec<_> = chunk - .into_iter() - .try_stream() - .broad_and_then(|(shortid, event_id)| async move { - if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { - return Ok(cached.to_vec()); + let mut hits = 0; + let mut misses = 0; + let mut full_auth_chain = Vec::new(); + for chunk in buckets { + if chunk.is_empty() { + continue; } - let auth_chain = 
self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); - debug!( - ?event_id, - elapsed = ?started.elapsed(), - "Cache missed event" - ); + let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); + if let Some(cached) = services() + .rooms + .auth_chain + .get_cached_eventid_authchain(&chunk_key)? + { + full_auth_chain.extend(cached.iter().copied()); + hits += 1; + continue; + } + + let mut hits2 = 0; + let mut misses2 = 0; + let mut chunk_cache = Vec::new(); + for (sevent_id, event_id) in chunk { + if let Some(cached) = services() + .rooms + .auth_chain + .get_cached_eventid_authchain(&[sevent_id])? + { + chunk_cache.extend(cached.iter().copied()); + hits2 += 1; + } else { + let auth_chain = self.get_auth_chain_inner(room_id, event_id)?; + services() + .rooms + .auth_chain + .cache_auth_chain(vec![sevent_id], &auth_chain)?; + chunk_cache.extend(auth_chain.iter()); + misses2 += 1; + debug!( + event_id = ?event_id, + chain_length = ?auth_chain.len(), + chunk_cache_length = ?chunk_cache.len(), + elapsed = ?started.elapsed(), + "Cache missed event" + ); + }; + } - Ok(auth_chain) - }) - .try_collect() - .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) - .map_ok(|mut chunk_cache: Vec<_>| { chunk_cache.sort_unstable(); chunk_cache.dedup(); - chunk_cache - }) - .await?; - - self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); - debug!( - chunk_cache_length = ?chunk_cache.len(), - elapsed = ?started.elapsed(), - "Cache missed chunk", - ); - - Ok(chunk_cache) -} - -#[implement(Service)] -#[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] -async fn get_auth_chain_inner( - &self, - room_id: &RoomId, - event_id: &EventId, -) -> Result> { - let mut todo: VecDeque<_> = [event_id.to_owned()].into(); - let mut found = HashSet::new(); - - while let Some(event_id) = todo.pop_front() { - trace!(?event_id, "processing auth event"); - - 
match self.services.timeline.get_pdu(&event_id).await { - | Err(e) => { - debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events"); - }, - | Ok(pdu) => { - if pdu.room_id != room_id { - return Err!(Request(Forbidden(error!( - ?event_id, - ?room_id, - wrong_room_id = ?pdu.room_id, - "auth event for incorrect room" - )))); - } - - for auth_event in &pdu.auth_events { - let sauthevent = self - .services - .short - .get_or_create_shorteventid(auth_event) - .await; - - if found.insert(sauthevent) { - trace!(?event_id, ?auth_event, "adding auth event to processing queue"); - - todo.push_back(auth_event.clone()); - } - } - }, + services() + .rooms + .auth_chain + .cache_auth_chain_vec(chunk_key, &chunk_cache)?; + full_auth_chain.extend(chunk_cache.iter()); + misses += 1; + debug!( + chunk_cache_length = ?chunk_cache.len(), + hits = ?hits2, + misses = ?misses2, + elapsed = ?started.elapsed(), + "Chunk missed", + ); } + + full_auth_chain.sort(); + full_auth_chain.dedup(); + debug!( + chain_length = ?full_auth_chain.len(), + hits = ?hits, + misses = ?misses, + elapsed = ?started.elapsed(), + "done", + ); + + Ok(full_auth_chain) } - Ok(found.into_iter().collect()) + #[tracing::instrument(skip(self, event_id))] + fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + let mut todo = vec![Arc::from(event_id)]; + let mut found = HashSet::new(); + + while let Some(event_id) = todo.pop() { + match services().rooms.timeline.get_pdu(&event_id) { + Ok(Some(pdu)) => { + if pdu.room_id != room_id { + return Err(Error::BadRequest(ErrorKind::forbidden(), "Evil event in db")); + } + for auth_event in &pdu.auth_events { + let sauthevent = services() + .rooms + .short + .get_or_create_shorteventid(auth_event)?; + + if found.insert(sauthevent) { + todo.push(auth_event.clone()); + } + } + }, + Ok(None) => { + warn!(?event_id, "Could not find pdu mentioned in auth events"); + }, + Err(error) => { + error!(?event_id, ?error, "Could not load event in 
auth chain"); + }, + } + } + + Ok(found) + } + + pub fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>> { + self.db.get_cached_eventid_authchain(key) + } + + #[tracing::instrument(skip(self))] + pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) -> Result<()> { + self.db + .cache_auth_chain(key, auth_chain.iter().copied().collect::>()) + } + + #[tracing::instrument(skip(self))] + pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &Vec) -> Result<()> { + self.db + .cache_auth_chain(key, auth_chain.iter().copied().collect::>()) + } } - -#[implement(Service)] -#[inline] -pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result> { - self.db.get_cached_eventid_authchain(key).await -} - -#[implement(Service)] -#[tracing::instrument(skip_all, level = "debug")] -pub fn cache_auth_chain(&self, key: Vec, auth_chain: &HashSet) { - let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); - - self.db.cache_auth_chain(key, val); -} - -#[implement(Service)] -#[tracing::instrument(skip_all, level = "debug")] -pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &[ShortEventId]) { - let val: Arc<[ShortEventId]> = auth_chain.iter().copied().collect(); - - self.db.cache_auth_chain(key, val); -} - -#[implement(Service)] -pub fn get_cache_usage(&self) -> (usize, usize) { - let cache = self.db.auth_chain_cache.lock().expect("locked"); - - (cache.len(), cache.capacity()) -} - -#[implement(Service)] -pub fn clear_cache(&self) { self.db.auth_chain_cache.lock().expect("locked").clear(); } diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs new file mode 100644 index 00000000..691b8604 --- /dev/null +++ b/src/service/rooms/directory/data.rs @@ -0,0 +1,17 @@ +use ruma::{OwnedRoomId, RoomId}; + +use crate::Result; + +pub trait Data: Send + Sync { + /// Adds the room to the public room directory + fn set_public(&self, room_id: &RoomId) -> Result<()>; + + /// Removes the room from the public room 
directory. + fn set_not_public(&self, room_id: &RoomId) -> Result<()>; + + /// Returns true if the room is in the public room directory. + fn is_public_room(&self, room_id: &RoomId) -> Result; + + /// Returns the unsorted public room directory + fn public_rooms<'a>(&'a self) -> Box> + 'a>; +} diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 4ea10641..0efc365c 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,51 +1,24 @@ -use std::sync::Arc; +mod data; -use conduwuit::{Result, implement, utils::stream::TryIgnore}; -use database::Map; -use futures::Stream; -use ruma::{RoomId, api::client::room::Visibility}; +pub use data::Data; +use ruma::{OwnedRoomId, RoomId}; + +use crate::Result; pub struct Service { - db: Data, + pub db: &'static dyn Data, } -struct Data { - publicroomids: Arc, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - publicroomids: args.db["publicroomids"].clone(), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -#[implement(Service)] -pub fn set_public(&self, room_id: &RoomId) { self.db.publicroomids.insert(room_id, []); } - -#[implement(Service)] -pub fn set_not_public(&self, room_id: &RoomId) { self.db.publicroomids.remove(room_id); } - -#[implement(Service)] -pub fn public_rooms(&self) -> impl Stream + Send { - self.db.publicroomids.keys().ignore_err() -} - -#[implement(Service)] -pub async fn is_public_room(&self, room_id: &RoomId) -> bool { - self.visibility(room_id).await == Visibility::Public -} - -#[implement(Service)] -pub async fn visibility(&self, room_id: &RoomId) -> Visibility { - if self.db.publicroomids.get(room_id).await.is_ok() { - Visibility::Public - } else { - Visibility::Private - } +impl Service { + #[tracing::instrument(skip(self))] + pub fn set_public(&self, room_id: &RoomId) -> Result<()> { self.db.set_public(room_id) } + 
+ #[tracing::instrument(skip(self))] + pub fn set_not_public(&self, room_id: &RoomId) -> Result<()> { self.db.set_not_public(room_id) } + + #[tracing::instrument(skip(self))] + pub fn is_public_room(&self, room_id: &RoomId) -> Result { self.db.is_public_room(room_id) } + + #[tracing::instrument(skip(self))] + pub fn public_rooms(&self) -> impl Iterator> + '_ { self.db.public_rooms() } } diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs deleted file mode 100644 index f847015b..00000000 --- a/src/service/rooms/event_handler/acl_check.rs +++ /dev/null @@ -1,42 +0,0 @@ -use conduwuit::{Err, Result, debug, implement, trace, warn}; -use ruma::{ - RoomId, ServerName, - events::{StateEventType, room::server_acl::RoomServerAclEventContent}, -}; - -/// Returns Ok if the acl allows the server -#[implement(super::Service)] -#[tracing::instrument(skip_all, level = "debug")] -pub async fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result { - let Ok(acl_event_content) = self - .services - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomServerAcl, "") - .await - .map(|c: RoomServerAclEventContent| c) - .inspect(|acl| trace!(%room_id, "ACL content found: {acl:?}")) - .inspect_err(|e| trace!(%room_id, "No ACL content found: {e:?}")) - else { - return Ok(()); - }; - - if acl_event_content.allow.is_empty() { - warn!(%room_id, "Ignoring broken ACL event (allow key is empty)"); - return Ok(()); - } - - if acl_event_content.deny.contains(&String::from("*")) - && acl_event_content.allow.contains(&String::from("*")) - { - warn!(%room_id, "Ignoring broken ACL event (allow key and deny key both contain wildcard \"*\""); - return Ok(()); - } - - if acl_event_content.is_allowed(server_name) { - trace!("server {server_name} is allowed by ACL"); - Ok(()) - } else { - debug!("Server {server_name} was denied by room ACL in {room_id}"); - Err!(Request(Forbidden("Server was denied by room ACL"))) - } 
-} diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs deleted file mode 100644 index b0a7d827..00000000 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ /dev/null @@ -1,221 +0,0 @@ -use std::{ - collections::{BTreeMap, HashSet, VecDeque, hash_map}, - time::Instant, -}; - -use conduwuit::{ - PduEvent, debug, debug_error, debug_warn, implement, pdu, trace, - utils::continue_exponential_backoff_secs, warn, -}; -use ruma::{ - CanonicalJsonValue, OwnedEventId, RoomId, ServerName, api::federation::event::get_event, -}; - -use super::get_room_version_id; - -/// Find the event and auth it. Once the event is validated (steps 1 - 8) -/// it is appended to the outliers Tree. -/// -/// Returns pdu and if we fetched it over federation the raw json. -/// -/// a. Look in the main timeline (pduid_pdu tree) -/// b. Look at outlier pdu tree -/// c. Ask origin server over federation -/// d. TODO: Ask other servers over federation? -#[implement(super::Service)] -pub(super) async fn fetch_and_handle_outliers<'a>( - &self, - origin: &'a ServerName, - events: &'a [OwnedEventId], - create_event: &'a PduEvent, - room_id: &'a RoomId, -) -> Vec<(PduEvent, Option>)> { - let back_off = |id| match self - .services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry(id) - { - | hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - }, - | hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)); - }, - }; - - let mut events_with_auth_events = Vec::with_capacity(events.len()); - for id in events { - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { - trace!("Found {id} in db"); - events_with_auth_events.push((id, Some(local_pdu), vec![])); - continue; - } - - // c. 
Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. - let mut todo_auth_events: VecDeque<_> = [id.clone()].into(); - let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len()); - let mut events_all = HashSet::with_capacity(todo_auth_events.len()); - while let Some(next_id) = todo_auth_events.pop_front() { - if let Some((time, tries)) = self - .services - .globals - .bad_event_ratelimiter - .read() - .expect("locked") - .get(&*next_id) - { - // Exponential backoff - const MIN_DURATION: u64 = 60 * 2; - const MAX_DURATION: u64 = 60 * 60 * 8; - if continue_exponential_backoff_secs( - MIN_DURATION, - MAX_DURATION, - time.elapsed(), - *tries, - ) { - debug_warn!( - tried = ?*tries, - elapsed = ?time.elapsed(), - "Backing off from {next_id}", - ); - continue; - } - } - - if events_all.contains(&next_id) { - continue; - } - - if self.services.timeline.pdu_exists(&next_id).await { - trace!("Found {next_id} in db"); - continue; - } - - debug!("Fetching {next_id} over federation."); - match self - .services - .sending - .send_federation_request(origin, get_event::v1::Request { - event_id: (*next_id).to_owned(), - include_unredacted_content: None, - }) - .await - { - | Ok(res) => { - debug!("Got {next_id} over federation"); - let Ok(room_version_id) = get_room_version_id(create_event) else { - back_off((*next_id).to_owned()); - continue; - }; - - let Ok((calculated_event_id, value)) = - pdu::gen_event_id_canonical_json(&res.pdu, &room_version_id) - else { - back_off((*next_id).to_owned()); - continue; - }; - - if calculated_event_id != *next_id { - warn!( - "Server didn't return event id we requested: requested: {next_id}, \ - we got {calculated_event_id}. 
Event: {:?}", - &res.pdu - ); - } - - if let Some(auth_events) = value - .get("auth_events") - .and_then(CanonicalJsonValue::as_array) - { - for auth_event in auth_events { - match serde_json::from_value::( - auth_event.clone().into(), - ) { - | Ok(auth_event) => { - todo_auth_events.push_back(auth_event); - }, - | _ => { - warn!("Auth event id is not valid"); - }, - } - } - } else { - warn!("Auth event list invalid"); - } - - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - }, - | Err(e) => { - debug_error!("Failed to fetch event {next_id}: {e}"); - back_off((*next_id).to_owned()); - }, - } - } - events_with_auth_events.push((id, None, events_in_reverse_order)); - } - - let mut pdus = Vec::with_capacity(events_with_auth_events.len()); - for (id, local_pdu, events_in_reverse_order) in events_with_auth_events { - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Some(local_pdu) = local_pdu { - trace!("Found {id} in db"); - pdus.push((local_pdu.clone(), None)); - } - - for (next_id, value) in events_in_reverse_order.into_iter().rev() { - if let Some((time, tries)) = self - .services - .globals - .bad_event_ratelimiter - .read() - .expect("locked") - .get(&*next_id) - { - // Exponential backoff - const MIN_DURATION: u64 = 5 * 60; - const MAX_DURATION: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs( - MIN_DURATION, - MAX_DURATION, - time.elapsed(), - *tries, - ) { - debug!("Backing off from {next_id}"); - continue; - } - } - - match Box::pin(self.handle_outlier_pdu( - origin, - create_event, - &next_id, - room_id, - value.clone(), - true, - )) - .await - { - | Ok((pdu, json)) => - if next_id == *id { - pdus.push((pdu, Some(json))); - }, - | Err(e) => { - warn!("Authentication of event {next_id} failed: {e:?}"); - back_off(next_id); - }, - } - } - } - pdus -} diff --git a/src/service/rooms/event_handler/fetch_prev.rs 
b/src/service/rooms/event_handler/fetch_prev.rs deleted file mode 100644 index 0f92d6e6..00000000 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ /dev/null @@ -1,115 +0,0 @@ -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; - -use conduwuit::{ - PduEvent, Result, debug_warn, err, implement, - state_res::{self}, -}; -use futures::{FutureExt, future}; -use ruma::{ - CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, int, - uint, -}; - -use super::check_room_id; - -#[implement(super::Service)] -#[tracing::instrument( - level = "debug", - skip_all, - fields(%origin), -)] -#[allow(clippy::type_complexity)] -pub(super) async fn fetch_prev( - &self, - origin: &ServerName, - create_event: &PduEvent, - room_id: &RoomId, - first_ts_in_room: UInt, - initial_set: Vec, -) -> Result<( - Vec, - HashMap)>, -)> { - let mut graph: HashMap = HashMap::with_capacity(initial_set.len()); - let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: VecDeque = initial_set.into(); - - let mut amount = 0; - - while let Some(prev_event_id) = todo_outlier_stack.pop_front() { - self.services.server.check_running()?; - - match self - .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id) - .boxed() - .await - .pop() - { - | Some((pdu, mut json_opt)) => { - check_room_id(room_id, &pdu)?; - - let limit = self.services.server.config.max_fetch_prev_events; - if amount > limit { - debug_warn!("Max prev event limit reached! 
Limit: {limit}"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if json_opt.is_none() { - json_opt = self - .services - .outlier - .get_outlier_pdu_json(&prev_event_id) - .await - .ok(); - } - - if let Some(json) = json_opt { - if pdu.origin_server_ts > first_ts_in_room { - amount = amount.saturating_add(1); - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push_back(prev_prev.clone()); - } - } - - graph.insert( - prev_event_id.clone(), - pdu.prev_events.iter().cloned().collect(), - ); - } else { - // Time based check failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed, so this was not fetched over federation - graph.insert(prev_event_id.clone(), HashSet::new()); - } - }, - | _ => { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); - }, - } - } - - let event_fetch = |event_id| { - let origin_server_ts = eventid_info - .get(&event_id) - .cloned() - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts); - - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. 
- future::ok((int!(0), MilliSecondsSinceUnixEpoch(origin_server_ts))) - }; - - let sorted = state_res::lexicographical_topological_sort(&graph, &event_fetch) - .await - .map_err(|e| err!(Database(error!("Error sorting prev events: {e}"))))?; - - Ok((sorted, eventid_info)) -} diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs deleted file mode 100644 index 0f9e093b..00000000 --- a/src/service/rooms/event_handler/fetch_state.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::collections::{HashMap, hash_map}; - -use conduwuit::{Err, Error, PduEvent, Result, debug, debug_warn, implement}; -use futures::FutureExt; -use ruma::{ - EventId, OwnedEventId, RoomId, ServerName, api::federation::event::get_room_state_ids, - events::StateEventType, -}; - -use crate::rooms::short::ShortStateKey; - -/// Call /state_ids to find out what the state at this pdu is. We trust the -/// server's response to some extend (sic), but we still do a lot of checks -/// on the events -#[implement(super::Service)] -#[tracing::instrument( - level = "debug", - skip_all, - fields(%origin), -)] -pub(super) async fn fetch_state( - &self, - origin: &ServerName, - create_event: &PduEvent, - room_id: &RoomId, - event_id: &EventId, -) -> Result>> { - let res = self - .services - .sending - .send_federation_request(origin, get_room_state_ids::v1::Request { - room_id: room_id.to_owned(), - event_id: event_id.to_owned(), - }) - .await - .inspect_err(|e| debug_warn!("Fetching state for event failed: {e}"))?; - - debug!("Fetching state events"); - let state_vec = self - .fetch_and_handle_outliers(origin, &res.pdu_ids, create_event, room_id) - .boxed() - .await; - - let mut state: HashMap = HashMap::with_capacity(state_vec.len()); - for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; - - let shortstatekey = self - .services - .short - 
.get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key) - .await; - - match state.entry(shortstatekey) { - | hash_map::Entry::Vacant(v) => { - v.insert(pdu.event_id.clone()); - }, - | hash_map::Entry::Occupied(_) => { - return Err!(Database( - "State event's type and state_key combination exists multiple times.", - )); - }, - } - } - - // The original create event must still be in the state - let create_shortstatekey = self - .services - .short - .get_shortstatekey(&StateEventType::RoomCreate, "") - .await?; - - if state.get(&create_shortstatekey) != Some(&create_event.event_id) { - return Err!(Database("Incoming event refers to wrong create event.")); - } - - Ok(Some(state)) -} diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs deleted file mode 100644 index 77cae41d..00000000 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ /dev/null @@ -1,202 +0,0 @@ -use std::{ - collections::{BTreeMap, hash_map}, - time::Instant, -}; - -use conduwuit::{ - Err, Result, debug, debug::INFO_SPAN_LEVEL, defer, err, implement, utils::stream::IterStream, - warn, -}; -use futures::{ - FutureExt, TryFutureExt, TryStreamExt, - future::{OptionFuture, try_join5}, -}; -use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UserId, events::StateEventType}; - -use crate::rooms::timeline::RawPduId; - -/// When receiving an event one needs to: -/// 0. Check the server is in the room -/// 1. Skip the PDU if we already know about it -/// 1.1. Remove unsigned field -/// 2. Check signatures, otherwise drop -/// 3. Check content hash, redact if doesn't match -/// 4. Fetch any missing auth events doing all checks listed here starting at 1. -/// These are not timeline events -/// 5. Reject "due to auth events" if can't get all the auth events or some of -/// the auth events are also rejected "due to auth events" -/// 6. 
Reject "due to auth events" if the event doesn't pass auth based on the -/// auth events -/// 7. Persist this event as an outlier -/// 8. If not timeline event: stop -/// 9. Fetch any missing prev events doing all checks listed here starting at 1. -/// These are timeline events -/// 10. Fetch missing state and auth chain events by calling `/state_ids` at -/// backwards extremities doing all the checks in this list starting at -/// 1. These are not timeline events -/// 11. Check the auth of the event passes based on the state of the event -/// 12. Ensure that the state is derived from the previous current state (i.e. -/// we calculated by doing state res where one of the inputs was a -/// previously trusted set of state, don't just trust a set of state we got -/// from a remote) -/// 13. Use state resolution to find new room state -/// 14. Check if the event passes auth based on the "current state" of the room, -/// if not soft fail it -#[implement(super::Service)] -#[tracing::instrument( - name = "pdu", - level = INFO_SPAN_LEVEL, - skip_all, - fields(%room_id, %event_id), -)] -pub async fn handle_incoming_pdu<'a>( - &self, - origin: &'a ServerName, - room_id: &'a RoomId, - event_id: &'a EventId, - value: BTreeMap, - is_timeline_event: bool, -) -> Result> { - // 1. 
Skip the PDU if we already have it as a timeline event - if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await { - return Ok(Some(pdu_id)); - } - - // 1.1 Check the server is in the room - let meta_exists = self.services.metadata.exists(room_id).map(Ok); - - // 1.2 Check if the room is disabled - let is_disabled = self.services.metadata.is_disabled(room_id).map(Ok); - - // 1.3.1 Check room ACL on origin field/server - let origin_acl_check = self.acl_check(origin, room_id); - - // 1.3.2 Check room ACL on sender's server name - let sender: &UserId = value - .get("sender") - .try_into() - .map_err(|e| err!(Request(InvalidParam("PDU does not have a valid sender key: {e}"))))?; - - let sender_acl_check: OptionFuture<_> = sender - .server_name() - .ne(origin) - .then(|| self.acl_check(sender.server_name(), room_id)) - .into(); - - // Fetch create event - let create_event = - self.services - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, ""); - - let (meta_exists, is_disabled, (), (), ref create_event) = try_join5( - meta_exists, - is_disabled, - origin_acl_check, - sender_acl_check.map(|o| o.unwrap_or(Ok(()))), - create_event, - ) - .await?; - - if !meta_exists { - return Err!(Request(NotFound("Room is unknown to this server"))); - } - - if is_disabled { - return Err!(Request(Forbidden("Federation of this room is disabled by this server."))); - } - - let (incoming_pdu, val) = self - .handle_outlier_pdu(origin, create_event, event_id, room_id, value, false) - .await?; - - // 8. if not timeline event: stop - if !is_timeline_event { - return Ok(None); - } - - // Skip old events - let first_ts_in_room = self - .services - .timeline - .first_pdu_in_room(room_id) - .await? - .origin_server_ts; - - if incoming_pdu.origin_server_ts < first_ts_in_room { - return Ok(None); - } - - // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
- // These are timeline events - let (sorted_prev_events, mut eventid_info) = self - .fetch_prev( - origin, - create_event, - room_id, - first_ts_in_room, - incoming_pdu.prev_events.clone(), - ) - .await?; - - debug!( - events = ?sorted_prev_events, - "Handling previous events" - ); - - sorted_prev_events - .iter() - .try_stream() - .map_ok(AsRef::as_ref) - .try_for_each(|prev_id| { - self.handle_prev_pdu( - origin, - event_id, - room_id, - eventid_info.remove(prev_id), - create_event, - first_ts_in_room, - prev_id, - ) - .inspect_err(move |e| { - warn!("Prev {prev_id} failed: {e}"); - match self - .services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry(prev_id.into()) - { - | hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - }, - | hash_map::Entry::Occupied(mut e) => { - let tries = e.get().1.saturating_add(1); - *e.get_mut() = (Instant::now(), tries); - }, - } - }) - .map(|_| self.services.server.check_running()) - }) - .boxed() - .await?; - - // Done with prev events, now handling the incoming event - let start_time = Instant::now(); - self.federation_handletime - .write() - .expect("locked") - .insert(room_id.into(), (event_id.to_owned(), start_time)); - - defer! 
{{ - self.federation_handletime - .write() - .expect("locked") - .remove(room_id); - }}; - - self.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, create_event, origin, room_id) - .boxed() - .await -} diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs deleted file mode 100644 index 5339249d..00000000 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ /dev/null @@ -1,157 +0,0 @@ -use std::collections::{BTreeMap, HashMap, hash_map}; - -use conduwuit::{ - Err, Error, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, -}; -use futures::future::ready; -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, - api::client::error::ErrorKind, events::StateEventType, -}; - -use super::{check_room_id, get_room_version_id, to_room_version}; - -#[implement(super::Service)] -#[allow(clippy::too_many_arguments)] -pub(super) async fn handle_outlier_pdu<'a>( - &self, - origin: &'a ServerName, - create_event: &'a PduEvent, - event_id: &'a EventId, - room_id: &'a RoomId, - mut value: CanonicalJsonObject, - auth_events_known: bool, -) -> Result<(PduEvent, BTreeMap)> { - // 1. Remove unsigned field - value.remove("unsigned"); - - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // 2. Check signatures, otherwise drop - // 3. 
check content hash, redact if doesn't match - let room_version_id = get_room_version_id(create_event)?; - let mut val = match self - .services - .server_keys - .verify_event(&value, Some(&room_version_id)) - .await - { - | Ok(ruma::signatures::Verified::All) => value, - | Ok(ruma::signatures::Verified::Signatures) => { - // Redact - debug_info!("Calculated hash does not match (redaction): {event_id}"); - let Ok(obj) = ruma::canonical_json::redact(value, &room_version_id, None) else { - return Err!(Request(InvalidParam("Redaction failed"))); - }; - - // Skip the PDU if it is redacted and we already have it as an outlier event - if self.services.timeline.pdu_exists(event_id).await { - return Err!(Request(InvalidParam( - "Event was redacted and we already knew about it" - ))); - } - - obj - }, - | Err(e) => { - return Err!(Request(InvalidParam(debug_error!( - "Signature verification failed for {event_id}: {e}" - )))); - }, - }; - - // Now that we have checked the signature and hashes we can add the eventID and - // convert to our PduEvent type - val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); - let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|e| err!(Request(BadJson(debug_warn!("Event is not a valid PDU: {e}")))))?; - - check_room_id(room_id, &incoming_pdu)?; - - if !auth_events_known { - // 4. fetch any missing auth events doing all checks listed here starting at 1. - // These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of - // the auth events are also rejected "due to auth events" - // NOTE: Step 5 is not applied anymore because it failed too often - debug!("Fetching auth events"); - Box::pin(self.fetch_and_handle_outliers( - origin, - &incoming_pdu.auth_events, - create_event, - room_id, - )) - .await; - } - - // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the - // auth events - debug!("Checking based on auth events"); - // Build map of auth events - let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); - for id in &incoming_pdu.auth_events { - let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { - warn!("Could not find auth event {id}"); - continue; - }; - - check_room_id(room_id, &auth_event)?; - - match auth_events.entry(( - auth_event.kind.to_string().into(), - auth_event - .state_key - .clone() - .expect("all auth events have state keys"), - )) { - | hash_map::Entry::Vacant(v) => { - v.insert(auth_event); - }, - | hash_map::Entry::Occupied(_) => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Auth event's type and state_key combination exists multiple times.", - )); - }, - } - } - - // The original create event must be in the auth events - if !matches!( - auth_events.get(&(StateEventType::RoomCreate, String::new().into())), - Some(_) | None - ) { - return Err!(Request(InvalidParam("Incoming event refers to wrong create event."))); - } - - let state_fetch = |ty: &StateEventType, sk: &str| { - let key = (ty.to_owned(), sk.into()); - ready(auth_events.get(&key)) - }; - - let auth_check = state_res::event_auth::auth_check( - &to_room_version(&room_version_id), - &incoming_pdu, - None, // TODO: third party invite - state_fetch, - ) - .await - .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; - - if !auth_check { - return Err!(Request(Forbidden("Auth check failed"))); - } - - trace!("Validation successful."); - - // 7. Persist the event as an outlier. 
- self.services - .outlier - .add_pdu_outlier(&incoming_pdu.event_id, &val); - - trace!("Added pdu as outlier."); - - Ok((incoming_pdu, val)) -} diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs deleted file mode 100644 index d612b2bf..00000000 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ /dev/null @@ -1,88 +0,0 @@ -use std::{collections::BTreeMap, time::Instant}; - -use conduwuit::{ - Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, defer, implement, - utils::continue_exponential_backoff_secs, -}; -use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UInt}; - -#[implement(super::Service)] -#[allow(clippy::type_complexity)] -#[allow(clippy::too_many_arguments)] -#[tracing::instrument( - name = "prev", - level = INFO_SPAN_LEVEL, - skip_all, - fields(%prev_id), -)] -pub(super) async fn handle_prev_pdu<'a>( - &self, - origin: &'a ServerName, - event_id: &'a EventId, - room_id: &'a RoomId, - eventid_info: Option<(PduEvent, BTreeMap)>, - create_event: &'a PduEvent, - first_ts_in_room: UInt, - prev_id: &'a EventId, -) -> Result { - // Check for disabled again because it might have changed - if self.services.metadata.is_disabled(room_id).await { - return Err!(Request(Forbidden(debug_warn!( - "Federaton of room {room_id} is currently disabled on this server. 
Request by \ - origin {origin} and event ID {event_id}" - )))); - } - - if let Some((time, tries)) = self - .services - .globals - .bad_event_ratelimiter - .read() - .expect("locked") - .get(prev_id) - { - // Exponential backoff - const MIN_DURATION: u64 = 5 * 60; - const MAX_DURATION: u64 = 60 * 60 * 24; - if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) { - debug!( - ?tries, - duration = ?time.elapsed(), - "Backing off from prev_event" - ); - return Ok(()); - } - } - - let Some((pdu, json)) = eventid_info else { - return Ok(()); - }; - - // Skip old events - if pdu.origin_server_ts < first_ts_in_room { - return Ok(()); - } - - let start_time = Instant::now(); - self.federation_handletime - .write() - .expect("locked") - .insert(room_id.into(), ((*prev_id).to_owned(), start_time)); - - defer! {{ - self.federation_handletime - .write() - .expect("locked") - .remove(room_id); - }}; - - self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) - .await?; - - debug!( - elapsed = ?start_time.elapsed(), - "Handled prev_event", - ); - - Ok(()) -} diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 45675da8..56b9260e 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,130 +1,1339 @@ -mod acl_check; -mod fetch_and_handle_outliers; -mod fetch_prev; -mod fetch_state; -mod handle_incoming_pdu; -mod handle_outlier_pdu; -mod handle_prev_pdu; -mod parse_incoming_pdu; -mod resolve_state; -mod state_at_incoming; -mod upgrade_outlier_pdu; - use std::{ - collections::HashMap, - fmt::Write, - sync::{Arc, RwLock as StdRwLock}, - time::Instant, + cmp, + collections::{hash_map, HashSet}, + pin::Pin, + time::{Duration, Instant}, }; -use async_trait::async_trait; -use conduwuit::{Err, PduEvent, Result, RoomVersion, Server, utils::MutexMap}; +use futures_util::Future; use ruma::{ - OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, - 
events::room::create::RoomCreateEventContent, + api::{ + client::error::ErrorKind, + federation::event::{get_event, get_room_state_ids}, + }, + events::{ + room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, + StateEventType, + }, + int, + serde::Base64, + state_res::{self, RoomVersion, StateMap}, + uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, +}; +use tokio::sync::RwLock; +use tracing::{debug, error, info, trace, warn}; + +use super::state_compressor::CompressedStateEvent; +use crate::{ + service::{pdu, Arc, BTreeMap, HashMap, Result}, + services, Error, PduEvent, }; -use crate::{Dep, globals, rooms, sending, server_keys}; +pub mod signing_keys; +pub struct Service; -pub struct Service { - pub mutex_federation: RoomMutexMap, - pub federation_handletime: StdRwLock, - services: Services, -} +// We use some AsyncRecursiveType hacks here so we can call async funtion +// recursively. +type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; +type AsyncRecursiveCanonicalJsonVec<'a> = + AsyncRecursiveType<'a, Vec<(Arc, Option>)>>; +type AsyncRecursiveCanonicalJsonResult<'a> = + AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>>; -struct Services { - globals: Dep, - sending: Dep, - auth_chain: Dep, - metadata: Dep, - outlier: Dep, - pdu_metadata: Dep, - server_keys: Dep, - short: Dep, - state: Dep, - state_accessor: Dep, - state_compressor: Dep, - timeline: Dep, - server: Arc, -} +impl Service { + /// When receiving an event one needs to: + /// 0. Check the server is in the room + /// 1. Skip the PDU if we already know about it + /// 1.1. Remove unsigned field + /// 2. Check signatures, otherwise drop + /// 3. Check content hash, redact if doesn't match + /// 4. Fetch any missing auth events doing all checks listed here starting + /// at 1. These are not timeline events + /// 5. 
Reject "due to auth events" if can't get all the auth events or some + /// of the auth events are also rejected "due to auth events" + /// 6. Reject "due to auth events" if the event doesn't pass auth based on + /// the auth events + /// 7. Persist this event as an outlier + /// 8. If not timeline event: stop + /// 9. Fetch any missing prev events doing all checks listed here starting + /// at 1. These are timeline events + /// 10. Fetch missing state and auth chain events by calling `/state_ids` at + /// backwards extremities doing all the checks in this list starting at + /// 1. These are not timeline events + /// 11. Check the auth of the event passes based on the state of the event + /// 12. Ensure that the state is derived from the previous current state + /// (i.e. we calculated by doing state res where one of the inputs was a + /// previously trusted set of state, don't just trust a set of state we + /// got from a remote) + /// 13. Use state resolution to find new room state + /// 14. Check if the event passes auth based on the "current state" of the + /// room, if not soft fail it + #[tracing::instrument(skip(self, origin, value, is_timeline_event, pub_key_map), name = "pdu")] + pub(crate) async fn handle_incoming_pdu<'a>( + &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, + value: BTreeMap, is_timeline_event: bool, + pub_key_map: &'a RwLock>>, + ) -> Result>> { + // 1. Skip the PDU if we already have it as a timeline event + if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { + return Ok(Some(pdu_id)); + } -type RoomMutexMap = MutexMap; -type HandleTimeMap = HashMap; + // 1.1 Check the server is in the room + if !services().rooms.metadata.exists(room_id)? 
{ + return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); + } -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - mutex_federation: RoomMutexMap::new(), - federation_handletime: HandleTimeMap::new().into(), - services: Services { - globals: args.depend::("globals"), - sending: args.depend::("sending"), - auth_chain: args.depend::("rooms::auth_chain"), - metadata: args.depend::("rooms::metadata"), - outlier: args.depend::("rooms::outlier"), - server_keys: args.depend::("server_keys"), - pdu_metadata: args.depend::("rooms::pdu_metadata"), - short: args.depend::("rooms::short"), - state: args.depend::("rooms::state"), - state_accessor: args - .depend::("rooms::state_accessor"), - state_compressor: args - .depend::("rooms::state_compressor"), - timeline: args.depend::("rooms::timeline"), - server: args.server.clone(), - }, - })) + // 1.2 Check if the room is disabled + if services().rooms.metadata.is_disabled(room_id)? { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Federation of this room is currently disabled on this server.", + )); + } + + // 1.3 Check room ACL + services().rooms.event_handler.acl_check(origin, room_id)?; + + // Fetch create event + let create_event = services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "")? + .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; + + // Procure the room version + let room_version_id = self.get_room_version_id(&create_event)?; + + let first_pdu_in_room = services() + .rooms + .timeline + .first_pdu_in_room(room_id)? + .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + + let (incoming_pdu, val) = self + .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false, pub_key_map) + .await?; + + self.check_room_id(room_id, &incoming_pdu)?; + + // 8. 
if not timeline event: stop + if !is_timeline_event { + return Ok(None); + } + // Skip old events + if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + return Ok(None); + } + + // 9. Fetch any missing prev events doing all checks listed here starting at 1. + // These are timeline events + let (sorted_prev_events, mut eventid_info) = self + .fetch_prev( + origin, + &create_event, + room_id, + &room_version_id, + pub_key_map, + incoming_pdu.prev_events.clone(), + ) + .await?; + + debug!(events = ?sorted_prev_events, "Got previous events"); + for prev_id in sorted_prev_events { + match self + .handle_prev_pdu( + origin, + event_id, + room_id, + pub_key_map, + &mut eventid_info, + &create_event, + &first_pdu_in_room, + &prev_id, + ) + .await + { + Ok(()) => continue, + Err(e) => { + warn!("Prev event {} failed: {}", prev_id, e); + match services() + .globals + .bad_event_ratelimiter + .write() + .await + .entry((*prev_id).to_owned()) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1); + }, + }; + }, + } + } + + // Done with prev events, now handling the incoming event + let start_time = Instant::now(); + services() + .globals + .roomid_federationhandletime + .write() + .await + .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + + let r = services() + .rooms + .event_handler + .upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id, pub_key_map) + .await; + + services() + .globals + .roomid_federationhandletime + .write() + .await + .remove(&room_id.to_owned()); + + r } - async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { - let mutex_federation = self.mutex_federation.len(); - writeln!(out, "federation_mutex: {mutex_federation}")?; + #[allow(clippy::type_complexity)] + #[allow(clippy::too_many_arguments)] + #[tracing::instrument( + skip(self, origin, event_id, room_id, 
pub_key_map, eventid_info, create_event, first_pdu_in_room), + name = "prev" + )] + pub(crate) async fn handle_prev_pdu<'a>( + &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, + pub_key_map: &'a RwLock>>, + eventid_info: &mut HashMap, (Arc, BTreeMap)>, + create_event: &Arc, first_pdu_in_room: &Arc, prev_id: &EventId, + ) -> Result<()> { + // Check for disabled again because it might have changed + if services().rooms.metadata.is_disabled(room_id)? { + debug!( + "Federaton of room {room_id} is currently disabled on this server. Request by origin {origin} and \ + event ID {event_id}" + ); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Federation of this room is currently disabled on this server.", + )); + } - let federation_handletime = self - .federation_handletime + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter .read() - .expect("locked for reading") - .len(); - writeln!(out, "federation_handletime: {federation_handletime}")?; + .await + .get(prev_id) + { + // Exponential backoff + const MAX_DURATION: Duration = Duration::from_secs(60 * 60 * 24); + let min_duration = cmp::min(MAX_DURATION, Duration::from_secs(5 * 60) * (*tries) * (*tries)); + let duration = time.elapsed(); + if duration < min_duration { + debug!( + duration = ?duration, + min_duration = ?min_duration, + "Backing off from prev_event" + ); + return Ok(()); + } + } + + if let Some((pdu, json)) = eventid_info.remove(prev_id) { + // Skip old events + if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + return Ok(()); + } + + let start_time = Instant::now(); + services() + .globals + .roomid_federationhandletime + .write() + .await + .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + + self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id, pub_key_map) + .await?; + + services() + .globals + .roomid_federationhandletime + .write() + .await + .remove(&room_id.to_owned()); + + debug!( + 
elapsed = ?start_time.elapsed(), + "Handled prev_event", + ); + } Ok(()) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} + #[allow(clippy::too_many_arguments)] + fn handle_outlier_pdu<'a>( + &'a self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, + mut value: BTreeMap, auth_events_known: bool, + pub_key_map: &'a RwLock>>, + ) -> AsyncRecursiveCanonicalJsonResult<'a> { + Box::pin(async move { + // 1. Remove unsigned field + value.remove("unsigned"); -impl Service { - async fn event_exists(&self, event_id: OwnedEventId) -> bool { - self.services.timeline.pdu_exists(&event_id).await + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + + // 2. Check signatures, otherwise drop + // 3. check content hash, redact if doesn't match + let room_version_id = self.get_room_version_id(create_event)?; + + let guard = pub_key_map.read().await; + let mut val = match ruma::signatures::verify_event(&guard, &value, &room_version_id) { + Err(e) => { + // Drop + warn!("Dropping bad event {}: {}", event_id, e,); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Signature verification failed")); + }, + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + warn!("Calculated hash does not match: {}", event_id); + let Ok(obj) = ruma::canonical_json::redact(value, &room_version_id, None) else { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Redaction failed")); + }; + + // Skip the PDU if it is redacted and we already have it as an outlier event + if services().rooms.timeline.get_pdu_json(event_id)?.is_some() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event was redacted and we already knew about it", + )); + } + + obj + }, + Ok(ruma::signatures::Verified::All) => value, + }; + + drop(guard); + + // Now that we have checked the signature and hashes we can add the eventID and + // 
convert to our PduEvent type + val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + let incoming_pdu = serde_json::from_value::( + serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; + + self.check_room_id(room_id, &incoming_pdu)?; + + if !auth_events_known { + // 4. fetch any missing auth events doing all checks listed here starting at 1. + // These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of + // the auth events are also rejected "due to auth events" + // NOTE: Step 5 is not applied anymore because it failed too often + debug!("Fetching auth events"); + self.fetch_and_handle_outliers( + origin, + &incoming_pdu + .auth_events + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + &room_version_id, + pub_key_map, + ) + .await; + } + + // 6. Reject "due to auth events" if the event doesn't pass auth based on the + // auth events + debug!("Checking based on auth events"); + // Build map of auth events + let mut auth_events = HashMap::new(); + for id in &incoming_pdu.auth_events { + let Some(auth_event) = services().rooms.timeline.get_pdu(id)? 
else { + warn!("Could not find auth event {}", id); + continue; + }; + + self.check_room_id(room_id, &auth_event)?; + + match auth_events.entry(( + auth_event.kind.to_string().into(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + hash_map::Entry::Vacant(v) => { + v.insert(auth_event); + }, + hash_map::Entry::Occupied(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth event's type and state_key combination exists multiple times.", + )); + }, + } + } + + // The original create event must be in the auth events + if !matches!( + auth_events + .get(&(StateEventType::RoomCreate, String::new())) + .map(AsRef::as_ref), + Some(_) | None + ) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Incoming event refers to wrong create event.", + )); + } + + if !state_res::event_auth::auth_check( + &self.to_room_version(&room_version_id), + &incoming_pdu, + None::, // TODO: third party invite + |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? + { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); + } + + trace!("Validation successful."); + + // 7. Persist the event as an outlier. 
+ services() + .rooms + .outlier + .add_pdu_outlier(&incoming_pdu.event_id, &val)?; + + trace!("Added pdu as outlier."); + + Ok((Arc::new(incoming_pdu), val)) + }) } - async fn event_fetch(&self, event_id: OwnedEventId) -> Option { - self.services.timeline.get_pdu(&event_id).await.ok() - } -} + pub async fn upgrade_outlier_to_timeline_pdu( + &self, incoming_pdu: Arc, val: BTreeMap, create_event: &PduEvent, + origin: &ServerName, room_id: &RoomId, pub_key_map: &RwLock>>, + ) -> Result>> { + // Skip the PDU if we already have it as a timeline event + if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) { + return Ok(Some(pduid)); + } -fn check_room_id(room_id: &RoomId, pdu: &PduEvent) -> Result { - if pdu.room_id != room_id { - return Err!(Request(InvalidParam(error!( - pdu_event_id = ?pdu.event_id, - pdu_room_id = ?pdu.room_id, - ?room_id, - "Found event from room in room", - )))); + if services() + .rooms + .pdu_metadata + .is_event_soft_failed(&incoming_pdu.event_id)? + { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + } + + debug!("Upgrading to timeline pdu"); + let timer = tokio::time::Instant::now(); + let room_version_id = self.get_room_version_id(create_event)?; + + // 10. Fetch missing state and auth chain events by calling /state_ids at + // backwards extremities doing all the checks in this list starting at 1. + // These are not timeline events. + + debug!("Resolving state at event"); + let mut state_at_incoming_event = if incoming_pdu.prev_events.len() == 1 { + self.state_at_incoming_degree_one(&incoming_pdu).await? + } else { + self.state_at_incoming_resolved(&incoming_pdu, room_id, &room_version_id) + .await? 
+ }; + + if state_at_incoming_event.is_none() { + state_at_incoming_event = self + .fetch_state( + origin, + create_event, + room_id, + &room_version_id, + pub_key_map, + &incoming_pdu.event_id, + ) + .await?; + } + + let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); + let room_version = self.to_room_version(&room_version_id); + + debug!("Performing auth check"); + // 11. Check the auth of the event passes based on the state of the event + let check_result = state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| { + services() + .rooms + .short + .get_shortstatekey(&k.to_string().into(), s) + .ok() + .flatten() + .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) + .and_then(|event_id| services().rooms.timeline.get_pdu(event_id).ok().flatten()) + }, + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; + + if !check_result { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event has failed auth check with state at the event.", + )); + } + + debug!("Gathering auth events"); + let auth_events = services().rooms.state.get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + )?; + + // Soft fail check before doing state res + debug!("Performing soft-fail check"); + let soft_fail = !state_res::event_auth::auth_check(&room_version, &incoming_pdu, None::, |k, s| { + auth_events.get(&(k.clone(), s.to_owned())) + }) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; + + // 13. 
Use state resolution to find new room state + + // We start looking at current room state now, so lets lock the room + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .await + .entry(room_id.to_owned()) + .or_default(), + ); + + trace!("Locking the room"); + let state_lock = mutex_state.lock().await; + + // Now we calculate the set of extremities this room has after the incoming + // event has been applied. We start with the previous extremities (aka leaves) + trace!("Calculating extremities"); + let mut extremities = services().rooms.state.get_forward_extremities(room_id)?; + trace!("Calculated {} extremities", extremities.len()); + + // Remove any forward extremities that are referenced by this incoming event's + // prev_events + for prev_event in &incoming_pdu.prev_events { + extremities.remove(prev_event); + } + + // Only keep those extremities were not referenced yet + extremities.retain(|id| { + !matches!( + services() + .rooms + .pdu_metadata + .is_event_referenced(room_id, id), + Ok(true) + ) + }); + debug!("Retained {} extremities. Compressing state", extremities.len()); + let state_ids_compressed = Arc::new( + state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + services() + .rooms + .state_compressor + .compress_state_event(*shortstatekey, id) + }) + .collect::>()?, + ); + + if incoming_pdu.state_key.is_some() { + debug!("Event is a state-event. 
Deriving new room state"); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)?; + + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); + } + + let new_room_state = self + .resolve_state(room_id, &room_version_id, state_after) + .await?; + + // Set the new room state to the resolved state + debug!("Forcing new room state"); + let (sstatehash, new, removed) = services() + .rooms + .state_compressor + .save_state(room_id, new_room_state)?; + + services() + .rooms + .state + .force_state(room_id, sstatehash, new, removed, &state_lock) + .await?; + } + + // 14. Check if the event passes auth based on the "current state" of the room, + // if not soft fail it + if soft_fail { + debug!("Soft failing event"); + services() + .rooms + .timeline + .append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(|e| (**e).to_owned()).collect(), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .await?; + + // Soft fail, we keep the event as an outlier but don't add it to the timeline + warn!("Event was soft failed: {:?}", incoming_pdu); + services() + .rooms + .pdu_metadata + .mark_event_soft_failed(&incoming_pdu.event_id)?; + + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + } + + trace!("Appending pdu to timeline"); + extremities.insert(incoming_pdu.event_id.clone()); + + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. 
+ let pdu_id = services() + .rooms + .timeline + .append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(|e| (**e).to_owned()).collect(), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .await?; + + // Event has passed all auth/stateres checks + drop(state_lock); + debug!( + elapsed = ?timer.elapsed(), + "Appended incoming pdu", + ); + + Ok(pdu_id) } - Ok(()) -} + async fn resolve_state( + &self, room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap>, + ) -> Result>> { + debug!("Loading current room state ids"); + let current_sstatehash = services() + .rooms + .state + .get_room_shortstatehash(room_id)? + .expect("every room has state"); -fn get_room_version_id(create_event: &PduEvent) -> Result { - let content: RoomCreateEventContent = create_event.get_content()?; - let room_version = content.room_version; + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_sstatehash) + .await?; - Ok(room_version) -} + let fork_states = [current_state_ids, incoming_state]; -#[inline] -fn to_room_version(room_version_id: &RoomVersionId) -> RoomVersion { - RoomVersion::new(room_version_id).expect("room version is supported") + let mut auth_chain_sets = Vec::new(); + for state in &fork_states { + auth_chain_sets.push( + services() + .rooms + .auth_chain + .event_ids_iter(room_id, state.iter().map(|(_, id)| id.clone()).collect()) + .await? 
+ .collect(), + ); + } + + debug!("Loading fork states"); + let fork_states: Vec<_> = fork_states + .into_iter() + .map(|map| { + map.into_iter() + .filter_map(|(k, id)| { + services() + .rooms + .short + .get_statekey_from_short(k) + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) + .ok() + }) + .collect::>() + }) + .collect(); + + let lock = services().globals.stateres_mutex.lock(); + + debug!("Resolving state"); + let state_resolve = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = services().rooms.timeline.get_pdu(id); + if let Err(e) = &res { + error!("Failed to fetch event: {}", e); + } + res.ok().flatten() + }); + + let state = match state_resolve { + Ok(new_state) => new_state, + Err(e) => { + error!("State resolution failed: {}", e); + return Err(Error::bad_database( + "State resolution failed, either an event could not be found or deserialization", + )); + }, + }; + + drop(lock); + + debug!("State resolution done. Compressing state"); + let new_room_state = state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; + services() + .rooms + .state_compressor + .compress_state_event(shortstatekey, &event_id) + }) + .collect::>()?; + + Ok(Arc::new(new_room_state)) + } + + // TODO: if we know the prev_events of the incoming event we can avoid the + // request and build the state from a known point and resolve if > 1 prev_event + #[tracing::instrument(skip_all, name = "state")] + pub async fn state_at_incoming_degree_one( + &self, incoming_pdu: &Arc, + ) -> Result>>> { + let prev_event = &*incoming_pdu.prev_events[0]; + let prev_event_sstatehash = services() + .rooms + .state_accessor + .pdu_shortstatehash(prev_event)?; + + let state = if let Some(shortstatehash) = prev_event_sstatehash { + Some( + services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await, 
+ ) + } else { + None + }; + + if let Some(Ok(mut state)) = state { + debug!("Using cached state"); + let prev_pdu = services() + .rooms + .timeline + .get_pdu(prev_event) + .ok() + .flatten() + .ok_or_else(|| Error::bad_database("Could not find prev event, but we know the state."))?; + + if let Some(state_key) = &prev_pdu.state_key { + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key)?; + + state.insert(shortstatekey, Arc::from(prev_event)); + // Now it's the state after the pdu + } + + return Ok(Some(state)); + } + + Ok(None) + } + + #[tracing::instrument(skip_all, name = "state")] + pub async fn state_at_incoming_resolved( + &self, incoming_pdu: &Arc, room_id: &RoomId, room_version_id: &RoomVersionId, + ) -> Result>>> { + debug!("Calculating state at event using state res"); + let mut extremity_sstatehashes = HashMap::new(); + + let mut okay = true; + for prev_eventid in &incoming_pdu.prev_events { + let Ok(Some(prev_event)) = services().rooms.timeline.get_pdu(prev_eventid) else { + okay = false; + break; + }; + + let Ok(Some(sstatehash)) = services() + .rooms + .state_accessor + .pdu_shortstatehash(prev_eventid) + else { + okay = false; + break; + }; + + extremity_sstatehashes.insert(sstatehash, prev_event); + } + + if !okay { + return Ok(None); + } + + let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); + let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); + + for (sstatehash, prev_event) in extremity_sstatehashes { + let mut leaf_state: HashMap<_, _> = services() + .rooms + .state_accessor + .state_full_ids(sstatehash) + .await?; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key)?; + leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); + // Now it's the state after the pdu + } + + let mut state = 
StateMap::with_capacity(leaf_state.len()); + let mut starting_events = Vec::with_capacity(leaf_state.len()); + + for (k, id) in leaf_state { + if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) { + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + state.insert((ty.to_string().into(), st_key), id.clone()); + } else { + warn!("Failed to get_statekey_from_short."); + } + starting_events.push(id); + } + + auth_chain_sets.push( + services() + .rooms + .auth_chain + .event_ids_iter(room_id, starting_events) + .await? + .collect(), + ); + + fork_states.push(state); + } + + let lock = services().globals.stateres_mutex.lock(); + let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = services().rooms.timeline.get_pdu(id); + if let Err(e) = &res { + error!("Failed to fetch event: {}", e); + } + res.ok().flatten() + }); + drop(lock); + + Ok(match result { + Ok(new_state) => Some( + new_state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; + Ok((shortstatekey, event_id)) + }) + .collect::>()?, + ), + Err(e) => { + warn!( + "State resolution on prev events failed, either an event could not be found or deserialization: {}", + e + ); + None + }, + }) + } + + /// Call /state_ids to find out what the state at this pdu is. 
We trust the + /// server's response to some extend (sic), but we still do a lot of checks + /// on the events + #[tracing::instrument(skip_all)] + async fn fetch_state( + &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, + pub_key_map: &RwLock>>, event_id: &EventId, + ) -> Result>>> { + debug!("Fetching state ids"); + match services() + .sending + .send_federation_request( + origin, + get_room_state_ids::v1::Request { + room_id: room_id.to_owned(), + event_id: (*event_id).to_owned(), + }, + ) + .await + { + Ok(res) => { + debug!("Fetching state events"); + let collect = res + .pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(); + + let state_vec = self + .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id, pub_key_map) + .await; + + let mut state: HashMap<_, Arc> = HashMap::new(); + for (pdu, _) in state_vec { + let state_key = pdu + .state_key + .clone() + .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; + + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key)?; + + match state.entry(shortstatekey) { + hash_map::Entry::Vacant(v) => { + v.insert(Arc::from(&*pdu.event_id)); + }, + hash_map::Entry::Occupied(_) => { + return Err(Error::bad_database( + "State event's type and state_key combination exists multiple times.", + )) + }, + } + } + + // The original create event must still be in the state + let create_shortstatekey = services() + .rooms + .short + .get_shortstatekey(&StateEventType::RoomCreate, "")? + .expect("Room exists"); + + if state.get(&create_shortstatekey).map(AsRef::as_ref) != Some(&create_event.event_id) { + return Err(Error::bad_database("Incoming event refers to wrong create event.")); + } + + Ok(Some(state)) + }, + Err(e) => { + warn!("Fetching state for event failed: {}", e); + Err(e) + }, + } + } + + /// Find the event and auth it. 
Once the event is validated (steps 1 - 8) + /// it is appended to the outliers Tree. + /// + /// Returns pdu and if we fetched it over federation the raw json. + /// + /// a. Look in the main timeline (pduid_pdu tree) + /// b. Look at outlier pdu tree + /// c. Ask origin server over federation + /// d. TODO: Ask other servers over federation? + pub(crate) fn fetch_and_handle_outliers<'a>( + &'a self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, + room_version_id: &'a RoomVersionId, pub_key_map: &'a RwLock>>, + ) -> AsyncRecursiveCanonicalJsonVec<'a> { + Box::pin(async move { + let back_off = |id| async { + match services() + .globals + .bad_event_ratelimiter + .write() + .await + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + } + }; + + let mut events_with_auth_events = vec![]; + for id in events { + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) { + trace!("Found {} in db", id); + events_with_auth_events.push((id, Some(local_pdu), vec![])); + continue; + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. 
+ let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); + let mut i = 0; + while let Some(next_id) = todo_auth_events.pop() { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .await + .get(&*next_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", next_id); + continue; + } + } + + if events_all.contains(&next_id) { + continue; + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + + if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { + trace!("Found {} in db", next_id); + continue; + } + + debug!("Fetching {} over federation.", next_id); + match services() + .sending + .send_federation_request( + origin, + get_event::v1::Request { + event_id: (*next_id).to_owned(), + }, + ) + .await + { + Ok(res) => { + debug!("Got {} over federation", next_id); + let Ok((calculated_event_id, value)) = + pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) + else { + back_off((*next_id).to_owned()).await; + continue; + }; + + if calculated_event_id != *next_id { + warn!( + "Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", + next_id, calculated_event_id, &res.pdu + ); + } + + if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { + for auth_event in auth_events { + if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + }, + Err(e) => { + warn!("Failed to fetch event {next_id}: {e}"); + back_off((*next_id).to_owned()).await; + }, + } + } + events_with_auth_events.push((id, None, events_in_reverse_order)); + } + + // We go through all the signatures we see on the PDUs and their unresolved + // dependencies and fetch the corresponding signing keys + self.fetch_required_signing_keys( + events_with_auth_events + .iter() + .flat_map(|(_id, _local_pdu, events)| events) + .map(|(_event_id, event)| event), + pub_key_map, + ) + .await + .unwrap_or_else(|e| { + warn!("Could not fetch all signatures for PDUs from {}: {:?}", origin, e); + }); + + let mut pdus = vec![]; + for (id, local_pdu, events_in_reverse_order) in events_with_auth_events { + // a. Look in the main timeline (pduid_pdu tree) + // b. 
Look at outlier pdu tree + // (get_pdu_json checks both) + if let Some(local_pdu) = local_pdu { + trace!("Found {} in db", id); + pdus.push((local_pdu, None)); + } + for (next_id, value) in events_in_reverse_order.iter().rev() { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .await + .get(&**next_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", next_id); + continue; + } + } + + match self + .handle_outlier_pdu(origin, create_event, next_id, room_id, value.clone(), true, pub_key_map) + .await + { + Ok((pdu, json)) => { + if next_id == id { + pdus.push((pdu, Some(json))); + } + }, + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()).await; + }, + } + } + } + pdus + }) + } + + #[allow(clippy::type_complexity)] + #[tracing::instrument(skip_all)] + async fn fetch_prev( + &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, + pub_key_map: &RwLock>>, initial_set: Vec>, + ) -> Result<( + Vec>, + HashMap, (Arc, BTreeMap)>, + )> { + let mut graph: HashMap, _> = HashMap::new(); + let mut eventid_info = HashMap::new(); + let mut todo_outlier_stack: Vec> = initial_set; + + let first_pdu_in_room = services() + .rooms + .timeline + .first_pdu_in_room(room_id)? 
+ .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + + let mut amount = 0; + + while let Some(prev_event_id) = todo_outlier_stack.pop() { + if let Some((pdu, json_opt)) = self + .fetch_and_handle_outliers( + origin, + &[prev_event_id.clone()], + create_event, + room_id, + room_version_id, + pub_key_map, + ) + .await + .pop() + { + self.check_room_id(room_id, &pdu)?; + + if amount > services().globals.max_fetch_prev_events() { + // Max limit reached + debug!( + "Max prev event limit reached! Limit: {}", + services().globals.max_fetch_prev_events() + ); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + } + + if let Some(json) = json_opt.or_else(|| { + services() + .rooms + .outlier + .get_outlier_pdu_json(&prev_event_id) + .ok() + .flatten() + }) { + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + amount += 1; + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(prev_prev.clone()); + } + } + + graph.insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect()); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } else { + // Fetch and handle failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } + + let sorted = state_res::lexicographical_topological_sort(&graph, |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. 
+ Ok(( + int!(0), + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), + ), + )) + }) + .map_err(|e| { + error!("Error sorting prev events: {e}"); + Error::bad_database("Error sorting prev events") + })?; + + Ok((sorted, eventid_info)) + } + + /// Returns Ok if the acl allows the server + #[tracing::instrument(skip_all)] + pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { + let acl_event = if let Some(acl) = + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? + { + trace!("ACL event found: {acl:?}"); + acl + } else { + trace!("No ACL event found"); + return Ok(()); + }; + + let acl_event_content: RoomServerAclEventContent = match serde_json::from_str(acl_event.content.get()) { + Ok(content) => { + trace!("Found ACL event contents: {content:?}"); + content + }, + Err(e) => { + warn!("Invalid ACL event: {e}"); + return Ok(()); + }, + }; + + if acl_event_content.allow.is_empty() { + warn!("Ignoring broken ACL event (allow key is empty)"); + // Ignore broken acl events + return Ok(()); + } + + if acl_event_content.is_allowed(server_name) { + trace!("server {server_name} is allowed by ACL"); + Ok(()) + } else { + debug!("Server {} was denied by room ACL in {}", server_name, room_id); + Err(Error::BadRequest(ErrorKind::forbidden(), "Server was denied by room ACL")) + } + } + + fn check_room_id(&self, room_id: &RoomId, pdu: &PduEvent) -> Result<()> { + if pdu.room_id != room_id { + warn!("Found event from room {} in room {}", pdu.room_id, room_id); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has wrong room id")); + } + Ok(()) + } + + fn get_room_version_id(&self, create_event: &PduEvent) -> Result { + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + error!("Invalid create event: {}", e); + Error::BadDatabase("Invalid create event in 
db") + })?; + + Ok(create_event_content.room_version) + } + + fn to_room_version(&self, room_version_id: &RoomVersionId) -> RoomVersion { + RoomVersion::new(room_version_id).expect("room version is supported") + } } diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs deleted file mode 100644 index a49fc541..00000000 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ /dev/null @@ -1,31 +0,0 @@ -use conduwuit::{Result, err, implement, pdu::gen_event_id_canonical_json, result::FlatOk}; -use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId}; -use serde_json::value::RawValue as RawJsonValue; - -type Parsed = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); - -#[implement(super::Service)] -pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result { - let value = serde_json::from_str::(pdu.get()).map_err(|e| { - err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}"))) - })?; - - let room_id: OwnedRoomId = value - .get("room_id") - .and_then(CanonicalJsonValue::as_str) - .map(OwnedRoomId::parse) - .flat_ok_or(err!(Request(InvalidParam("Invalid room_id in pdu"))))?; - - let room_version_id = self - .services - .state - .get_room_version(&room_id) - .await - .map_err(|_| err!("Server is not in room {room_id}"))?; - - let (event_id, value) = gen_event_id_canonical_json(pdu, &room_version_id).map_err(|e| { - err!(Request(InvalidParam("Could not convert event to canonical json: {e}"))) - })?; - - Ok((room_id, event_id, value)) -} diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs deleted file mode 100644 index b3a7a71b..00000000 --- a/src/service/rooms/event_handler/resolve_state.rs +++ /dev/null @@ -1,125 +0,0 @@ -use std::{ - borrow::Borrow, - collections::{HashMap, HashSet}, - sync::Arc, -}; - -use conduwuit::{ - Error, Result, err, implement, - state_res::{self, StateMap}, - 
trace, - utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt, automatic_width}, -}; -use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; -use ruma::{OwnedEventId, RoomId, RoomVersionId}; - -use crate::rooms::state_compressor::CompressedState; - -#[implement(super::Service)] -#[tracing::instrument(name = "resolve", level = "debug", skip_all)] -pub async fn resolve_state( - &self, - room_id: &RoomId, - room_version_id: &RoomVersionId, - incoming_state: HashMap, -) -> Result> { - trace!("Loading current room state ids"); - let current_sstatehash = self - .services - .state - .get_room_shortstatehash(room_id) - .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}")))) - .await?; - - let current_state_ids: HashMap<_, _> = self - .services - .state_accessor - .state_full_ids(current_sstatehash) - .collect() - .await; - - trace!("Loading fork states"); - let fork_states = [current_state_ids, incoming_state]; - let auth_chain_sets = fork_states - .iter() - .try_stream() - .wide_and_then(|state| { - self.services - .auth_chain - .event_ids_iter(room_id, state.values().map(Borrow::borrow)) - .try_collect() - }) - .try_collect::>>(); - - let fork_states = fork_states - .iter() - .stream() - .wide_then(|fork_state| { - let shortstatekeys = fork_state.keys().copied().stream(); - let event_ids = fork_state.values().cloned().stream(); - self.services - .short - .multi_get_statekey_from_short(shortstatekeys) - .zip(event_ids) - .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) - .collect() - }) - .map(Ok::<_, Error>) - .try_collect::>>(); - - let (fork_states, auth_chain_sets) = try_join(fork_states, auth_chain_sets).await?; - - trace!("Resolving state"); - let state = self - .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) - .boxed() - .await?; - - trace!("State resolution done."); - let state_events: Vec<_> = state - .iter() - .stream() - .wide_then(|((event_type, state_key), event_id)| { - 
self.services - .short - .get_or_create_shortstatekey(event_type, state_key) - .map(move |shortstatekey| (shortstatekey, event_id)) - }) - .collect() - .await; - - trace!("Compressing state..."); - let new_room_state: CompressedState = self - .services - .state_compressor - .compress_state_events(state_events.iter().map(|(ssk, eid)| (ssk, (*eid).borrow()))) - .collect() - .await; - - Ok(Arc::new(new_room_state)) -} - -#[implement(super::Service)] -#[tracing::instrument(name = "ruma", level = "debug", skip_all)] -pub async fn state_resolution<'a, StateSets>( - &'a self, - room_version: &'a RoomVersionId, - state_sets: StateSets, - auth_chain_sets: &'a [HashSet], -) -> Result> -where - StateSets: Iterator> + Clone + Send, -{ - let event_fetch = |event_id| self.event_fetch(event_id); - let event_exists = |event_id| self.event_exists(event_id); - state_res::resolve( - room_version, - state_sets, - auth_chain_sets, - &event_fetch, - &event_exists, - automatic_width(), - ) - .map_err(|e| err!(error!("State resolution failed: {e:?}"))) - .await -} diff --git a/src/service/rooms/event_handler/signing_keys.rs b/src/service/rooms/event_handler/signing_keys.rs new file mode 100644 index 00000000..a01082f5 --- /dev/null +++ b/src/service/rooms/event_handler/signing_keys.rs @@ -0,0 +1,610 @@ +use std::{ + collections::{hash_map, HashSet}, + time::{Duration, Instant, SystemTime}, +}; + +use futures_util::{stream::FuturesUnordered, StreamExt}; +use ruma::{ + api::federation::{ + discovery::{ + get_remote_server_keys, + get_remote_server_keys_batch::{self, v2::QueryCriteria}, + get_server_keys, + }, + membership::create_join_event, + }, + serde::Base64, + CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedServerName, + OwnedServerSigningKeyId, RoomVersionId, ServerName, +}; +use serde_json::value::RawValue as RawJsonValue; +use tokio::sync::{RwLock, RwLockWriteGuard, Semaphore}; +use tracing::{debug, error, info, trace, warn}; + +use crate::{ + 
service::{Arc, BTreeMap, HashMap, Result}, + services, Error, +}; + +impl super::Service { + pub(crate) async fn fetch_required_signing_keys<'a, E>( + &'a self, events: E, pub_key_map: &RwLock>>, + ) -> Result<()> + where + E: IntoIterator>, + { + let mut server_key_ids = HashMap::new(); + for event in events { + for (signature_server, signature) in event + .get("signatures") + .ok_or(Error::BadServerResponse("No signatures in server response pdu."))? + .as_object() + .ok_or(Error::BadServerResponse("Invalid signatures object in server response pdu."))? + { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + for signature_id in signature_object.keys() { + server_key_ids + .entry(signature_server.clone()) + .or_insert_with(HashSet::new) + .insert(signature_id.clone()); + } + } + } + + if server_key_ids.is_empty() { + // Nothing to do, can exit early + trace!("server_key_ids is empty, not fetching any keys"); + return Ok(()); + } + + trace!( + "Fetch keys for {}", + server_key_ids + .keys() + .cloned() + .collect::>() + .join(", ") + ); + + let mut server_keys: FuturesUnordered<_> = server_key_ids + .into_iter() + .map(|(signature_server, signature_ids)| async { + let fetch_res = self + .fetch_signing_keys_for_server( + signature_server.as_str().try_into().map_err(|_| { + ( + signature_server.clone(), + Error::BadServerResponse("Invalid servername in signatures of server response pdu."), + ) + })?, + signature_ids.into_iter().collect(), // HashSet to Vec + ) + .await; + + match fetch_res { + Ok(keys) => Ok((signature_server, keys)), + Err(e) => { + warn!("Signature verification failed: Could not fetch signing key for {signature_server}: {e}",); + Err((signature_server, e)) + }, + } + }) + .collect(); + + while let Some(fetch_res) = server_keys.next().await { + match fetch_res { + Ok((signature_server, keys)) => { + pub_key_map + .write() + .await + 
.insert(signature_server.clone(), keys); + }, + Err((signature_server, e)) => { + warn!("Failed to fetch keys for {}: {:?}", signature_server, e); + }, + } + } + + Ok(()) + } + + // Gets a list of servers for which we don't have the signing key yet. We go + // over the PDUs and either cache the key or add it to the list that needs to be + // retrieved. + async fn get_server_keys_from_cache( + &self, pdu: &RawJsonValue, + servers: &mut BTreeMap>, + room_version: &RoomVersionId, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + ) -> Result<()> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + let event_id = format!( + "${}", + ruma::signatures::reference_hash(&value, room_version).expect("ruma can calculate reference hashes") + ); + let event_id = <&EventId>::try_from(event_id.as_str()).expect("ruma's reference hashes are valid event ids"); + + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .await + .get(event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } + + let signatures = value + .get("signatures") + .ok_or(Error::BadServerResponse("No signatures in server response pdu."))? 
+ .as_object() + .ok_or(Error::BadServerResponse("Invalid signatures object in server response pdu."))?; + + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let origin = <&ServerName>::try_from(signature_server.as_str()) + .map_err(|_| Error::BadServerResponse("Invalid servername in signatures of server response pdu."))?; + + if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { + continue; + } + + debug!("Loading signing keys for {}", origin); + + let result: BTreeMap<_, _> = services() + .globals + .signing_keys_for(origin)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if !contains_all_ids(&result) { + debug!("Signing key not loaded for {}", origin); + servers.insert(origin.to_owned(), BTreeMap::new()); + } + + pub_key_map.insert(origin.to_string(), result); + } + + Ok(()) + } + + /// Batch requests homeserver signing keys from trusted notary key servers + /// (`trusted_servers` config option) + async fn batch_request_signing_keys( + &self, mut servers: BTreeMap>, + pub_key_map: &RwLock>>, + ) -> Result<()> { + for server in services().globals.trusted_servers() { + debug!("Asking batch signing keys from trusted server {}", server); + match services() + .sending + .send_federation_request( + server, + get_remote_server_keys_batch::v2::Request { + server_keys: servers.clone(), + }, + ) + .await + { + Ok(keys) => { + debug!("Got signing keys: {:?}", keys); + let mut pkm = pub_key_map.write().await; + for k in keys.server_keys { + let k = match k.deserialize() { + Ok(key) => key, + Err(e) => { + warn!("Received error {e} while fetching keys from trusted server {server}"); + warn!("{}", 
k.into_json()); + continue; + }, + }; + + // TODO: Check signature from trusted server? + servers.remove(&k.server_name); + + let result = services() + .globals + .add_signing_key(&k.server_name, k.clone())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); + + pkm.insert(k.server_name.to_string(), result); + } + }, + Err(e) => { + warn!( + "Failed sending batched key request to trusted key server {server} for the remote servers \ + {:?}: {e}", + servers + ); + }, + } + } + + Ok(()) + } + + /// Requests multiple homeserver signing keys from individual servers (not + /// trused notary servers) + async fn request_signing_keys( + &self, servers: BTreeMap>, + pub_key_map: &RwLock>>, + ) -> Result<()> { + debug!("Asking individual servers for signing keys: {servers:?}"); + let mut futures: FuturesUnordered<_> = servers + .into_keys() + .map(|server| async move { + ( + services() + .sending + .send_federation_request(&server, get_server_keys::v2::Request::new()) + .await, + server, + ) + }) + .collect(); + + while let Some(result) = futures.next().await { + debug!("Received new Future result"); + if let (Ok(get_keys_response), origin) = result { + debug!("Result is from {origin}"); + if let Ok(key) = get_keys_response.server_key.deserialize() { + let result: BTreeMap<_, _> = services() + .globals + .add_signing_key(&origin, key)? 
+ .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + pub_key_map.write().await.insert(origin.to_string(), result); + } + } + debug!("Done handling Future result"); + } + + Ok(()) + } + + pub(crate) async fn fetch_join_signing_keys( + &self, event: &create_join_event::v2::Response, room_version: &RoomVersionId, + pub_key_map: &RwLock>>, + ) -> Result<()> { + let mut servers: BTreeMap> = BTreeMap::new(); + + { + let mut pkm = pub_key_map.write().await; + + // Try to fetch keys, failure is okay + // Servers we couldn't find in the cache will be added to `servers` + for pdu in &event.room_state.state { + _ = self + .get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm) + .await; + } + for pdu in &event.room_state.auth_chain { + _ = self + .get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm) + .await; + } + + drop(pkm); + }; + + if servers.is_empty() { + trace!("We had all keys cached locally, not fetching any keys from remote servers"); + return Ok(()); + } + + if services().globals.query_trusted_key_servers_first() { + info!( + "query_trusted_key_servers_first is set to true, querying notary trusted key servers first for \ + homeserver signing keys." 
+ ); + + self.batch_request_signing_keys(servers.clone(), pub_key_map) + .await?; + + if servers.is_empty() { + debug!("Trusted server supplied all signing keys, no more keys to fetch"); + return Ok(()); + } + + debug!("Remaining servers left that the notary/trusted servers did not provide: {servers:?}"); + + self.request_signing_keys(servers.clone(), pub_key_map) + .await?; + } else { + debug!("query_trusted_key_servers_first is set to false, querying individual homeservers first"); + + self.request_signing_keys(servers.clone(), pub_key_map) + .await?; + + if servers.is_empty() { + debug!("Individual homeservers supplied all signing keys, no more keys to fetch"); + return Ok(()); + } + + debug!("Remaining servers left the individual homeservers did not provide: {servers:?}"); + + self.batch_request_signing_keys(servers.clone(), pub_key_map) + .await?; + } + + debug!("Search for signing keys done"); + + /*if servers.is_empty() { + warn!("Failed to find homeserver signing keys for the remaining servers: {servers:?}"); + }*/ + + Ok(()) + } + + /// Search the DB for the signing keys of the given server, if we don't have + /// them fetch them from the server and save to our DB. 
+ #[tracing::instrument(skip_all)] + pub async fn fetch_signing_keys_for_server( + &self, origin: &ServerName, signature_ids: Vec, + ) -> Result> { + let contains_all_ids = |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let permit = services() + .globals + .servername_ratelimiter + .read() + .await + .get(origin) + .map(|s| Arc::clone(s).acquire_owned()); + + let permit = if let Some(p) = permit { + p + } else { + let mut write = services().globals.servername_ratelimiter.write().await; + let s = Arc::clone( + write + .entry(origin.to_owned()) + .or_insert_with(|| Arc::new(Semaphore::new(1))), + ); + + s.acquire_owned() + } + .await; + + let back_off = |id| async { + match services() + .globals + .bad_signature_ratelimiter + .write() + .await + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + } + }; + + if let Some((time, tries)) = services() + .globals + .bad_signature_ratelimiter + .read() + .await + .get(&signature_ids) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {:?}", signature_ids); + return Err(Error::BadServerResponse("bad signature, still backing off")); + } + } + + let mut result: BTreeMap<_, _> = services() + .globals + .signing_keys_for(origin)? 
+ .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if contains_all_ids(&result) { + trace!("We have all homeserver signing keys locally for {origin}, not fetching any remotely"); + return Ok(result); + } + + // i didnt split this out into their own functions because it's relatively small + if services().globals.query_trusted_key_servers_first() { + info!( + "query_trusted_key_servers_first is set to true, querying notary trusted servers first for {origin} \ + keys" + ); + + for server in services().globals.trusted_servers() { + debug!("Asking notary server {server} for {origin}'s signing key"); + if let Some(server_keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys::v2::Request::new( + origin.to_owned(), + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime too large"), + ) + .expect("time is valid"), + ), + ) + .await + .ok() + .map(|resp| { + resp.server_keys + .into_iter() + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) { + debug!("Got signing keys: {:?}", server_keys); + for k in server_keys { + services().globals.add_signing_key(origin, k.clone())?; + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + } + + if contains_all_ids(&result) { + return Ok(result); + } + } + } + + debug!("Asking {origin} for their signing keys over federation"); + if let Some(server_key) = services() + .sending + .send_federation_request(origin, get_server_keys::v2::Request::new()) + .await + .ok() + .and_then(|resp| resp.server_key.deserialize().ok()) + { + services() + .globals + .add_signing_key(origin, server_key.clone())?; + + result.extend( + server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + server_key + .old_verify_keys + 
.into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + + if contains_all_ids(&result) { + return Ok(result); + } + } + } else { + info!("query_trusted_key_servers_first is set to false, querying {origin} first"); + + debug!("Asking {origin} for their signing keys over federation"); + if let Some(server_key) = services() + .sending + .send_federation_request(origin, get_server_keys::v2::Request::new()) + .await + .ok() + .and_then(|resp| resp.server_key.deserialize().ok()) + { + services() + .globals + .add_signing_key(origin, server_key.clone())?; + + result.extend( + server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + + if contains_all_ids(&result) { + return Ok(result); + } + } + + for server in services().globals.trusted_servers() { + debug!("Asking notary server {server} for {origin}'s signing key"); + if let Some(server_keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys::v2::Request::new( + origin.to_owned(), + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime too large"), + ) + .expect("time is valid"), + ), + ) + .await + .ok() + .map(|resp| { + resp.server_keys + .into_iter() + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) { + debug!("Got signing keys: {:?}", server_keys); + for k in server_keys { + services().globals.add_signing_key(origin, k.clone())?; + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + } + + if contains_all_ids(&result) { + return Ok(result); + } + } + } + } + + drop(permit); + + back_off(signature_ids).await; + + warn!("Failed to find public key for server: {origin}"); + Err(Error::BadServerResponse("Failed to find 
public key for server")) + } +} diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs deleted file mode 100644 index eb38c2c3..00000000 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ /dev/null @@ -1,181 +0,0 @@ -use std::{ - borrow::Borrow, - collections::{HashMap, HashSet}, - iter::Iterator, -}; - -use conduwuit::{ - Result, debug, err, implement, - matrix::{PduEvent, StateMap}, - trace, - utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, -}; -use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; -use ruma::{OwnedEventId, RoomId, RoomVersionId}; - -use crate::rooms::short::ShortStateHash; - -// TODO: if we know the prev_events of the incoming event we can avoid the -#[implement(super::Service)] -// request and build the state from a known point and resolve if > 1 prev_event -#[tracing::instrument(name = "state", level = "debug", skip_all)] -pub(super) async fn state_at_incoming_degree_one( - &self, - incoming_pdu: &PduEvent, -) -> Result>> { - let prev_event = &incoming_pdu.prev_events[0]; - let Ok(prev_event_sstatehash) = self - .services - .state_accessor - .pdu_shortstatehash(prev_event) - .await - else { - return Ok(None); - }; - - let mut state: HashMap<_, _> = self - .services - .state_accessor - .state_full_ids(prev_event_sstatehash) - .collect() - .await; - - debug!("Using cached state"); - let prev_pdu = self - .services - .timeline - .get_pdu(prev_event) - .await - .map_err(|e| err!(Database("Could not find prev event, but we know the state: {e:?}")))?; - - if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) - .await; - - state.insert(shortstatekey, prev_event.clone()); - // Now it's the state after the pdu - } - - debug_assert!(!state.is_empty(), "should be returning None for empty HashMap 
result"); - - Ok(Some(state)) -} - -#[implement(super::Service)] -#[tracing::instrument(name = "state", level = "debug", skip_all)] -pub(super) async fn state_at_incoming_resolved( - &self, - incoming_pdu: &PduEvent, - room_id: &RoomId, - room_version_id: &RoomVersionId, -) -> Result>> { - trace!("Calculating extremity statehashes..."); - let Ok(extremity_sstatehashes) = incoming_pdu - .prev_events - .iter() - .try_stream() - .broad_and_then(|prev_eventid| { - self.services - .timeline - .get_pdu(prev_eventid) - .map_ok(move |prev_event| (prev_eventid, prev_event)) - }) - .broad_and_then(|(prev_eventid, prev_event)| { - self.services - .state_accessor - .pdu_shortstatehash(prev_eventid) - .map_ok(move |sstatehash| (sstatehash, prev_event)) - }) - .try_collect::>() - .await - else { - return Ok(None); - }; - - trace!("Calculating fork states..."); - let (fork_states, auth_chain_sets): (Vec>, Vec>) = - extremity_sstatehashes - .into_iter() - .try_stream() - .wide_and_then(|(sstatehash, prev_event)| { - self.state_at_incoming_fork(room_id, sstatehash, prev_event) - }) - .try_collect() - .map_ok(Vec::into_iter) - .map_ok(Iterator::unzip) - .await?; - - let Ok(new_state) = self - .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) - .boxed() - .await - else { - return Ok(None); - }; - - new_state - .into_iter() - .stream() - .broad_then(|((event_type, state_key), event_id)| async move { - self.services - .short - .get_or_create_shortstatekey(&event_type, &state_key) - .map(move |shortstatekey| (shortstatekey, event_id)) - .await - }) - .collect() - .map(Some) - .map(Ok) - .await -} - -#[implement(super::Service)] -async fn state_at_incoming_fork( - &self, - room_id: &RoomId, - sstatehash: ShortStateHash, - prev_event: PduEvent, -) -> Result<(StateMap, HashSet)> { - let mut leaf_state: HashMap<_, _> = self - .services - .state_accessor - .state_full_ids(sstatehash) - .collect() - .await; - - if let Some(state_key) = &prev_event.state_key { - let 
shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) - .await; - - let event_id = &prev_event.event_id; - leaf_state.insert(shortstatekey, event_id.clone()); - // Now it's the state after the pdu - } - - let auth_chain = self - .services - .auth_chain - .event_ids_iter(room_id, leaf_state.values().map(Borrow::borrow)) - .try_collect(); - - let fork_state = leaf_state - .iter() - .stream() - .broad_then(|(k, id)| { - self.services - .short - .get_statekey_from_short(*k) - .map_ok(|(ty, sk)| ((ty, sk), id.clone())) - }) - .ready_filter_map(Result::ok) - .collect() - .map(Ok); - - try_join(fork_state, auth_chain).await -} diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs deleted file mode 100644 index 97d3df97..00000000 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ /dev/null @@ -1,275 +0,0 @@ -use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; - -use conduwuit::{ - Err, Result, debug, debug_info, err, implement, - matrix::{EventTypeExt, PduEvent, StateKey, state_res}, - trace, - utils::stream::{BroadbandExt, ReadyExt}, - warn, -}; -use futures::{FutureExt, StreamExt, future::ready}; -use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType}; - -use super::{get_room_version_id, to_room_version}; -use crate::rooms::{ - state_compressor::{CompressedState, HashSetCompressStateEvent}, - timeline::RawPduId, -}; - -#[implement(super::Service)] -pub(super) async fn upgrade_outlier_to_timeline_pdu( - &self, - incoming_pdu: PduEvent, - val: BTreeMap, - create_event: &PduEvent, - origin: &ServerName, - room_id: &RoomId, -) -> Result> { - // Skip the PDU if we already have it as a timeline event - if let Ok(pduid) = self - .services - .timeline - .get_pdu_id(&incoming_pdu.event_id) - .await - { - return Ok(Some(pduid)); - } - - if self - .services - .pdu_metadata - 
.is_event_soft_failed(&incoming_pdu.event_id) - .await - { - return Err!(Request(InvalidParam("Event has been soft failed"))); - } - - debug!("Upgrading to timeline pdu"); - let timer = Instant::now(); - let room_version_id = get_room_version_id(create_event)?; - - // 10. Fetch missing state and auth chain events by calling /state_ids at - // backwards extremities doing all the checks in this list starting at 1. - // These are not timeline events. - - debug!("Resolving state at event"); - let mut state_at_incoming_event = if incoming_pdu.prev_events.len() == 1 { - self.state_at_incoming_degree_one(&incoming_pdu).await? - } else { - self.state_at_incoming_resolved(&incoming_pdu, room_id, &room_version_id) - .await? - }; - - if state_at_incoming_event.is_none() { - state_at_incoming_event = self - .fetch_state(origin, create_event, room_id, &incoming_pdu.event_id) - .await?; - } - - let state_at_incoming_event = - state_at_incoming_event.expect("we always set this to some above"); - let room_version = to_room_version(&room_version_id); - - debug!("Performing auth check"); - // 11. 
Check the auth of the event passes based on the state of the event - let state_fetch_state = &state_at_incoming_event; - let state_fetch = |k: StateEventType, s: StateKey| async move { - let shortstatekey = self.services.short.get_shortstatekey(&k, &s).await.ok()?; - - let event_id = state_fetch_state.get(&shortstatekey)?; - self.services.timeline.get_pdu(event_id).await.ok() - }; - - let auth_check = state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None, // TODO: third party invite - |ty, sk| state_fetch(ty.clone(), sk.into()), - ) - .await - .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; - - if !auth_check { - return Err!(Request(Forbidden("Event has failed auth check with state at the event."))); - } - - debug!("Gathering auth events"); - let auth_events = self - .services - .state - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - ) - .await?; - - let state_fetch = |k: &StateEventType, s: &str| { - let key = k.with_state_key(s); - ready(auth_events.get(&key).cloned()) - }; - - let auth_check = state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None, // third-party invite - state_fetch, - ) - .await - .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; - - // Soft fail check before doing state res - debug!("Performing soft-fail check"); - let soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_id)) { - | (false, _) => true, - | (true, None) => false, - | (true, Some(redact_id)) => - !self - .services - .state_accessor - .user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) - .await?, - }; - - // 13. 
Use state resolution to find new room state - - // We start looking at current room state now, so lets lock the room - trace!("Locking the room"); - let state_lock = self.services.state.mutex.lock(room_id).await; - - // Now we calculate the set of extremities this room has after the incoming - // event has been applied. We start with the previous extremities (aka leaves) - trace!("Calculating extremities"); - let extremities: Vec<_> = self - .services - .state - .get_forward_extremities(room_id) - .map(ToOwned::to_owned) - .ready_filter(|event_id| { - // Remove any that are referenced by this incoming event's prev_events - !incoming_pdu.prev_events.contains(event_id) - }) - .broad_filter_map(|event_id| async move { - // Only keep those extremities were not referenced yet - self.services - .pdu_metadata - .is_event_referenced(room_id, &event_id) - .await - .eq(&false) - .then_some(event_id) - }) - .collect() - .await; - - debug!( - "Retained {} extremities checked against {} prev_events", - extremities.len(), - incoming_pdu.prev_events.len() - ); - - let state_ids_compressed: Arc = self - .services - .state_compressor - .compress_state_events( - state_at_incoming_event - .iter() - .map(|(ssk, eid)| (ssk, eid.borrow())), - ) - .collect() - .map(Arc::new) - .await; - - if incoming_pdu.state_key.is_some() { - debug!("Event is a state-event. 
Deriving new room state"); - - // We also add state after incoming event to the fork states - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) - .await; - - let event_id = &incoming_pdu.event_id; - state_after.insert(shortstatekey, event_id.clone()); - } - - let new_room_state = self - .resolve_state(room_id, &room_version_id, state_after) - .await?; - - // Set the new room state to the resolved state - debug!("Forcing new room state"); - let HashSetCompressStateEvent { shortstatehash, added, removed } = self - .services - .state_compressor - .save_state(room_id, new_room_state) - .await?; - - self.services - .state - .force_state(room_id, shortstatehash, added, removed, &state_lock) - .await?; - } - - // 14. Check if the event passes auth based on the "current state" of the room, - // if not soft fail it - if soft_fail { - debug!("Soft failing event"); - let extremities = extremities.iter().map(Borrow::borrow); - - self.services - .timeline - .append_incoming_pdu( - &incoming_pdu, - val, - extremities, - state_ids_compressed, - soft_fail, - &state_lock, - ) - .await?; - - // Soft fail, we keep the event as an outlier but don't add it to the timeline - self.services - .pdu_metadata - .mark_event_soft_failed(&incoming_pdu.event_id); - - warn!("Event was soft failed: {incoming_pdu:?}"); - return Err!(Request(InvalidParam("Event has been soft failed"))); - } - - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. 
- trace!("Appending pdu to timeline"); - let extremities = extremities - .iter() - .map(Borrow::borrow) - .chain(once(incoming_pdu.event_id.borrow())); - - let pdu_id = self - .services - .timeline - .append_incoming_pdu( - &incoming_pdu, - val, - extremities, - state_ids_compressed, - soft_fail, - &state_lock, - ) - .await?; - - // Event has passed all auth/stateres checks - drop(state_lock); - debug_info!( - elapsed = ?timer.elapsed(), - "Accepted", - ); - - Ok(pdu_id) -} diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs new file mode 100644 index 00000000..890a2f98 --- /dev/null +++ b/src/service/rooms/lazy_loading/data.rs @@ -0,0 +1,16 @@ +use ruma::{DeviceId, RoomId, UserId}; + +use crate::Result; + +pub trait Data: Send + Sync { + fn lazy_load_was_sent_before( + &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, ll_user: &UserId, + ) -> Result; + + fn lazy_load_confirm_delivery( + &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, + confirmed_user_ids: &mut dyn Iterator, + ) -> Result<()>; + + fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) -> Result<()>; +} diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 346314d1..13a45987 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,156 +1,61 @@ -//! 
Lazy Loading +mod data; +use std::collections::{HashMap, HashSet}; -use std::{collections::HashSet, sync::Arc}; +pub use data::Data; +use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; +use tokio::sync::Mutex; -use conduwuit::{ - Result, implement, - utils::{IterStream, ReadyExt, stream::TryIgnore}, -}; -use database::{Database, Deserialized, Handle, Interfix, Map, Qry}; -use futures::{Stream, StreamExt, pin_mut}; -use ruma::{DeviceId, OwnedUserId, RoomId, UserId, api::client::filter::LazyLoadOptions}; +use super::timeline::PduCount; +use crate::Result; pub struct Service { - db: Data, + pub db: &'static dyn Data, + + #[allow(clippy::type_complexity)] + pub lazy_load_waiting: Mutex>>, } -struct Data { - lazyloadedids: Arc, - db: Arc, -} - -pub trait Options: Send + Sync { - fn is_enabled(&self) -> bool; - fn include_redundant_members(&self) -> bool; -} - -#[derive(Clone, Debug)] -pub struct Context<'a> { - pub user_id: &'a UserId, - pub device_id: &'a DeviceId, - pub room_id: &'a RoomId, - pub token: Option, - pub options: Option<&'a LazyLoadOptions>, -} - -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum Status { - Unseen, - Seen(u64), -} - -pub type Witness = HashSet; -type Key<'a> = (&'a UserId, &'a DeviceId, &'a RoomId, &'a UserId); - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - lazyloadedids: args.db["lazyloadedids"].clone(), - db: args.db.clone(), - }, - })) +impl Service { + #[tracing::instrument(skip(self))] + pub fn lazy_load_was_sent_before( + &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, ll_user: &UserId, + ) -> Result { + self.db + .lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -#[implement(Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub async fn reset(&self, ctx: &Context<'_>) { - let prefix = (ctx.user_id, 
ctx.device_id, ctx.room_id, Interfix); - self.db - .lazyloadedids - .keys_prefix_raw(&prefix) - .ignore_err() - .ready_for_each(|key| self.db.lazyloadedids.remove(key)) - .await; -} - -#[implement(Service)] -#[tracing::instrument(name = "retain", level = "debug", skip_all)] -pub async fn witness_retain(&self, senders: Witness, ctx: &Context<'_>) -> Witness { - debug_assert!( - ctx.options.is_none_or(Options::is_enabled), - "lazy loading should be enabled by your options" - ); - - let include_redundant = cfg!(feature = "element_hacks") - || ctx.options.is_some_and(Options::include_redundant_members); - - let witness = self - .witness(ctx, senders.iter().map(AsRef::as_ref)) - .zip(senders.iter().stream()); - - pin_mut!(witness); - let _cork = self.db.db.cork(); - let mut senders = Witness::with_capacity(senders.len()); - while let Some((status, sender)) = witness.next().await { - if include_redundant || status == Status::Unseen { - senders.insert(sender.into()); - continue; - } - - if let Status::Seen(seen) = status { - if seen == 0 || ctx.token == Some(seen) { - senders.insert(sender.into()); - continue; - } - } + #[tracing::instrument(skip(self))] + pub async fn lazy_load_mark_sent( + &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, lazy_load: HashSet, + count: PduCount, + ) { + self.lazy_load_waiting + .lock() + .await + .insert((user_id.to_owned(), device_id.to_owned(), room_id.to_owned(), count), lazy_load); } - senders -} - -#[implement(Service)] -fn witness<'a, I>( - &'a self, - ctx: &'a Context<'a>, - senders: I, -) -> impl Stream + Send + 'a -where - I: Iterator + Send + Clone + 'a, -{ - let make_key = - |sender: &'a UserId| -> Key<'a> { (ctx.user_id, ctx.device_id, ctx.room_id, sender) }; - - senders - .clone() - .stream() - .map(make_key) - .qry(&self.db.lazyloadedids) - .map(into_status) - .zip(senders.stream()) - .map(move |(status, sender)| { - if matches!(status, Status::Unseen) { - self.db - .lazyloadedids - .put_aput::<8, _, 
_>(make_key(sender), 0_u64); - } else if matches!(status, Status::Seen(0)) { - self.db - .lazyloadedids - .put_aput::<8, _, _>(make_key(sender), ctx.token.unwrap_or(0_u64)); - } - - status - }) -} - -fn into_status(result: Result>) -> Status { - match result.and_then(|handle| handle.deserialized()) { - | Ok(seen) => Status::Seen(seen), - | Err(_) => Status::Unseen, - } -} - -impl Options for LazyLoadOptions { - fn include_redundant_members(&self) -> bool { - if let Self::Enabled { include_redundant_members } = self { - *include_redundant_members + #[tracing::instrument(skip(self))] + pub async fn lazy_load_confirm_delivery( + &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, since: PduCount, + ) -> Result<()> { + if let Some(user_ids) = self.lazy_load_waiting.lock().await.remove(&( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + since, + )) { + self.db + .lazy_load_confirm_delivery(user_id, device_id, room_id, &mut user_ids.iter().map(|u| &**u))?; } else { - false + // Ignore } + + Ok(()) } - fn is_enabled(&self) -> bool { !self.is_disabled() } + #[tracing::instrument(skip(self))] + pub fn lazy_load_reset(&self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId) -> Result<()> { + self.db.lazy_load_reset(user_id, device_id, room_id) + } } diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs new file mode 100644 index 00000000..d702b203 --- /dev/null +++ b/src/service/rooms/metadata/data.rs @@ -0,0 +1,13 @@ +use ruma::{OwnedRoomId, RoomId}; + +use crate::Result; + +pub trait Data: Send + Sync { + fn exists(&self, room_id: &RoomId) -> Result; + fn iter_ids<'a>(&'a self) -> Box> + 'a>; + fn is_disabled(&self, room_id: &RoomId) -> Result; + fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; + fn is_banned(&self, room_id: &RoomId) -> Result; + fn ban_room(&self, room_id: &RoomId, banned: bool) -> Result<()>; + fn list_banned_rooms<'a>(&'a self) -> Box> + 'a>; +} diff --git 
a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 54eef47d..500ddcff 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,100 +1,32 @@ -use std::sync::Arc; +mod data; -use conduwuit::{Result, implement, utils::stream::TryIgnore}; -use database::Map; -use futures::{Stream, StreamExt}; -use ruma::RoomId; +pub use data::Data; +use ruma::{OwnedRoomId, RoomId}; -use crate::{Dep, rooms}; +use crate::Result; pub struct Service { - db: Data, - services: Services, + pub db: &'static dyn Data, } -struct Data { - disabledroomids: Arc, - bannedroomids: Arc, - roomid_shortroomid: Arc, - pduid_pdu: Arc, -} +impl Service { + /// Checks if a room exists. + #[tracing::instrument(skip(self))] + pub fn exists(&self, room_id: &RoomId) -> Result { self.db.exists(room_id) } -struct Services { - short: Dep, -} + pub fn iter_ids<'a>(&'a self) -> Box> + 'a> { self.db.iter_ids() } -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - disabledroomids: args.db["disabledroomids"].clone(), - bannedroomids: args.db["bannedroomids"].clone(), - roomid_shortroomid: args.db["roomid_shortroomid"].clone(), - pduid_pdu: args.db["pduid_pdu"].clone(), - }, - services: Services { - short: args.depend::("rooms::short"), - }, - })) + pub fn is_disabled(&self, room_id: &RoomId) -> Result { self.db.is_disabled(room_id) } + + pub fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { + self.db.disable_room(room_id, disabled) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} + pub fn is_banned(&self, room_id: &RoomId) -> Result { self.db.is_banned(room_id) } -#[implement(Service)] -pub async fn exists(&self, room_id: &RoomId) -> bool { - let Ok(prefix) = self.services.short.get_shortroomid(room_id).await else { - return false; - }; + pub fn ban_room(&self, room_id: &RoomId, banned: bool) -> Result<()> { self.db.ban_room(room_id, 
banned) } - // Look for PDUs in that room. - self.db - .pduid_pdu - .keys_prefix_raw(&prefix) - .ignore_err() - .next() - .await - .is_some() -} - -#[implement(Service)] -pub fn iter_ids(&self) -> impl Stream + Send + '_ { - self.db.roomid_shortroomid.keys().ignore_err() -} - -#[implement(Service)] -#[inline] -pub fn disable_room(&self, room_id: &RoomId, disabled: bool) { - if disabled { - self.db.disabledroomids.insert(room_id, []); - } else { - self.db.disabledroomids.remove(room_id); + pub fn list_banned_rooms<'a>(&'a self) -> Box> + 'a> { + self.db.list_banned_rooms() } } - -#[implement(Service)] -#[inline] -pub fn ban_room(&self, room_id: &RoomId, banned: bool) { - if banned { - self.db.bannedroomids.insert(room_id, []); - } else { - self.db.bannedroomids.remove(room_id); - } -} - -#[implement(Service)] -pub fn list_banned_rooms(&self) -> impl Stream + Send + '_ { - self.db.bannedroomids.keys().ignore_err() -} - -#[implement(Service)] -#[inline] -pub async fn is_disabled(&self, room_id: &RoomId) -> bool { - self.db.disabledroomids.get(room_id).await.is_ok() -} - -#[implement(Service)] -#[inline] -pub async fn is_banned(&self, room_id: &RoomId) -> bool { - self.db.bannedroomids.get(room_id).await.is_ok() -} diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 44a83582..baf3f7b5 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -19,27 +19,46 @@ pub mod timeline; pub mod typing; pub mod user; -use std::sync::Arc; +pub trait Data: + alias::Data + + auth_chain::Data + + directory::Data + + lazy_loading::Data + + metadata::Data + + outlier::Data + + pdu_metadata::Data + + read_receipt::Data + + search::Data + + short::Data + + state::Data + + state_accessor::Data + + state_cache::Data + + state_compressor::Data + + timeline::Data + + threads::Data + + user::Data +{ +} pub struct Service { - pub alias: Arc, - pub auth_chain: Arc, - pub directory: Arc, - pub event_handler: Arc, - pub lazy_loading: Arc, - pub metadata: Arc, - pub 
outlier: Arc, - pub pdu_metadata: Arc, - pub read_receipt: Arc, - pub search: Arc, - pub short: Arc, - pub spaces: Arc, - pub state: Arc, - pub state_accessor: Arc, - pub state_cache: Arc, - pub state_compressor: Arc, - pub threads: Arc, - pub timeline: Arc, - pub typing: Arc, - pub user: Arc, + pub alias: alias::Service, + pub auth_chain: auth_chain::Service, + pub directory: directory::Service, + pub event_handler: event_handler::Service, + pub lazy_loading: lazy_loading::Service, + pub metadata: metadata::Service, + pub outlier: outlier::Service, + pub pdu_metadata: pdu_metadata::Service, + pub read_receipt: read_receipt::Service, + pub search: search::Service, + pub short: short::Service, + pub state: state::Service, + pub state_accessor: state_accessor::Service, + pub state_cache: state_cache::Service, + pub state_compressor: state_compressor::Service, + pub timeline: timeline::Service, + pub threads: threads::Service, + pub typing: typing::Service, + pub spaces: spaces::Service, + pub user: user::Service, } diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs new file mode 100644 index 00000000..18eb3190 --- /dev/null +++ b/src/service/rooms/outlier/data.rs @@ -0,0 +1,9 @@ +use ruma::{CanonicalJsonObject, EventId}; + +use crate::{PduEvent, Result}; + +pub trait Data: Send + Sync { + fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; + fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; + fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()>; +} diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 12b56935..7a6a1d01 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,52 +1,26 @@ -use std::sync::Arc; +mod data; -use conduwuit::{Result, implement, matrix::pdu::PduEvent}; -use conduwuit_database::{Deserialized, Json, Map}; +pub use data::Data; use ruma::{CanonicalJsonObject, EventId}; +use crate::{PduEvent, 
Result}; + pub struct Service { - db: Data, + pub db: &'static dyn Data, } -struct Data { - eventid_outlierpdu: Arc, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - eventid_outlierpdu: args.db["eventid_outlierpdu"].clone(), - }, - })) +impl Service { + /// Returns the pdu from the outlier tree. + pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.db.get_outlier_pdu_json(event_id) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} + /// Returns the pdu from the outlier tree. + pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.db.get_outlier_pdu(event_id) } -/// Returns the pdu from the outlier tree. -#[implement(Service)] -pub async fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result { - self.db - .eventid_outlierpdu - .get(event_id) - .await - .deserialized() -} - -/// Returns the pdu from the outlier tree. -#[implement(Service)] -pub async fn get_pdu_outlier(&self, event_id: &EventId) -> Result { - self.db - .eventid_outlierpdu - .get(event_id) - .await - .deserialized() -} - -/// Append the PDU as an outlier. -#[implement(Service)] -#[tracing::instrument(skip(self, pdu), level = "debug")] -pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) { - self.db.eventid_outlierpdu.raw_put(event_id, Json(pdu)); + /// Append the PDU as an outlier. 
+ #[tracing::instrument(skip(self, pdu))] + pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { + self.db.add_pdu_outlier(event_id, pdu) + } } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index f0beab5a..8d9a2058 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,114 +1,17 @@ -use std::{mem::size_of, sync::Arc}; +use std::sync::Arc; -use conduwuit::{ - PduCount, PduEvent, - arrayvec::ArrayVec, - result::LogErr, - utils::{ - ReadyExt, - stream::{TryIgnore, WidebandExt}, - u64_from_u8, - }, -}; -use database::Map; -use futures::{Stream, StreamExt}; -use ruma::{EventId, RoomId, UserId, api::Direction}; +use ruma::{EventId, RoomId, UserId}; -use crate::{ - Dep, rooms, - rooms::{ - short::{ShortEventId, ShortRoomId}, - timeline::{PduId, RawPduId}, - }, -}; +use crate::{service::rooms::timeline::PduCount, PduEvent, Result}; -pub(super) struct Data { - tofrom_relation: Arc, - referencedevents: Arc, - softfailedeventids: Arc, - services: Services, -} - -struct Services { - timeline: Dep, -} - -pub(super) type PdusIterItem = (PduCount, PduEvent); - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - tofrom_relation: db["tofrom_relation"].clone(), - referencedevents: db["referencedevents"].clone(), - softfailedeventids: db["softfailedeventids"].clone(), - services: Services { - timeline: args.depend::("rooms::timeline"), - }, - } - } - - pub(super) fn add_relation(&self, from: u64, to: u64) { - const BUFSIZE: usize = size_of::() * 2; - - let key: &[u64] = &[to, from]; - self.tofrom_relation.aput_raw::(key, []); - } - - pub(super) fn get_relations<'a>( - &'a self, - user_id: &'a UserId, - shortroomid: ShortRoomId, - target: ShortEventId, - from: PduCount, - dir: Direction, - ) -> impl Stream + Send + '_ { - let mut current = ArrayVec::::new(); - current.extend(target.to_be_bytes()); - 
current.extend(from.saturating_inc(dir).into_unsigned().to_be_bytes()); - let current = current.as_slice(); - match dir { - | Direction::Forward => self.tofrom_relation.raw_keys_from(current).boxed(), - | Direction::Backward => self.tofrom_relation.rev_raw_keys_from(current).boxed(), - } - .ignore_err() - .ready_take_while(move |key| key.starts_with(&target.to_be_bytes())) - .map(|to_from| u64_from_u8(&to_from[8..16])) - .map(PduCount::from_unsigned) - .wide_filter_map(move |shorteventid| async move { - let pdu_id: RawPduId = PduId { shortroomid, shorteventid }.into(); - - let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; - - if pdu.sender != user_id { - pdu.remove_transaction_id().log_err().ok(); - } - - Some((shorteventid, pdu)) - }) - } - - #[inline] - pub(super) fn mark_as_referenced<'a, I>(&self, room_id: &RoomId, event_ids: I) - where - I: Iterator, - { - for prev in event_ids { - let key = (room_id, prev); - self.referencedevents.put_raw(key, []); - } - } - - pub(super) async fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> bool { - let key = (room_id, event_id); - self.referencedevents.qry(&key).await.is_ok() - } - - pub(super) fn mark_event_soft_failed(&self, event_id: &EventId) { - self.softfailedeventids.insert(event_id, []); - } - - pub(super) async fn is_event_soft_failed(&self, event_id: &EventId) -> bool { - self.softfailedeventids.get(event_id).await.is_ok() - } +pub trait Data: Send + Sync { + fn add_relation(&self, from: u64, to: u64) -> Result<()>; + #[allow(clippy::type_complexity)] + fn relations_until<'a>( + &'a self, user_id: &'a UserId, room_id: u64, target: u64, until: PduCount, + ) -> Result> + 'a>>; + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; + fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; + fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; + fn is_event_soft_failed(&self, event_id: &EventId) -> Result; 
} diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 18221c2d..c2483475 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,136 +1,232 @@ mod data; use std::sync::Arc; -use conduwuit::{PduCount, Result}; -use futures::{StreamExt, future::try_join}; -use ruma::{EventId, RoomId, UserId, api::Direction}; +pub use data::Data; +use ruma::{ + api::{client::relations::get_relating_events, Direction}, + events::{relation::RelationType, TimelineEventType}, + EventId, RoomId, UInt, UserId, +}; +use serde::Deserialize; -use self::data::{Data, PdusIterItem}; -use crate::{Dep, rooms}; +use super::timeline::PduCount; +use crate::{services, PduEvent, Result}; pub struct Service { - services: Services, - db: Data, + pub db: &'static dyn Data, } -struct Services { - short: Dep, - timeline: Dep, +#[derive(Clone, Debug, Deserialize)] +struct ExtractRelType { + rel_type: RelationType, } - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - short: args.depend::("rooms::short"), - timeline: args.depend::("rooms::timeline"), - }, - db: Data::new(&args), - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +#[derive(Clone, Debug, Deserialize)] +struct ExtractRelatesToEventId { + #[serde(rename = "m.relates_to")] + relates_to: ExtractRelType, } impl Service { - #[tracing::instrument(skip(self, from, to), level = "debug")] - pub fn add_relation(&self, from: PduCount, to: PduCount) { + #[tracing::instrument(skip(self, from, to))] + pub fn add_relation(&self, from: PduCount, to: PduCount) -> Result<()> { match (from, to) { - | (PduCount::Normal(f), PduCount::Normal(t)) => self.db.add_relation(f, t), - | _ => { + (PduCount::Normal(f), PduCount::Normal(t)) => self.db.add_relation(f, t), + _ => { // TODO: Relations with backfilled pdus + + Ok(()) }, } } #[allow(clippy::too_many_arguments)] - 
pub async fn get_relations( - &self, - user_id: &UserId, - room_id: &RoomId, - target: &EventId, - from: PduCount, - limit: usize, - max_depth: u8, - dir: Direction, - ) -> Vec { - let room_id = self.services.short.get_shortroomid(room_id); - - let target = self.services.timeline.get_pdu_count(target); - - let Ok((room_id, target)) = try_join(room_id, target).await else { - return Vec::new(); + pub fn paginate_relations_with_filter( + &self, sender_user: &UserId, room_id: &RoomId, target: &EventId, filter_event_type: &Option, + filter_rel_type: &Option, from: &Option, to: &Option, limit: &Option, + recurse: bool, dir: Direction, + ) -> Result { + let from = match from { + Some(from) => PduCount::try_from_string(from)?, + None => match dir { + Direction::Forward => PduCount::min(), + Direction::Backward => PduCount::max(), + }, }; - let target = match target { - | PduCount::Normal(c) => c, - // TODO: Support backfilled relations - | _ => 0, // This will result in an empty iterator + let to = to.as_ref().and_then(|t| PduCount::try_from_string(t).ok()); + + // Use limit or else 10, with maximum 100 + let limit = limit + .and_then(|u| u32::try_from(u).ok()) + .map_or(10_usize, |u| u as usize) + .min(100); + + let next_token; + + // Spec (v1.10) recommends depth of at least 3 + let depth: u8 = if recurse { + 3 + } else { + 1 }; - let mut pdus: Vec<_> = self - .db - .get_relations(user_id, room_id, target, from, dir) - .collect() - .await; + match dir { + Direction::Forward => { + let relations_until = + &services() + .rooms + .pdu_metadata + .relations_until(sender_user, room_id, target, from, depth)?; + let events_after: Vec<_> = relations_until // TODO: should be relations_after + .iter() + .filter(|(_, pdu)| { + filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) + && if let Ok(content) = + serde_json::from_str::(pdu.content.get()) + { + filter_rel_type + .as_ref() + .map_or(true, |r| &content.relates_to.rel_type == r) + } else { + false + } + }) + 
.take(limit) + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, room_id, &pdu.event_id) + .unwrap_or(false) + }) + .take_while(|(k, _)| Some(k) != to.as_ref()) // Stop at `to` + .collect(); - let mut stack: Vec<_> = pdus - .iter() - .filter(|_| max_depth > 0) - .map(|pdu| (pdu.clone(), 1)) - .collect(); + next_token = events_after.last().map(|(count, _)| count).copied(); - 'limit: while let Some(stack_pdu) = stack.pop() { - let target = match stack_pdu.0.0 { - | PduCount::Normal(c) => c, - // TODO: Support backfilled relations - | PduCount::Backfilled(_) => 0, // This will result in an empty iterator - }; + let events_after: Vec<_> = events_after + .into_iter() + .rev() // relations are always most recent first + .map(|(_, pdu)| pdu.to_message_like_event()) + .collect(); - let relations: Vec<_> = self - .db - .get_relations(user_id, room_id, target, from, dir) - .collect() - .await; + Ok(get_relating_events::v1::Response { + chunk: events_after, + next_batch: next_token.map(|t| t.stringify()), + prev_batch: Some(from.stringify()), + recursion_depth: if recurse { + Some(depth.into()) + } else { + None + }, + }) + }, + Direction::Backward => { + let relations_until = + &services() + .rooms + .pdu_metadata + .relations_until(sender_user, room_id, target, from, depth)?; + let events_before: Vec<_> = relations_until + .iter() + .filter(|(_, pdu)| { + filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) + && if let Ok(content) = + serde_json::from_str::(pdu.content.get()) + { + filter_rel_type + .as_ref() + .map_or(true, |r| &content.relates_to.rel_type == r) + } else { + false + } + }) + .take(limit) + .filter(|(_, pdu)| { + services() + .rooms + .state_accessor + .user_can_see_event(sender_user, room_id, &pdu.event_id) + .unwrap_or(false) + }) + .take_while(|&(k, _)| Some(k) != to.as_ref()) // Stop at `to` + .collect(); - for relation in relations { - if stack_pdu.1 < max_depth { - stack.push((relation.clone(), 
stack_pdu.1.saturating_add(1))); - } + next_token = events_before.last().map(|(count, _)| count).copied(); - pdus.push(relation); - if pdus.len() >= limit { - break 'limit; - } - } + let events_before: Vec<_> = events_before + .into_iter() + .map(|(_, pdu)| pdu.to_message_like_event()) + .collect(); + + Ok(get_relating_events::v1::Response { + chunk: events_before, + next_batch: next_token.map(|t| t.stringify()), + prev_batch: Some(from.stringify()), + recursion_depth: if recurse { + Some(depth.into()) + } else { + None + }, + }) + }, } - - pdus } - #[tracing::instrument(skip_all, level = "debug")] - pub fn mark_as_referenced<'a, I>(&self, room_id: &RoomId, event_ids: I) - where - I: Iterator, - { - self.db.mark_as_referenced(room_id, event_ids); + pub fn relations_until<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, target: &'a EventId, until: PduCount, max_depth: u8, + ) -> Result> { + let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?; + #[allow(unknown_lints)] + #[allow(clippy::manual_unwrap_or_default)] + let target = match services().rooms.timeline.get_pdu_count(target)? 
{ + Some(PduCount::Normal(c)) => c, + // TODO: Support backfilled relations + _ => 0, // This will result in an empty iterator + }; + + self.db + .relations_until(user_id, room_id, target, until) + .map(|mut relations| { + let mut pdus: Vec<_> = (*relations).into_iter().filter_map(Result::ok).collect(); + let mut stack: Vec<_> = pdus.clone().iter().map(|pdu| (pdu.to_owned(), 1)).collect(); + + while let Some(stack_pdu) = stack.pop() { + let target = match stack_pdu.0 .0 { + PduCount::Normal(c) => c, + // TODO: Support backfilled relations + PduCount::Backfilled(_) => 0, // This will result in an empty iterator + }; + + if let Ok(relations) = self.db.relations_until(user_id, room_id, target, until) { + for relation in relations.flatten() { + if stack_pdu.1 < max_depth { + stack.push((relation.clone(), stack_pdu.1 + 1)); + } + + pdus.push(relation); + } + } + } + + pdus.sort_by(|a, b| a.0.partial_cmp(&b.0).expect("u64s can always be compared")); + pdus + }) } - #[inline] - #[tracing::instrument(skip(self), level = "debug")] - pub async fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> bool { - self.db.is_event_referenced(room_id, event_id).await + #[tracing::instrument(skip(self, room_id, event_ids))] + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { + self.db.mark_as_referenced(room_id, event_ids) } - #[inline] - #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_event_soft_failed(&self, event_id: &EventId) { - self.db.mark_event_soft_failed(event_id); + #[tracing::instrument(skip(self))] + pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + self.db.is_event_referenced(room_id, event_id) } - #[inline] - #[tracing::instrument(skip(self), level = "debug")] - pub async fn is_event_soft_failed(&self, event_id: &EventId) -> bool { - self.db.is_event_soft_failed(event_id).await - } + #[tracing::instrument(skip(self))] + pub fn mark_event_soft_failed(&self, event_id: 
&EventId) -> Result<()> { self.db.mark_event_soft_failed(event_id) } + + #[tracing::instrument(skip(self))] + pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { self.db.is_event_soft_failed(event_id) } } diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index 62f87948..dcb550f8 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -1,118 +1,28 @@ -use std::sync::Arc; - -use conduwuit::{ - Result, - utils::{ReadyExt, stream::TryIgnore}, -}; -use database::{Deserialized, Json, Map}; -use futures::{Stream, StreamExt}; use ruma::{ - CanonicalJsonObject, RoomId, UserId, - events::{AnySyncEphemeralRoomEvent, receipt::ReceiptEvent}, + events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent}, serde::Raw, + OwnedUserId, RoomId, UserId, }; -use crate::{Dep, globals}; +use crate::Result; -pub(super) struct Data { - roomuserid_privateread: Arc, - roomuserid_lastprivatereadupdate: Arc, - services: Services, - readreceiptid_readreceipt: Arc, -} - -struct Services { - globals: Dep, -} - -pub(super) type ReceiptItem<'a> = (&'a UserId, u64, Raw); - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - roomuserid_privateread: db["roomuserid_privateread"].clone(), - roomuserid_lastprivatereadupdate: db["roomuserid_lastprivatereadupdate"].clone(), - readreceiptid_readreceipt: db["readreceiptid_readreceipt"].clone(), - services: Services { - globals: args.depend::("globals"), - }, - } - } - - pub(super) async fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: &ReceiptEvent, - ) { - // Remove old entry - let last_possible_key = (room_id, u64::MAX); - self.readreceiptid_readreceipt - .rev_keys_from_raw(&last_possible_key) - .ignore_err() - .ready_take_while(|key| key.starts_with(room_id.as_bytes())) - .ready_filter_map(|key| key.ends_with(user_id.as_bytes()).then_some(key)) - .ready_for_each(|key| 
self.readreceiptid_readreceipt.del(key)) - .await; - - let count = self.services.globals.next_count().unwrap(); - let latest_id = (room_id, count, user_id); - self.readreceiptid_readreceipt.put(latest_id, Json(event)); - } - - pub(super) fn readreceipts_since<'a>( - &'a self, - room_id: &'a RoomId, - since: u64, - ) -> impl Stream> + Send + 'a { - type Key<'a> = (&'a RoomId, u64, &'a UserId); - type KeyVal<'a> = (Key<'a>, CanonicalJsonObject); - - let after_since = since.saturating_add(1); // +1 so we don't send the event at since - let first_possible_edu = (room_id, after_since); - - self.readreceiptid_readreceipt - .stream_from(&first_possible_edu) - .ignore_err() - .ready_take_while(move |((r, ..), _): &KeyVal<'_>| *r == room_id) - .map(move |((_, count, user_id), mut json): KeyVal<'_>| { - json.remove("room_id"); - - let event = serde_json::value::to_raw_value(&json)?; - - Ok((user_id, count, Raw::from_json(event))) - }) - .ignore_err() - } - - pub(super) fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, pdu_count: u64) { - let key = (room_id, user_id); - let next_count = self.services.globals.next_count().unwrap(); - - self.roomuserid_privateread.put(key, pdu_count); - self.roomuserid_lastprivatereadupdate.put(key, next_count); - } - - pub(super) async fn private_read_get_count( - &self, - room_id: &RoomId, - user_id: &UserId, - ) -> Result { - let key = (room_id, user_id); - self.roomuserid_privateread.qry(&key).await.deserialized() - } - - pub(super) async fn last_privateread_update( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> u64 { - let key = (room_id, user_id); - self.roomuserid_lastprivatereadupdate - .qry(&key) - .await - .deserialized() - .unwrap_or(0) - } +type AnySyncEphemeralRoomEventIter<'a> = + Box)>> + 'a>; + +pub trait Data: Send + Sync { + /// Replaces the previous read receipt. 
+ fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: ReceiptEvent) -> Result<()>; + + /// Returns an iterator over the most recent read_receipts in a room that + /// happened after the event with id `since`. + fn readreceipts_since(&self, room_id: &RoomId, since: u64) -> AnySyncEphemeralRoomEventIter<'_>; + + /// Sets a private read marker at `count`. + fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; + + /// Returns the private read marker. + fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + /// Returns the count of the last typing update in this room. + fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 69e859c4..85291182 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -1,169 +1,46 @@ mod data; -use std::{collections::BTreeMap, sync::Arc}; +pub use data::Data; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; -use conduwuit::{ - Result, debug, err, - matrix::pdu::{PduCount, PduId, RawPduId}, - warn, -}; -use futures::{Stream, TryFutureExt, try_join}; -use ruma::{ - OwnedEventId, OwnedUserId, RoomId, UserId, - events::{ - AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, - receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, - }, - serde::Raw, -}; - -use self::data::{Data, ReceiptItem}; -use crate::{Dep, rooms, sending}; +use crate::{services, Result}; pub struct Service { - services: Services, - db: Data, -} - -struct Services { - sending: Dep, - short: Dep, - timeline: Dep, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - sending: args.depend::("sending"), - short: args.depend::("rooms::short"), - timeline: args.depend::("rooms::timeline"), - }, - db: Data::new(&args), - })) - 
} - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, } impl Service { /// Replaces the previous read receipt. - pub async fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: &ReceiptEvent, - ) { - self.db.readreceipt_update(user_id, room_id, event).await; - self.services - .sending - .flush_room(room_id) - .await - .expect("room flush failed"); - } + pub fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: ReceiptEvent) -> Result<()> { + self.db.readreceipt_update(user_id, room_id, event)?; + services().sending.flush_room(room_id)?; - /// Gets the latest private read receipt from the user in the room - pub async fn private_read_get( - &self, - room_id: &RoomId, - user_id: &UserId, - ) -> Result> { - let pdu_count = self.private_read_get_count(room_id, user_id).map_err(|e| { - err!(Database(warn!("No private read receipt was set in {room_id}: {e}"))) - }); - let shortroomid = self.services.short.get_shortroomid(room_id).map_err(|e| { - err!(Database(warn!("Short room ID does not exist in database for {room_id}: {e}"))) - }); - let (pdu_count, shortroomid) = try_join!(pdu_count, shortroomid)?; - - let shorteventid = PduCount::Normal(pdu_count); - let pdu_id: RawPduId = PduId { shortroomid, shorteventid }.into(); - - let pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await?; - - let event_id: OwnedEventId = pdu.event_id; - let user_id: OwnedUserId = user_id.to_owned(); - let content: BTreeMap = BTreeMap::from_iter([( - event_id, - BTreeMap::from_iter([( - ruma::events::receipt::ReceiptType::ReadPrivate, - BTreeMap::from_iter([(user_id, ruma::events::receipt::Receipt { - ts: None, // TODO: start storing the timestamp so we can return one - thread: ruma::events::receipt::ReceiptThread::Unthreaded, - })]), - )]), - )]); - let receipt_event_content = ReceiptEventContent(content); - let receipt_sync_event = SyncEphemeralRoomEvent { content: receipt_event_content 
}; - - let event = serde_json::value::to_raw_value(&receipt_sync_event) - .expect("receipt created manually"); - - Ok(Raw::from_json(event)) + Ok(()) } /// Returns an iterator over the most recent read_receipts in a room that /// happened after the event with id `since`. - #[inline] - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self))] pub fn readreceipts_since<'a>( - &'a self, - room_id: &'a RoomId, - since: u64, - ) -> impl Stream> + Send + 'a { + &'a self, room_id: &RoomId, since: u64, + ) -> impl Iterator)>> + 'a { self.db.readreceipts_since(room_id, since) } - /// Sets a private read marker at PDU `count`. - #[inline] - #[tracing::instrument(skip(self), level = "debug")] - pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) { - self.db.private_read_set(room_id, user_id, count); + /// Sets a private read marker at `count`. + #[tracing::instrument(skip(self))] + pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { + self.db.private_read_set(room_id, user_id, count) } - /// Returns the private read marker PDU count. - #[inline] - #[tracing::instrument(skip(self), level = "debug")] - pub async fn private_read_get_count( - &self, - room_id: &RoomId, - user_id: &UserId, - ) -> Result { - self.db.private_read_get_count(room_id, user_id).await + /// Returns the private read marker. + #[tracing::instrument(skip(self))] + pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + self.db.private_read_get(room_id, user_id) } - /// Returns the PDU count of the last typing update in this room. - #[inline] - pub async fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - self.db.last_privateread_update(user_id, room_id).await + /// Returns the count of the last typing update in this room. 
+ pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.last_privateread_update(user_id, room_id) } } - -#[must_use] -pub fn pack_receipts(receipts: I) -> Raw> -where - I: Iterator>, -{ - let mut json = BTreeMap::new(); - for value in receipts { - let receipt = serde_json::from_str::>( - value.json().get(), - ); - match receipt { - | Ok(value) => - for (event, receipt) in value.content { - json.insert(event, receipt); - }, - | _ => { - debug!("failed to parse receipt: {:?}", receipt); - }, - } - } - let content = ReceiptEventContent::from_iter(json); - - conduwuit::trace!(?content); - Raw::from_json( - serde_json::value::to_raw_value(&SyncEphemeralRoomEvent { content }) - .expect("received valid json"), - ) -} diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs new file mode 100644 index 00000000..96439adf --- /dev/null +++ b/src/service/rooms/search/data.rs @@ -0,0 +1,11 @@ +use ruma::RoomId; + +use crate::Result; + +type SearchPdusResult<'a> = Result> + 'a>, Vec)>>; + +pub trait Data: Send + Sync { + fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; + + fn search_pdus<'a>(&'a self, room_id: &RoomId, search_string: &str) -> SearchPdusResult<'a>; +} diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 4100dd75..e75f7d14 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,235 +1,24 @@ -use std::sync::Arc; +mod data; -use conduwuit::{ - PduCount, PduEvent, Result, - arrayvec::ArrayVec, - implement, - utils::{ - ArrayVecExt, IterStream, ReadyExt, set, - stream::{TryIgnore, WidebandExt}, - }, -}; -use database::{Map, keyval::Val}; -use futures::{Stream, StreamExt}; -use ruma::{RoomId, UserId, api::client::search::search_events::v3::Criteria}; +pub use data::Data; +use ruma::RoomId; -use crate::{ - Dep, rooms, - rooms::{ - short::ShortRoomId, - timeline::{PduId, RawPduId}, - }, -}; +use 
crate::Result; pub struct Service { - db: Data, - services: Services, + pub db: &'static dyn Data, } -struct Data { - tokenids: Arc, -} - -struct Services { - short: Dep, - state_accessor: Dep, - timeline: Dep, -} - -#[derive(Clone, Debug)] -pub struct RoomQuery<'a> { - pub room_id: &'a RoomId, - pub user_id: Option<&'a UserId>, - pub criteria: &'a Criteria, - pub limit: usize, - pub skip: usize, -} - -type TokenId = ArrayVec; - -const TOKEN_ID_MAX_LEN: usize = - size_of::() + WORD_MAX_LEN + 1 + size_of::(); -const WORD_MAX_LEN: usize = 50; - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { tokenids: args.db["tokenids"].clone() }, - services: Services { - short: args.depend::("rooms::short"), - state_accessor: args - .depend::("rooms::state_accessor"), - timeline: args.depend::("rooms::timeline"), - }, - })) +impl Service { + #[tracing::instrument(skip(self))] + pub fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { + self.db.index_pdu(shortroomid, pdu_id, message_body) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -#[implement(Service)] -pub fn index_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_body: &str) { - let batch = tokenize(message_body) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xFF); - key.extend_from_slice(pdu_id.as_ref()); // TODO: currently we save the room id a second time here - key - }) - .collect::>(); - - self.db - .tokenids - .insert_batch(batch.iter().map(|k| (k.as_slice(), &[]))); -} - -#[implement(Service)] -pub fn deindex_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_body: &str) { - let batch = tokenize(message_body).map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xFF); - key.extend_from_slice(pdu_id.as_ref()); // TODO: 
currently we save the room id a second time here - key - }); - - for token in batch { - self.db.tokenids.remove(&token); + #[tracing::instrument(skip(self))] + pub fn search_pdus<'a>( + &'a self, room_id: &RoomId, search_string: &str, + ) -> Result> + 'a, Vec)>> { + self.db.search_pdus(room_id, search_string) } } - -#[implement(Service)] -pub async fn search_pdus<'a>( - &'a self, - query: &'a RoomQuery<'a>, -) -> Result<(usize, impl Stream + Send + 'a)> { - let pdu_ids: Vec<_> = self.search_pdu_ids(query).await?.collect().await; - - let count = pdu_ids.len(); - let pdus = pdu_ids - .into_iter() - .stream() - .wide_filter_map(move |result_pdu_id: RawPduId| async move { - self.services - .timeline - .get_pdu_from_id(&result_pdu_id) - .await - .ok() - }) - .ready_filter(|pdu| !pdu.is_redacted()) - .ready_filter(|pdu| pdu.matches(&query.criteria.filter)) - .wide_filter_map(move |pdu| async move { - self.services - .state_accessor - .user_can_see_event(query.user_id?, &pdu.room_id, &pdu.event_id) - .await - .then_some(pdu) - }) - .skip(query.skip) - .take(query.limit); - - Ok((count, pdus)) -} - -// result is modeled as a stream such that callers don't have to be refactored -// though an additional async/wrap still exists for now -#[implement(Service)] -pub async fn search_pdu_ids( - &self, - query: &RoomQuery<'_>, -) -> Result + Send + '_ + use<'_>> { - let shortroomid = self.services.short.get_shortroomid(query.room_id).await?; - - let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await; - - let iters = pdu_ids.into_iter().map(IntoIterator::into_iter); - - Ok(set::intersection(iters).stream()) -} - -#[implement(Service)] -async fn search_pdu_ids_query_room( - &self, - query: &RoomQuery<'_>, - shortroomid: ShortRoomId, -) -> Vec> { - tokenize(&query.criteria.search_term) - .stream() - .wide_then(|word| async move { - self.search_pdu_ids_query_words(shortroomid, &word) - .collect::>() - .await - }) - .collect::>() - .await -} - -/// Iterate over PduId's 
containing a word -#[implement(Service)] -fn search_pdu_ids_query_words<'a>( - &'a self, - shortroomid: ShortRoomId, - word: &'a str, -) -> impl Stream + Send + '_ { - self.search_pdu_ids_query_word(shortroomid, word) - .map(move |key| -> RawPduId { - let key = &key[prefix_len(word)..]; - key.into() - }) -} - -/// Iterate over raw database results for a word -#[implement(Service)] -fn search_pdu_ids_query_word( - &self, - shortroomid: ShortRoomId, - word: &str, -) -> impl Stream> + Send + '_ + use<'_> { - // rustc says const'ing this not yet stable - let end_id: RawPduId = PduId { - shortroomid, - shorteventid: PduCount::max(), - } - .into(); - - // Newest pdus first - let end = make_tokenid(shortroomid, word, &end_id); - let prefix = make_prefix(shortroomid, word); - self.db - .tokenids - .rev_raw_keys_from(&end) - .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix)) -} - -/// Splits a string into tokens used as keys in the search inverted index -/// -/// This may be used to tokenize both message bodies (for indexing) or search -/// queries (for querying). 
-fn tokenize(body: &str) -> impl Iterator + Send + '_ { - body.split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= WORD_MAX_LEN) - .map(str::to_lowercase) -} - -fn make_tokenid(shortroomid: ShortRoomId, word: &str, pdu_id: &RawPduId) -> TokenId { - let mut key = make_prefix(shortroomid, word); - key.extend_from_slice(pdu_id.as_ref()); - key -} - -fn make_prefix(shortroomid: ShortRoomId, word: &str) -> TokenId { - let mut key = TokenId::new(); - key.extend_from_slice(&shortroomid.to_be_bytes()); - key.extend_from_slice(word.as_bytes()); - key.push(database::SEP); - key -} - -fn prefix_len(word: &str) -> usize { - size_of::() - .saturating_add(word.len()) - .saturating_add(1) -} diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs new file mode 100644 index 00000000..d0e2085f --- /dev/null +++ b/src/service/rooms/short/data.rs @@ -0,0 +1,26 @@ +use std::sync::Arc; + +use ruma::{events::StateEventType, EventId, RoomId}; + +use crate::Result; + +pub trait Data: Send + Sync { + fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result; + + fn multi_get_or_create_shorteventid(&self, event_id: &[&EventId]) -> Result>; + + fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result>; + + fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result; + + fn get_eventid_from_short(&self, shorteventid: u64) -> Result>; + + fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)>; + + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)>; + + fn get_shortroomid(&self, room_id: &RoomId) -> Result>; + + fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result; +} diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 06ff6493..1490b38a 100644 --- a/src/service/rooms/short/mod.rs +++ 
b/src/service/rooms/short/mod.rs @@ -1,260 +1,48 @@ -use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; +mod data; +use std::sync::Arc; -pub use conduwuit::matrix::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; -use conduwuit::{Result, err, implement, matrix::StateKey, utils, utils::IterStream}; -use database::{Deserialized, Get, Map, Qry}; -use futures::{Stream, StreamExt}; -use ruma::{EventId, RoomId, events::StateEventType}; -use serde::Deserialize; +pub use data::Data; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Dep, globals}; +use crate::Result; pub struct Service { - db: Data, - services: Services, + pub db: &'static dyn Data, } -struct Data { - eventid_shorteventid: Arc, - shorteventid_eventid: Arc, - statekey_shortstatekey: Arc, - shortstatekey_statekey: Arc, - roomid_shortroomid: Arc, - statehash_shortstatehash: Arc, -} - -struct Services { - globals: Dep, -} - -pub type ShortStateHash = ShortId; - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - eventid_shorteventid: args.db["eventid_shorteventid"].clone(), - shorteventid_eventid: args.db["shorteventid_eventid"].clone(), - statekey_shortstatekey: args.db["statekey_shortstatekey"].clone(), - shortstatekey_statekey: args.db["shortstatekey_statekey"].clone(), - roomid_shortroomid: args.db["roomid_shortroomid"].clone(), - statehash_shortstatehash: args.db["statehash_shortstatehash"].clone(), - }, - services: Services { - globals: args.depend::("globals"), - }, - })) +impl Service { + pub fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { + self.db.get_or_create_shorteventid(event_id) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -#[implement(Service)] -pub async fn get_or_create_shorteventid(&self, event_id: &EventId) -> ShortEventId { - if let Ok(shorteventid) = self.get_shorteventid(event_id).await { - return shorteventid; + pub fn 
multi_get_or_create_shorteventid(&self, event_ids: &[&EventId]) -> Result> { + self.db.multi_get_or_create_shorteventid(event_ids) } - self.create_shorteventid(event_id) -} - -#[implement(Service)] -pub fn multi_get_or_create_shorteventid<'a, I>( - &'a self, - event_ids: I, -) -> impl Stream + Send + '_ -where - I: Iterator + Clone + Debug + Send + 'a, -{ - event_ids - .clone() - .stream() - .get(&self.db.eventid_shorteventid) - .zip(event_ids.into_iter().stream()) - .map(|(result, event_id)| match result { - | Ok(ref short) => utils::u64_from_u8(short), - | Err(_) => self.create_shorteventid(event_id), - }) -} - -#[implement(Service)] -fn create_shorteventid(&self, event_id: &EventId) -> ShortEventId { - const BUFSIZE: usize = size_of::(); - - let short = self.services.globals.next_count().unwrap(); - debug_assert!(size_of_val(&short) == BUFSIZE, "buffer requirement changed"); - - self.db - .eventid_shorteventid - .raw_aput::(event_id, short); - - self.db - .shorteventid_eventid - .aput_raw::(short, event_id); - - short -} - -#[implement(Service)] -pub async fn get_shorteventid(&self, event_id: &EventId) -> Result { - self.db - .eventid_shorteventid - .get(event_id) - .await - .deserialized() -} - -#[implement(Service)] -pub async fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, -) -> ShortStateKey { - const BUFSIZE: usize = size_of::(); - - if let Ok(shortstatekey) = self.get_shortstatekey(event_type, state_key).await { - return shortstatekey; + pub fn get_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result> { + self.db.get_shortstatekey(event_type, state_key) } - let key = (event_type, state_key); - let shortstatekey = self.services.globals.next_count().unwrap(); - debug_assert!(size_of_val(&shortstatekey) == BUFSIZE, "buffer requirement changed"); - - self.db - .statekey_shortstatekey - .put_aput::(key, shortstatekey); - - self.db - .shortstatekey_statekey - .aput_put::(shortstatekey, key); - - 
shortstatekey -} - -#[implement(Service)] -pub async fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, -) -> Result { - let key = (event_type, state_key); - self.db - .statekey_shortstatekey - .qry(&key) - .await - .deserialized() -} - -#[implement(Service)] -pub async fn get_eventid_from_short(&self, shorteventid: ShortEventId) -> Result -where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, -{ - const BUFSIZE: usize = size_of::(); - - self.db - .shorteventid_eventid - .aqry::(&shorteventid) - .await - .deserialized() - .map_err(|e| err!(Database("Failed to find EventId from short {shorteventid:?}: {e:?}"))) -} - -#[implement(Service)] -pub fn multi_get_eventid_from_short<'a, Id, S>( - &'a self, - shorteventid: S, -) -> impl Stream> + Send + 'a -where - S: Stream + Send + 'a, - Id: for<'de> Deserialize<'de> + Sized + ToOwned + 'a, - ::Owned: Borrow, -{ - shorteventid - .qry(&self.db.shorteventid_eventid) - .map(Deserialized::deserialized) -} - -#[implement(Service)] -pub async fn get_statekey_from_short( - &self, - shortstatekey: ShortStateKey, -) -> Result<(StateEventType, StateKey)> { - const BUFSIZE: usize = size_of::(); - - self.db - .shortstatekey_statekey - .aqry::(&shortstatekey) - .await - .deserialized() - .map_err(|e| { - err!(Database( - "Failed to find (StateEventType, state_key) from short {shortstatekey:?}: {e:?}" - )) - }) -} - -#[implement(Service)] -pub fn multi_get_statekey_from_short<'a, S>( - &'a self, - shortstatekey: S, -) -> impl Stream> + Send + 'a -where - S: Stream + Send + 'a, -{ - shortstatekey - .qry(&self.db.shortstatekey_statekey) - .map(Deserialized::deserialized) -} - -/// Returns (shortstatehash, already_existed) -#[implement(Service)] -pub async fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> (ShortStateHash, bool) { - const BUFSIZE: usize = size_of::(); - - if let Ok(shortstatehash) = self - .db - .statehash_shortstatehash - .get(state_hash) - .await - 
.deserialized() - { - return (shortstatehash, true); + pub fn get_or_create_shortstatekey(&self, event_type: &StateEventType, state_key: &str) -> Result { + self.db.get_or_create_shortstatekey(event_type, state_key) } - let shortstatehash = self.services.globals.next_count().unwrap(); - debug_assert!(size_of_val(&shortstatehash) == BUFSIZE, "buffer requirement changed"); + pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { + self.db.get_eventid_from_short(shorteventid) + } - self.db - .statehash_shortstatehash - .raw_aput::(state_hash, shortstatehash); + pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { + self.db.get_statekey_from_short(shortstatekey) + } - (shortstatehash, false) -} - -#[implement(Service)] -pub async fn get_shortroomid(&self, room_id: &RoomId) -> Result { - self.db.roomid_shortroomid.get(room_id).await.deserialized() -} - -#[implement(Service)] -pub async fn get_or_create_shortroomid(&self, room_id: &RoomId) -> ShortRoomId { - self.db - .roomid_shortroomid - .get(room_id) - .await - .deserialized() - .unwrap_or_else(|_| { - const BUFSIZE: usize = size_of::(); - - let short = self.services.globals.next_count().unwrap(); - debug_assert!(size_of_val(&short) == BUFSIZE, "buffer requirement changed"); - - self.db - .roomid_shortroomid - .raw_aput::(room_id, short); - - short - }) + /// Returns (shortstatehash, already_existed) + pub fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { + self.db.get_or_create_shortstatehash(state_hash) + } + + pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { self.db.get_shortroomid(room_id) } + + pub fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { + self.db.get_or_create_shortroomid(room_id) + } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index ea9756ba..5f31828f 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,524 +1,301 
@@ -mod pagination_token; -#[cfg(test)] -mod tests; - -use std::{fmt::Write, sync::Arc}; - -use async_trait::async_trait; -use conduwuit::{ - Err, Error, PduEvent, Result, implement, - utils::{ - IterStream, - future::{BoolExt, TryExtExt}, - math::usize_from_f64, - stream::{BroadbandExt, ReadyExt}, - }, +use std::{ + fmt::{Display, Formatter}, + str::FromStr, }; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, pin_mut, stream::FuturesUnordered}; + use lru_cache::LruCache; use ruma::{ - OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, api::{ - client::space::SpaceHierarchyRoomsChunk, + client::{self, error::ErrorKind, space::SpaceHierarchyRoomsChunk}, federation::{ self, space::{SpaceHierarchyChildSummary, SpaceHierarchyParentSummary}, }, }, events::{ - StateEventType, + room::{ + avatar::RoomAvatarEventContent, + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent, RoomMembership}, + topic::RoomTopicEventContent, + }, space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, + StateEventType, }, serde::Raw, space::SpaceRoomJoinRule, + OwnedRoomId, OwnedServerName, RoomId, ServerName, UInt, UserId, }; -use tokio::sync::{Mutex, MutexGuard}; +use tokio::sync::Mutex; +use tracing::{debug, error, warn}; -pub use self::pagination_token::PaginationToken; -use crate::{Dep, rooms, sending}; - -pub struct Service { - services: Services, - pub roomid_spacehierarchy_cache: Mutex, -} - -struct Services { - state_accessor: Dep, - state_cache: Dep, - state: Dep, - event_handler: Dep, - timeline: Dep, - sending: Dep, -} +use crate::{services, Error, Result}; pub struct CachedSpaceHierarchySummary { summary: SpaceHierarchyParentSummary, } -#[allow(clippy::large_enum_variant)] pub enum SummaryAccessibility { - 
Accessible(SpaceHierarchyParentSummary), + Accessible(Box), Inaccessible, } -/// Identifier used to check if rooms are accessible. None is used if you want -/// to return the room, no matter if accessible or not -pub enum Identifier<'a> { - UserId(&'a UserId), - ServerName(&'a ServerName), +pub struct Arena { + nodes: Vec, + max_depth: usize, + first_untraversed: Option, } -type Cache = LruCache>; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - let config = &args.server.config; - let cache_size = f64::from(config.roomid_spacehierarchy_cache_capacity); - let cache_size = cache_size * config.cache_capacity_modifier; - Ok(Arc::new(Self { - services: Services { - state_accessor: args - .depend::("rooms::state_accessor"), - state_cache: args.depend::("rooms::state_cache"), - state: args.depend::("rooms::state"), - event_handler: args - .depend::("rooms::event_handler"), - timeline: args.depend::("rooms::timeline"), - sending: args.depend::("sending"), - }, - roomid_spacehierarchy_cache: Mutex::new(LruCache::new(usize_from_f64(cache_size)?)), - })) - } - - async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { - let roomid_spacehierarchy_cache = self.roomid_spacehierarchy_cache.lock().await.len(); - - writeln!(out, "roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}")?; - - Ok(()) - } - - async fn clear_cache(&self) { self.roomid_spacehierarchy_cache.lock().await.clear(); } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +pub struct Node { + parent: Option, + // Next meaning: + // --> + // o o o o + next_sibling: Option, + // First meaning: + // | + // v + // o o o o + first_child: Option, + pub room_id: OwnedRoomId, + pub via: Vec, + traversed: bool, } -/// Gets the summary of a space using solely local information -#[implement(Service)] -pub async fn get_summary_and_children_local( - &self, - current_room: &RoomId, - identifier: &Identifier<'_>, -) -> Result> { - 
match self - .roomid_spacehierarchy_cache - .lock() - .await - .get_mut(current_room) - .as_ref() - { - | None => (), // cache miss - | Some(None) => return Ok(None), - | Some(Some(cached)) => { - let allowed_rooms = cached.summary.allowed_room_ids.iter().map(AsRef::as_ref); - - let is_accessible_child = self.is_accessible_child( - current_room, - &cached.summary.join_rule, - identifier, - allowed_rooms, - ); - - let accessibility = if is_accessible_child.await { - SummaryAccessibility::Accessible(cached.summary.clone()) - } else { - SummaryAccessibility::Inaccessible - }; - - return Ok(Some(accessibility)); - }, - } - - let children_pdus: Vec<_> = self - .get_space_child_events(current_room) - .map(PduEvent::into_stripped_spacechild_state_event) - .collect() - .await; - - let Ok(summary) = self - .get_room_summary(current_room, children_pdus, identifier) - .boxed() - .await - else { - return Ok(None); - }; - - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.to_owned(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), - ); - - Ok(Some(SummaryAccessibility::Accessible(summary))) +#[derive(Clone, Copy, PartialEq, Debug, PartialOrd)] +pub struct NodeId { + index: usize, } -/// Gets the summary of a space using solely federation -#[implement(Service)] -#[tracing::instrument(level = "debug", skip(self))] -async fn get_summary_and_children_federation( - &self, - current_room: &RoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], -) -> Result> { - let request = federation::space::get_hierarchy::v1::Request { - room_id: current_room.to_owned(), - suggested_only, - }; +impl Arena { + /// Checks if a given node is traversed + fn traversed(&self, id: NodeId) -> Option { Some(self.get(id)?.traversed) } - let mut requests: FuturesUnordered<_> = via - .iter() - .map(|server| { - self.services - .sending - .send_federation_request(server, request.clone()) - }) - .collect(); + /// Gets the previous sibling of a given 
node + fn next_sibling(&self, id: NodeId) -> Option { self.get(id)?.next_sibling } - let Some(Ok(response)) = requests.next().await else { - self.roomid_spacehierarchy_cache - .lock() - .await - .insert(current_room.to_owned(), None); + /// Gets the parent of a given node + fn parent(&self, id: NodeId) -> Option { self.get(id)?.parent } - return Ok(None); - }; + /// Gets the last child of a given node + fn first_child(&self, id: NodeId) -> Option { self.get(id)?.first_child } - let summary = response.room; - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.to_owned(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), - ); + /// Sets traversed to true for a given node + fn traverse(&mut self, id: NodeId) { self.nodes[id.index].traversed = true; } - response - .children - .into_iter() - .stream() - .then(|child| { - self.roomid_spacehierarchy_cache - .lock() - .map(|lock| (child, lock)) - }) - .ready_filter_map(|(child, mut cache)| { - (!cache.contains_key(current_room)).then_some((child, cache)) - }) - .for_each(|(child, cache)| self.cache_insert(cache, current_room, child)) - .await; + /// Gets the node of a given id + fn get(&self, id: NodeId) -> Option<&Node> { self.nodes.get(id.index) } - let identifier = Identifier::UserId(user_id); - let allowed_room_ids = summary.allowed_room_ids.iter().map(AsRef::as_ref); + /// Gets a mutable reference of a node of a given id + fn get_mut(&mut self, id: NodeId) -> Option<&mut Node> { self.nodes.get_mut(id.index) } - let is_accessible_child = self - .is_accessible_child(current_room, &summary.join_rule, &identifier, allowed_room_ids) - .await; + /// Returns the first untraversed node, marking it as traversed in the + /// process + fn first_untraversed(&mut self) -> Option { + if self.nodes.is_empty() { + None + } else if let Some(untraversed) = self.first_untraversed { + let mut current = untraversed; - let accessibility = if is_accessible_child { - SummaryAccessibility::Accessible(summary) 
- } else { - SummaryAccessibility::Inaccessible - }; + self.traverse(untraversed); - Ok(Some(accessibility)) -} - -/// Simply returns the stripped m.space.child events of a room -#[implement(Service)] -fn get_space_child_events<'a>( - &'a self, - room_id: &'a RoomId, -) -> impl Stream + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|current_shortstatehash| { - self.services - .state_accessor - .state_keys_with_ids(current_shortstatehash, &StateEventType::SpaceChild) - .boxed() - }) - .map(Result::into_iter) - .map(IterStream::stream) - .map(StreamExt::flatten) - .flatten_stream() - .broad_filter_map(move |(state_key, event_id): (_, OwnedEventId)| async move { - self.services - .timeline - .get_pdu(&event_id) - .map_ok(move |pdu| (state_key, pdu)) - .ok() - .await - }) - .ready_filter_map(move |(state_key, pdu)| { - if let Ok(content) = pdu.get_content::() { - if content.via.is_empty() { - return None; + // Possible paths: + // 1) Next child exists, and hence is not traversed + // 2) Next child does not exist, so go to the parent, then repeat + // 3) If both the parent and child do not exist, then we have just traversed the + // whole space tree. + // + // You should only ever encounter a traversed node when going up through parents + while let Some(true) = self.traversed(current) { + if let Some(next) = self.next_sibling(current) { + current = next; + } else if let Some(parent) = self.parent(current) { + current = parent; + } else { + break; } } - if RoomId::parse(&state_key).is_err() { - return None; + // Traverses down the children until it reaches one without children + while let Some(child) = self.first_child(current) { + current = child; } - Some(pdu) - }) -} + if self.traversed(current)? 
{ + self.first_untraversed = None; + } else { + self.first_untraversed = Some(current); + } -/// Gets the summary of a space using either local or remote (federation) -/// sources -#[implement(Service)] -pub async fn get_summary_and_children_client( - &self, - current_room: &OwnedRoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], -) -> Result> { - let identifier = Identifier::UserId(user_id); - - if let Ok(Some(response)) = self - .get_summary_and_children_local(current_room, &identifier) - .await - { - return Ok(Some(response)); - } - - self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) - .await -} - -#[implement(Service)] -async fn get_room_summary( - &self, - room_id: &RoomId, - children_state: Vec>, - identifier: &Identifier<'_>, -) -> Result { - let join_rule = self.services.state_accessor.get_join_rules(room_id).await; - - let is_accessible_child = self - .is_accessible_child( - room_id, - &join_rule.clone().into(), - identifier, - join_rule.allowed_rooms(), - ) - .await; - - if !is_accessible_child { - return Err!(Request(Forbidden("User is not allowed to see the room"))); - } - - let name = self.services.state_accessor.get_name(room_id).ok(); - - let topic = self.services.state_accessor.get_room_topic(room_id).ok(); - - let room_type = self.services.state_accessor.get_room_type(room_id).ok(); - - let world_readable = self.services.state_accessor.is_world_readable(room_id); - - let guest_can_join = self.services.state_accessor.guest_can_join(room_id); - - let num_joined_members = self - .services - .state_cache - .room_joined_count(room_id) - .unwrap_or(0); - - let canonical_alias = self - .services - .state_accessor - .get_canonical_alias(room_id) - .ok(); - - let avatar_url = self - .services - .state_accessor - .get_avatar(room_id) - .map(|res| res.into_option().unwrap_or_default().url); - - let room_version = self.services.state.get_room_version(room_id).ok(); - - let encryption = self - 
.services - .state_accessor - .get_room_encryption(room_id) - .ok(); - - let ( - canonical_alias, - name, - num_joined_members, - topic, - world_readable, - guest_can_join, - avatar_url, - room_type, - room_version, - encryption, - ) = futures::join!( - canonical_alias, - name, - num_joined_members, - topic, - world_readable, - guest_can_join, - avatar_url, - room_type, - room_version, - encryption, - ); - - let summary = SpaceHierarchyParentSummary { - canonical_alias, - name, - topic, - world_readable, - guest_can_join, - avatar_url, - room_type, - children_state, - encryption, - room_version, - room_id: room_id.to_owned(), - num_joined_members: num_joined_members.try_into().unwrap_or_default(), - allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), - join_rule: join_rule.clone().into(), - }; - - Ok(summary) -} - -/// With the given identifier, checks if a room is accessable -#[implement(Service)] -async fn is_accessible_child<'a, I>( - &self, - current_room: &RoomId, - join_rule: &SpaceRoomJoinRule, - identifier: &Identifier<'_>, - allowed_rooms: I, -) -> bool -where - I: Iterator + Send, -{ - if let Identifier::ServerName(server_name) = identifier { - // Checks if ACLs allow for the server to participate - if self - .services - .event_handler - .acl_check(server_name, current_room) - .await - .is_err() - { - return false; + Some(untraversed) + } else { + None } } - if let Identifier::UserId(user_id) = identifier { - let is_joined = self.services.state_cache.is_joined(user_id, current_room); + /// Adds all the given nodes as children of the parent node + fn push(&mut self, parent: NodeId, mut children: Vec<(OwnedRoomId, Vec)>) { + if children.is_empty() { + self.traverse(parent); + } else if self.nodes.get(parent.index).is_some() { + let mut parents = vec![( + parent, + self.get(parent) + .expect("It is some, as above") + .room_id + // Cloning cause otherwise when iterating over the parents, below, there would + // be a mutable and immutable 
reference to self.nodes + .clone(), + )]; - let is_invited = self.services.state_cache.is_invited(user_id, current_room); + while let Some(parent) = self.parent(parents.last().expect("Has at least one value, as above").0) { + parents.push(( + parent, + self.get(parent) + .expect("It is some, as above") + .room_id + .clone(), + )); + } - pin_mut!(is_joined, is_invited); - if is_joined.or(is_invited).await { - return true; + // If at max_depth, don't add new rooms + if self.max_depth < parents.len() { + return; + } + + children.reverse(); + + let mut next_id = None; + + for (child, via) in children { + // Prevent adding a child which is a parent (recursion) + if !parents.iter().any(|parent| parent.1 == child) { + self.nodes.push(Node { + parent: Some(parent), + next_sibling: next_id, + first_child: None, + room_id: child, + traversed: false, + via, + }); + + next_id = Some(NodeId { + index: self.nodes.len() - 1, + }); + } + } + + if self.first_untraversed.is_none() + || parent + >= self + .first_untraversed + .expect("Should have already continued if none") + { + self.first_untraversed = next_id; + } + + self.traverse(parent); + + // This is done as if we use an if-let above, we cannot reference self.nodes + // above as then we would have multiple mutable references + let node = self + .get_mut(parent) + .expect("Must be some, as inside this block"); + + node.first_child = next_id; } } - match *join_rule { - | SpaceRoomJoinRule::Public - | SpaceRoomJoinRule::Knock - | SpaceRoomJoinRule::KnockRestricted => true, - | SpaceRoomJoinRule::Restricted => - allowed_rooms - .stream() - .any(async |room| match identifier { - | Identifier::UserId(user) => - self.services.state_cache.is_joined(user, room).await, - | Identifier::ServerName(server) => - self.services.state_cache.server_in_room(server, room).await, + fn new(root: OwnedRoomId, max_depth: usize) -> Self { + let zero_depth = max_depth == 0; + + Arena { + nodes: vec![Node { + parent: None, + next_sibling: None, + 
first_child: None, + room_id: root, + traversed: zero_depth, + via: vec![], + }], + max_depth, + first_untraversed: if zero_depth { + None + } else { + Some(NodeId { + index: 0, }) - .await, - - // Invite only, Private, or Custom join rule - | _ => false, + }, + } } } -/// Returns the children of a SpaceHierarchyParentSummary, making use of the -/// children_state field -pub fn get_parent_children_via( - parent: &SpaceHierarchyParentSummary, - suggested_only: bool, -) -> impl DoubleEndedIterator + use<>)> -+ Send -+ '_ { - parent - .children_state - .iter() - .map(Raw::deserialize) - .filter_map(Result::ok) - .filter_map(move |ce| { - (!suggested_only || ce.content.suggested) - .then_some((ce.state_key, ce.content.via.into_iter())) - }) +// Note: perhaps use some better form of token rather than just room count +#[derive(Debug, PartialEq)] +pub(crate) struct PagnationToken { + pub(crate) skip: UInt, + pub(crate) limit: UInt, + pub(crate) max_depth: UInt, + pub(crate) suggested_only: bool, } -#[implement(Service)] -async fn cache_insert( - &self, - mut cache: MutexGuard<'_, Cache>, - current_room: &RoomId, - child: SpaceHierarchyChildSummary, -) { - let SpaceHierarchyChildSummary { - canonical_alias, - name, - num_joined_members, - room_id, - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - allowed_room_ids, - encryption, - room_version, - } = child; +impl FromStr for PagnationToken { + type Err = Error; - let summary = SpaceHierarchyParentSummary { - canonical_alias, - name, - num_joined_members, - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - allowed_room_ids, - room_id: room_id.clone(), - children_state: self - .get_space_child_events(&room_id) - .map(PduEvent::into_stripped_spacechild_state_event) - .collect() - .await, - encryption, - room_version, - }; + fn from_str(value: &str) -> Result { + let mut values = value.split('_'); - cache.insert(current_room.to_owned(), 
Some(CachedSpaceHierarchySummary { summary })); + let mut pag_tok = || { + Some(PagnationToken { + skip: UInt::from_str(values.next()?).ok()?, + limit: UInt::from_str(values.next()?).ok()?, + max_depth: UInt::from_str(values.next()?).ok()?, + suggested_only: { + let slice = values.next()?; + + if values.next().is_none() { + if slice == "true" { + true + } else if slice == "false" { + false + } else { + None? + } + } else { + None? + } + }, + }) + }; + + if let Some(token) = pag_tok() { + Ok(token) + } else { + Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) + } + } +} + +impl Display for PagnationToken { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}_{}_{}_{}", self.skip, self.limit, self.max_depth, self.suggested_only) + } +} + +/// Identifier used to check if rooms are accessible +/// +/// None is used if you want to return the room, no matter if accessible or not +pub enum Identifier<'a> { + UserId(&'a UserId), + ServerName(&'a ServerName), + None, +} + +pub struct Service { + pub roomid_spacehierarchy_cache: Mutex>>, } // Here because cannot implement `From` across ruma-federation-api and @@ -537,12 +314,10 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, - allowed_room_ids, - encryption, - room_version, + .. 
} = value.summary; - Self { + SpaceHierarchyRoomsChunk { canonical_alias, name, num_joined_members, @@ -554,17 +329,548 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, - encryption, - room_version, - allowed_room_ids, } } } -/// Here because cannot implement `From` across ruma-federation-api and -/// ruma-client-api types -#[must_use] -pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { +impl Service { + ///Gets the response for the space hierarchy over federation request + /// + ///Panics if the room does not exist, so a check if the room exists should + /// be done + pub async fn get_federation_hierarchy( + &self, room_id: &RoomId, server_name: &ServerName, suggested_only: bool, + ) -> Result { + match self + .get_summary_and_children_local(&room_id.to_owned(), Identifier::None) + .await? + { + Some(SummaryAccessibility::Accessible(room)) => { + let mut children = Vec::new(); + let mut inaccessible_children = Vec::new(); + + for (child, _via) in get_parent_children_via(&room, suggested_only) { + match self + .get_summary_and_children_local(&child, Identifier::ServerName(server_name)) + .await? 
+ { + Some(SummaryAccessibility::Accessible(summary)) => { + children.push((*summary).into()); + }, + Some(SummaryAccessibility::Inaccessible) => { + inaccessible_children.push(child); + }, + None => (), + } + } + + Ok(federation::space::get_hierarchy::v1::Response { + room: *room, + children, + inaccessible_children, + }) + }, + Some(SummaryAccessibility::Inaccessible) => { + Err(Error::BadRequest(ErrorKind::NotFound, "The requested room is inaccessible")) + }, + None => Err(Error::BadRequest(ErrorKind::NotFound, "The requested room was not found")), + } + } + + async fn get_summary_and_children_local( + &self, current_room: &OwnedRoomId, identifier: Identifier<'_>, + ) -> Result> { + if let Some(cached) = self + .roomid_spacehierarchy_cache + .lock() + .await + .get_mut(¤t_room.to_owned()) + .as_ref() + { + return Ok(if let Some(cached) = cached { + if is_accessable_child( + current_room, + &cached.summary.join_rule, + &identifier, + &cached.summary.allowed_room_ids, + )? { + Some(SummaryAccessibility::Accessible(Box::new(cached.summary.clone()))) + } else { + Some(SummaryAccessibility::Inaccessible) + } + } else { + None + }); + } + + Ok( + if let Some(children_pdus) = get_stripped_space_child_events(current_room).await? 
{ + let summary = Self::get_room_summary(current_room, children_pdus, &identifier); + if let Ok(summary) = summary { + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.clone(), + Some(CachedSpaceHierarchySummary { + summary: summary.clone(), + }), + ); + + Some(SummaryAccessibility::Accessible(Box::new(summary))) + } else { + None + } + } else { + None + }, + ) + } + + async fn get_summary_and_children_federation( + &self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &Vec, + ) -> Result> { + for server in via { + debug!("Asking {server} for /hierarchy"); + if let Ok(response) = services() + .sending + .send_federation_request( + server, + federation::space::get_hierarchy::v1::Request { + room_id: current_room.to_owned(), + suggested_only, + }, + ) + .await + { + debug!("Got response from {server} for /hierarchy\n{response:?}"); + let summary = response.room.clone(); + + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.clone(), + Some(CachedSpaceHierarchySummary { + summary: summary.clone(), + }), + ); + + for child in response.children { + let mut guard = self.roomid_spacehierarchy_cache.lock().await; + if !guard.contains_key(current_room) { + guard.insert( + current_room.clone(), + Some(CachedSpaceHierarchySummary { + summary: { + let SpaceHierarchyChildSummary { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + } = child; + + SpaceHierarchyParentSummary { + canonical_alias, + name, + num_joined_members, + room_id: room_id.clone(), + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + children_state: get_stripped_space_child_events(&room_id).await?.unwrap(), + allowed_room_ids, + } + }, + }), + ); + } + } + if is_accessable_child( + current_room, + &response.room.join_rule, + &Identifier::UserId(user_id), + &response.room.allowed_room_ids, + )? 
{ + return Ok(Some(SummaryAccessibility::Accessible(Box::new(summary.clone())))); + } + + return Ok(Some(SummaryAccessibility::Inaccessible)); + } + + self.roomid_spacehierarchy_cache + .lock() + .await + .insert(current_room.clone(), None); + } + Ok(None) + } + + async fn get_summary_and_children_client( + &self, current_room: &OwnedRoomId, suggested_only: bool, user_id: &UserId, via: &Vec, + ) -> Result> { + if let Ok(Some(response)) = self + .get_summary_and_children_local(current_room, Identifier::UserId(user_id)) + .await + { + Ok(Some(response)) + } else { + self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) + .await + } + } + + fn get_room_summary( + current_room: &OwnedRoomId, children_state: Vec>, identifier: &Identifier<'_>, + ) -> Result { + let room_id: &RoomId = current_room; + + let join_rule = services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| c.join_rule) + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") + }) + }) + .transpose()? + .unwrap_or(JoinRule::Invite); + + let allowed_room_ids = allowed_room_ids(join_rule.clone()); + + if !is_accessable_child(current_room, &join_rule.clone().into(), identifier, &allowed_room_ids)? { + debug!("User is not allowed to see room {room_id}"); + // This error will be caught later + return Err(Error::BadRequest(ErrorKind::forbidden(), "User is not allowed to see the room")); + } + + let join_rule = join_rule.into(); + + Ok(SpaceHierarchyParentSummary { + canonical_alias: services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")? 
+ .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomCanonicalAliasEventContent| c.alias) + .map_err(|_| Error::bad_database("Invalid canonical alias event in database.")) + })?, + name: services().rooms.state_accessor.get_name(room_id)?, + num_joined_members: services() + .rooms + .state_cache + .room_joined_count(room_id)? + .unwrap_or_else(|| { + warn!("Room {} has no member count", room_id); + 0 + }) + .try_into() + .expect("user count should not be that big"), + room_id: room_id.to_owned(), + topic: services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomTopic, "")? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomTopicEventContent| Some(c.topic)) + .map_err(|_| { + error!("Invalid room topic event in database for room {}", room_id); + Error::bad_database("Invalid room topic event in database.") + }) + }) + .unwrap_or(None), + world_readable: world_readable(room_id)?, + guest_can_join: guest_can_join(room_id)?, + avatar_url: services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomAvatar, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomAvatarEventContent| c.url) + .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) + }) + .transpose()? + // url is now an Option so we must flatten + .flatten(), + join_rule, + room_type: services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "")? + .map(|s| { + serde_json::from_str::(s.content.get()).map_err(|e| { + error!("Invalid room create event in database: {}", e); + Error::BadDatabase("Invalid room create event in database.") + }) + }) + .transpose()? 
+ .and_then(|e| e.room_type), + children_state, + allowed_room_ids, + }) + } + + pub async fn get_client_hierarchy( + &self, sender_user: &UserId, room_id: &RoomId, limit: usize, skip: usize, max_depth: usize, + suggested_only: bool, + ) -> Result { + match self + .get_summary_and_children_client( + &room_id.to_owned(), + suggested_only, + sender_user, + &match room_id.server_name() { + Some(server_name) => vec![server_name.into()], + None => vec![], + }, + ) + .await? + { + Some(SummaryAccessibility::Accessible(summary)) => { + let mut left_to_skip = skip; + let mut arena = Arena::new(summary.room_id.clone(), max_depth); + + let mut results = Vec::new(); + let root = arena + .first_untraversed() + .expect("The node just added is not traversed"); + + arena.push(root, get_parent_children_via(&summary, suggested_only)); + if left_to_skip > 0 { + left_to_skip -= 1; + } else { + results.push(summary_to_chunk(*summary.clone())); + } + + while let Some(current_room) = arena.first_untraversed() { + if limit > results.len() { + let node = arena + .get(current_room) + .expect("We added this node, it must exist"); + if let Some(SummaryAccessibility::Accessible(summary)) = self + .get_summary_and_children_client(&node.room_id, suggested_only, sender_user, &node.via) + .await? 
+ { + let children = get_parent_children_via(&summary, suggested_only); + arena.push(current_room, children); + + if left_to_skip > 0 { + left_to_skip -= 1; + } else { + results.push(summary_to_chunk(*summary.clone())); + } + } + } else { + break; + } + } + + Ok(client::space::get_hierarchy::v1::Response { + next_batch: if results.len() < limit { + None + } else { + let skip = UInt::new((skip + limit) as u64); + + skip.map(|skip| { + PagnationToken { + skip, + limit: UInt::new(max_depth as u64) + .expect("When sent in request it must have been valid UInt"), + max_depth: UInt::new(max_depth as u64) + .expect("When sent in request it must have been valid UInt"), + suggested_only, + } + .to_string() + }) + }, + rooms: results, + }) + }, + Some(SummaryAccessibility::Inaccessible) => { + Err(Error::BadRequest(ErrorKind::forbidden(), "The requested room is inaccessible")) + }, + None => Err(Error::BadRequest(ErrorKind::forbidden(), "The requested room was not found")), + } + } +} + +/// Simply returns the stripped m.space.child events of a room +async fn get_stripped_space_child_events( + room_id: &RoomId, +) -> Result>>, Error> { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + let state = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let mut children_pdus = Vec::new(); + for (key, id) in state { + let (event_type, state_key) = services().rooms.short.get_statekey_from_short(key)?; + if event_type != StateEventType::SpaceChild { + continue; + } + + let pdu = services() + .rooms + .timeline + .get_pdu(&id)? 
+ .ok_or_else(|| Error::bad_database("Event in space state not found"))?; + + if serde_json::from_str::(pdu.content.get()) + .ok() + .map(|c| c.via) + .map_or(true, |v| v.is_empty()) + { + continue; + } + + if OwnedRoomId::try_from(state_key).is_ok() { + children_pdus.push(pdu.to_stripped_spacechild_state_event()); + } + } + Ok(Some(children_pdus)) + } else { + Ok(None) + } +} + +/// With the given identifier, checks if a room is accessable +fn is_accessable_child( + current_room: &OwnedRoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>, + allowed_room_ids: &Vec, +) -> Result { + is_accessable_child_recurse(current_room, join_rule, identifier, allowed_room_ids, 0) +} + +fn is_accessable_child_recurse( + current_room: &OwnedRoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>, + allowed_room_ids: &Vec, recurse_num: usize, +) -> Result { + // Set limit at 10, as we cannot keep going up parents forever + // and it is very unlikely to have 10 space parents + if recurse_num < 10 { + match identifier { + Identifier::ServerName(server_name) => { + let room_id: &RoomId = current_room; + + // Checks if ACLs allow for the server to participate + if services() + .rooms + .event_handler + .acl_check(server_name, room_id) + .is_err() + { + return Ok(false); + } + }, + Identifier::UserId(user_id) => { + if services() + .rooms + .state_cache + .is_joined(user_id, current_room)? + || services() + .rooms + .state_cache + .is_invited(user_id, current_room)? 
+ { + return Ok(true); + } + }, + Identifier::None => (), + } // Takes care of joinrules + Ok(match join_rule { + SpaceRoomJoinRule::Restricted => { + for room in allowed_room_ids { + if let Ok((join_rule, allowed_room_ids)) = get_join_rule(room) { + if let Ok(true) = is_accessable_child_recurse( + room, + &join_rule, + identifier, + &allowed_room_ids, + recurse_num + 1, + ) { + return Ok(true); + } + } + } + false + }, + SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted => true, + // Custom join rules, Invite, or Private + _ => false, + }) + } else { + // If you need to go up 10 parents, we just assume it is inaccessable + Ok(false) + } +} + +/// Checks if guests are able to join a given room +fn guest_can_join(room_id: &RoomId) -> Result { + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomGuestAccess, "")? + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomGuestAccessEventContent| c.guest_access == GuestAccess::CanJoin) + .map_err(|_| Error::bad_database("Invalid room guest access event in database.")) + }) +} + +/// Checks if guests are able to view room content without joining +fn world_readable(room_id: &RoomId) -> Result { + Ok(services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? 
+ .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility == HistoryVisibility::WorldReadable) + .map_err(|e| { + error!( + "Invalid room history visibility event in database for room {room_id}, assuming is \ + \"shared\": {e} " + ); + Error::bad_database("Invalid room history visibility event in database.") + }) + }) + .unwrap_or(false)) +} + +/// Returns the join rule for a given room +fn get_join_rule(current_room: &RoomId) -> Result<(SpaceRoomJoinRule, Vec), Error> { + Ok(services() + .rooms + .state_accessor + .room_state_get(current_room, &StateEventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| (c.join_rule.clone().into(), allowed_room_ids(c.join_rule))) + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") + }) + }) + .transpose()? + .unwrap_or((SpaceRoomJoinRule::Invite, vec![]))) +} + +// Here because cannot implement `From` across ruma-federation-api and +// ruma-client-api types +fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { let SpaceHierarchyParentSummary { canonical_alias, name, @@ -577,9 +883,7 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR join_rule, room_type, children_state, - allowed_room_ids, - encryption, - room_version, + .. 
} = summary; SpaceHierarchyRoomsChunk { @@ -594,8 +898,405 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR join_rule, room_type, children_state, - encryption, - room_version, - allowed_room_ids, + } +} + +/// Returns an empty vec if not a restricted room +fn allowed_room_ids(join_rule: JoinRule) -> Vec { + let mut room_ids = vec![]; + if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { + for rule in r.allow { + if let AllowRule::RoomMembership(RoomMembership { + room_id: membership, + }) = rule + { + room_ids.push(membership.clone()); + } + } + } + room_ids +} + +/// Returns the children of a SpaceHierarchyParentSummary, making use of the +/// children_state field +fn get_parent_children_via( + parent: &SpaceHierarchyParentSummary, suggested_only: bool, +) -> Vec<(OwnedRoomId, Vec)> { + parent + .children_state + .iter() + .filter_map(|raw_ce| { + raw_ce.deserialize().map_or(None, |ce| { + if suggested_only && !ce.content.suggested { + None + } else { + Some((ce.state_key, ce.content.via)) + } + }) + }) + .collect() +} + +#[cfg(test)] +mod tests { + use ruma::{ + api::federation::space::SpaceHierarchyParentSummaryInit, events::room::join_rules::Restricted, owned_room_id, + owned_server_name, + }; + + use super::*; + + fn first(arena: &mut Arena, room_id: &OwnedRoomId) { + let first_untrav = arena.first_untraversed().unwrap(); + + assert_eq!(arena.get(first_untrav).unwrap().room_id, *room_id); + } + + #[test] + fn zero_depth() { + let mut arena = Arena::new(owned_room_id!("!foo:example.org"), 0); + + assert_eq!(arena.first_untraversed(), None); + } + + #[test] + fn two_depth() { + let mut arena = Arena::new(owned_room_id!("!root:example.org"), 2); + + let root = arena.first_untraversed().unwrap(); + arena.push( + root, + vec![ + (owned_room_id!("!subspace1:example.org"), vec![]), + (owned_room_id!("!subspace2:example.org"), vec![]), + (owned_room_id!("!foo:example.org"), vec![]), + ], + ); + + let subspace1 
= arena.first_untraversed().unwrap(); + let subspace2 = arena.first_untraversed().unwrap(); + + arena.push( + subspace1, + vec![ + (owned_room_id!("!room1:example.org"), vec![]), + (owned_room_id!("!room2:example.org"), vec![]), + ], + ); + + first(&mut arena, &owned_room_id!("!room1:example.org")); + first(&mut arena, &owned_room_id!("!room2:example.org")); + + arena.push( + subspace2, + vec![ + (owned_room_id!("!room3:example.org"), vec![]), + (owned_room_id!("!room4:example.org"), vec![]), + ], + ); + first(&mut arena, &owned_room_id!("!room3:example.org")); + first(&mut arena, &owned_room_id!("!room4:example.org")); + + let foo_node = NodeId { + index: 1, + }; + + assert_eq!(arena.first_untraversed(), Some(foo_node)); + assert_eq!( + arena.get(foo_node).map(|node| node.room_id.clone()), + Some(owned_room_id!("!foo:example.org")) + ); + } + + #[test] + fn empty_push() { + let mut arena = Arena::new(owned_room_id!("!root:example.org"), 5); + + let root = arena.first_untraversed().unwrap(); + arena.push( + root, + vec![ + (owned_room_id!("!room1:example.org"), vec![]), + (owned_room_id!("!room2:example.org"), vec![]), + ], + ); + + let room1 = arena.first_untraversed().unwrap(); + arena.push(room1, vec![]); + + first(&mut arena, &owned_room_id!("!room2:example.org")); + assert!(arena.first_untraversed().is_none()); + } + + #[test] + fn beyond_max_depth() { + let mut arena = Arena::new(owned_room_id!("!root:example.org"), 0); + + let root = NodeId { + index: 0, + }; + + arena.push(root, vec![(owned_room_id!("!too_deep:example.org"), vec![])]); + + assert_eq!(arena.first_child(root), None); + assert_eq!(arena.nodes.len(), 1); + } + + #[test] + fn order_check() { + let mut arena = Arena::new(owned_room_id!("!root:example.org"), 3); + + let root = arena.first_untraversed().unwrap(); + arena.push( + root, + vec![ + (owned_room_id!("!subspace1:example.org"), vec![]), + (owned_room_id!("!subspace2:example.org"), vec![]), + (owned_room_id!("!foo:example.org"), vec![]), + 
], + ); + + let subspace1 = arena.first_untraversed().unwrap(); + arena.push( + subspace1, + vec![ + (owned_room_id!("!room1:example.org"), vec![]), + (owned_room_id!("!room3:example.org"), vec![]), + (owned_room_id!("!room5:example.org"), vec![]), + ], + ); + + first(&mut arena, &owned_room_id!("!room1:example.org")); + first(&mut arena, &owned_room_id!("!room3:example.org")); + first(&mut arena, &owned_room_id!("!room5:example.org")); + + let subspace2 = arena.first_untraversed().unwrap(); + + assert_eq!(arena.get(subspace2).unwrap().room_id, owned_room_id!("!subspace2:example.org")); + + arena.push( + subspace2, + vec![ + (owned_room_id!("!room1:example.org"), vec![]), + (owned_room_id!("!room2:example.org"), vec![]), + ], + ); + + first(&mut arena, &owned_room_id!("!room1:example.org")); + first(&mut arena, &owned_room_id!("!room2:example.org")); + first(&mut arena, &owned_room_id!("!foo:example.org")); + + assert_eq!(arena.first_untraversed(), None); + } + + #[test] + fn get_summary_children() { + let summary: SpaceHierarchyParentSummary = SpaceHierarchyParentSummaryInit { + num_joined_members: UInt::from(1_u32), + room_id: owned_room_id!("!root:example.org"), + world_readable: true, + guest_can_join: true, + join_rule: SpaceRoomJoinRule::Public, + children_state: vec![ + serde_json::from_str( + r#"{ + "content": { + "via": [ + "example.org" + ], + "suggested": false + }, + "origin_server_ts": 1629413349153, + "sender": "@alice:example.org", + "state_key": "!foo:example.org", + "type": "m.space.child" + }"#, + ) + .unwrap(), + serde_json::from_str( + r#"{ + "content": { + "via": [ + "example.org" + ], + "suggested": true + }, + "origin_server_ts": 1629413349157, + "sender": "@alice:example.org", + "state_key": "!bar:example.org", + "type": "m.space.child" + }"#, + ) + .unwrap(), + serde_json::from_str( + r#"{ + "content": { + "via": [ + "example.org" + ] + }, + "origin_server_ts": 1629413349160, + "sender": "@alice:example.org", + "state_key": 
"!baz:example.org", + "type": "m.space.child" + }"#, + ) + .unwrap(), + ], + allowed_room_ids: vec![], + } + .into(); + + assert_eq!( + get_parent_children_via(&summary, false), + vec![ + (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) + ] + ); + assert_eq!( + get_parent_children_via(&summary, true), + vec![(owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")])] + ); + } + + #[test] + fn allowed_room_ids_from_join_rule() { + let restricted_join_rule = JoinRule::Restricted(Restricted { + allow: vec![ + AllowRule::RoomMembership(RoomMembership { + room_id: owned_room_id!("!foo:example.org"), + }), + AllowRule::RoomMembership(RoomMembership { + room_id: owned_room_id!("!bar:example.org"), + }), + AllowRule::RoomMembership(RoomMembership { + room_id: owned_room_id!("!baz:example.org"), + }), + ], + }); + + let invite_join_rule = JoinRule::Invite; + + assert_eq!( + allowed_room_ids(restricted_join_rule), + vec![ + owned_room_id!("!foo:example.org"), + owned_room_id!("!bar:example.org"), + owned_room_id!("!baz:example.org") + ] + ); + + let empty_vec: Vec = vec![]; + + assert_eq!(allowed_room_ids(invite_join_rule), empty_vec); + } + + #[test] + fn invalid_pagnation_tokens() { + fn token_is_err(token: &str) { + let token: Result = PagnationToken::from_str(token); + token.unwrap_err(); + } + + token_is_err("231_2_noabool"); + token_is_err(""); + token_is_err("111_3_"); + token_is_err("foo_not_int"); + token_is_err("11_4_true_"); + token_is_err("___"); + token_is_err("__false"); + } + + #[test] + fn valid_pagnation_tokens() { + assert_eq!( + PagnationToken { + skip: UInt::from(40_u32), + limit: UInt::from(20_u32), + max_depth: UInt::from(1_u32), + suggested_only: true + }, + PagnationToken::from_str("40_20_1_true").unwrap() + ); + + assert_eq!( + PagnationToken { 
+ skip: UInt::from(27645_u32), + limit: UInt::from(97_u32), + max_depth: UInt::from(10539_u32), + suggested_only: false + }, + PagnationToken::from_str("27645_97_10539_false").unwrap() + ); + } + + #[test] + fn pagnation_token_to_string() { + assert_eq!( + PagnationToken { + skip: UInt::from(27645_u32), + limit: UInt::from(97_u32), + max_depth: UInt::from(9420_u32), + suggested_only: false + } + .to_string(), + "27645_97_9420_false" + ); + + assert_eq!( + PagnationToken { + skip: UInt::from(12_u32), + limit: UInt::from(3_u32), + max_depth: UInt::from(1_u32), + suggested_only: true + } + .to_string(), + "12_3_1_true" + ); + } + + #[test] + fn forbid_recursion() { + let mut arena = Arena::new(owned_room_id!("!root:example.org"), 5); + let root_node_id = arena.first_untraversed().unwrap(); + + arena.push( + root_node_id, + vec![ + (owned_room_id!("!subspace1:example.org"), vec![]), + (owned_room_id!("!room1:example.org"), vec![]), + (owned_room_id!("!subspace2:example.org"), vec![]), + ], + ); + + let subspace1_node_id = arena.first_untraversed().unwrap(); + arena.push( + subspace1_node_id, + vec![ + (owned_room_id!("!subspace2:example.org"), vec![]), + (owned_room_id!("!room1:example.org"), vec![]), + ], + ); + + let subspace2_node_id = arena.first_untraversed().unwrap(); + // Here, both subspaces should be ignored and not added, as they are both + // parents of subspace2 + arena.push( + subspace2_node_id, + vec![ + (owned_room_id!("!subspace1:example.org"), vec![]), + (owned_room_id!("!subspace2:example.org"), vec![]), + (owned_room_id!("!room1:example.org"), vec![]), + ], + ); + + assert_eq!(arena.nodes.len(), 7); + first(&mut arena, &owned_room_id!("!room1:example.org")); + first(&mut arena, &owned_room_id!("!room1:example.org")); + first(&mut arena, &owned_room_id!("!room1:example.org")); + first(&mut arena, &owned_room_id!("!subspace2:example.org")); + assert!(arena.first_untraversed().is_none()); } } diff --git a/src/service/rooms/spaces/pagination_token.rs 
b/src/service/rooms/spaces/pagination_token.rs deleted file mode 100644 index d97b7a2f..00000000 --- a/src/service/rooms/spaces/pagination_token.rs +++ /dev/null @@ -1,76 +0,0 @@ -use std::{ - fmt::{Display, Formatter}, - str::FromStr, -}; - -use conduwuit::{Error, Result}; -use ruma::{UInt, api::client::error::ErrorKind}; - -use crate::rooms::short::ShortRoomId; - -// TODO: perhaps use some better form of token rather than just room count -#[derive(Debug, Eq, PartialEq)] -pub struct PaginationToken { - /// Path down the hierarchy of the room to start the response at, - /// excluding the root space. - pub short_room_ids: Vec, - pub limit: UInt, - pub max_depth: UInt, - pub suggested_only: bool, -} - -impl FromStr for PaginationToken { - type Err = Error; - - fn from_str(value: &str) -> Result { - let mut values = value.split('_'); - let mut pag_tok = || { - let short_room_ids = values - .next()? - .split(',') - .filter_map(|room_s| u64::from_str(room_s).ok()) - .collect(); - - let limit = UInt::from_str(values.next()?).ok()?; - let max_depth = UInt::from_str(values.next()?).ok()?; - let slice = values.next()?; - let suggested_only = if values.next().is_none() { - if slice == "true" { - true - } else if slice == "false" { - false - } else { - None? - } - } else { - None? 
- }; - - Some(Self { - short_room_ids, - limit, - max_depth, - suggested_only, - }) - }; - - if let Some(token) = pag_tok() { - Ok(token) - } else { - Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) - } - } -} - -impl Display for PaginationToken { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - let short_room_ids = self - .short_room_ids - .iter() - .map(ToString::to_string) - .collect::>() - .join(","); - - write!(f, "{short_room_ids}_{}_{}_{}", self.limit, self.max_depth, self.suggested_only) - } -} diff --git a/src/service/rooms/spaces/tests.rs b/src/service/rooms/spaces/tests.rs deleted file mode 100644 index d0395fdd..00000000 --- a/src/service/rooms/spaces/tests.rs +++ /dev/null @@ -1,147 +0,0 @@ -use std::str::FromStr; - -use ruma::{ - UInt, - api::federation::space::{SpaceHierarchyParentSummary, SpaceHierarchyParentSummaryInit}, - owned_room_id, owned_server_name, - space::SpaceRoomJoinRule, -}; - -use crate::rooms::spaces::{PaginationToken, get_parent_children_via}; - -#[test] -fn get_summary_children() { - let summary: SpaceHierarchyParentSummary = SpaceHierarchyParentSummaryInit { - num_joined_members: UInt::from(1_u32), - room_id: owned_room_id!("!root:example.org"), - world_readable: true, - guest_can_join: true, - join_rule: SpaceRoomJoinRule::Public, - children_state: vec![ - serde_json::from_str( - r#"{ - "content": { - "via": [ - "example.org" - ], - "suggested": false - }, - "origin_server_ts": 1629413349153, - "sender": "@alice:example.org", - "state_key": "!foo:example.org", - "type": "m.space.child" - }"#, - ) - .unwrap(), - serde_json::from_str( - r#"{ - "content": { - "via": [ - "example.org" - ], - "suggested": true - }, - "origin_server_ts": 1629413349157, - "sender": "@alice:example.org", - "state_key": "!bar:example.org", - "type": "m.space.child" - }"#, - ) - .unwrap(), - serde_json::from_str( - r#"{ - "content": { - "via": [ - "example.org" - ] - }, - "origin_server_ts": 1629413349160, - "sender": 
"@alice:example.org", - "state_key": "!baz:example.org", - "type": "m.space.child" - }"#, - ) - .unwrap(), - ], - allowed_room_ids: vec![], - } - .into(); - - assert_eq!( - get_parent_children_via(&summary, false) - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) - ] - ); - assert_eq!( - get_parent_children_via(&summary, true) - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![(owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")])] - ); -} - -#[test] -fn invalid_pagination_tokens() { - fn token_is_err(token: &str) { PaginationToken::from_str(token).unwrap_err(); } - - token_is_err("231_2_noabool"); - token_is_err(""); - token_is_err("111_3_"); - token_is_err("foo_not_int"); - token_is_err("11_4_true_"); - token_is_err("___"); - token_is_err("__false"); -} - -#[test] -fn valid_pagination_tokens() { - assert_eq!( - PaginationToken { - short_room_ids: vec![5383, 42934, 283, 423], - limit: UInt::from(20_u32), - max_depth: UInt::from(1_u32), - suggested_only: true - }, - PaginationToken::from_str("5383,42934,283,423_20_1_true").unwrap() - ); - - assert_eq!( - PaginationToken { - short_room_ids: vec![740], - limit: UInt::from(97_u32), - max_depth: UInt::from(10539_u32), - suggested_only: false - }, - PaginationToken::from_str("740_97_10539_false").unwrap() - ); -} - -#[test] -fn pagination_token_to_string() { - assert_eq!( - PaginationToken { - short_room_ids: vec![740], - limit: UInt::from(97_u32), - max_depth: UInt::from(10539_u32), - suggested_only: false - } - .to_string(), - "740_97_10539_false" - ); - - assert_eq!( - PaginationToken { - short_room_ids: vec![9, 34], - limit: UInt::from(3_u32), - max_depth: UInt::from(1_u32), - suggested_only: true - } - .to_string(), - "9,34_3_1_true" 
- ); -} diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs new file mode 100644 index 00000000..f486f1f8 --- /dev/null +++ b/src/service/rooms/state/data.rs @@ -0,0 +1,33 @@ +use std::{collections::HashSet, sync::Arc}; + +use ruma::{EventId, OwnedEventId, RoomId}; +use tokio::sync::MutexGuard; + +use crate::Result; + +pub trait Data: Send + Sync { + /// Returns the last state hash key added to the db for the given room. + fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; + + /// Set the state hash to a new version, but does not update `state_cache`. + fn set_room_state( + &self, + room_id: &RoomId, + new_shortstatehash: u64, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()>; + + /// Associates a state with an event. + fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()>; + + /// Returns all events we would send as the `prev_events` of the next event. + fn get_forward_extremities(&self, room_id: &RoomId) -> Result>>; + + /// Replace the forward extremities of the room. 
+ fn set_forward_extremities( + &self, + room_id: &RoomId, + event_ids: Vec, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()>; +} diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 803ba9d7..10ffda85 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,94 +1,28 @@ -use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; +mod data; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; -use async_trait::async_trait; -use conduwuit::{ - PduEvent, Result, err, - result::FlatOk, - state_res::{self, StateMap}, - utils::{ - IterStream, MutexMap, MutexMapGuard, ReadyExt, calculate_hash, - stream::{BroadbandExt, TryIgnore}, - }, - warn, -}; -use database::{Deserialized, Ignore, Interfix, Map}; -use futures::{ - FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future::join_all, pin_mut, -}; +pub(crate) use data::Data; use ruma::{ - EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, + api::client::error::ErrorKind, events::{ - AnyStrippedStateEvent, StateEventType, TimelineEventType, room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, + AnyStrippedStateEvent, StateEventType, TimelineEventType, }, serde::Raw, + state_res::{self, StateMap}, + EventId, OwnedEventId, RoomId, RoomVersionId, UserId, }; +use tokio::sync::MutexGuard; +use tracing::warn; -use crate::{ - Dep, globals, rooms, - rooms::{ - short::{ShortEventId, ShortStateHash}, - state_compressor::{CompressedState, parse_compressed_state_event}, - }, -}; +use super::state_compressor::CompressedStateEvent; +use crate::{services, utils::calculate_hash, Error, PduEvent, Result}; pub struct Service { - pub mutex: RoomMutexMap, - services: Services, - db: Data, -} - -struct Services { - globals: Dep, - short: Dep, - spaces: Dep, - state_cache: Dep, - state_accessor: Dep, - state_compressor: Dep, - timeline: Dep, -} - -struct 
Data { - shorteventid_shortstatehash: Arc, - roomid_shortstatehash: Arc, - roomid_pduleaves: Arc, -} - -type RoomMutexMap = MutexMap; -pub type RoomMutexGuard = MutexMapGuard; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - mutex: RoomMutexMap::new(), - services: Services { - globals: args.depend::("globals"), - short: args.depend::("rooms::short"), - spaces: args.depend::("rooms::spaces"), - state_cache: args.depend::("rooms::state_cache"), - state_accessor: args - .depend::("rooms::state_accessor"), - state_compressor: args - .depend::("rooms::state_compressor"), - timeline: args.depend::("rooms::timeline"), - }, - db: Data { - shorteventid_shortstatehash: args.db["shorteventid_shortstatehash"].clone(), - roomid_shortstatehash: args.db["roomid_shortstatehash"].clone(), - roomid_pduleaves: args.db["roomid_pduleaves"].clone(), - }, - })) - } - - async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { - let mutex = self.mutex.len(); - writeln!(out, "state_mutex: {mutex}")?; - - Ok(()) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, } impl Service { @@ -97,66 +31,70 @@ impl Service { &self, room_id: &RoomId, shortstatehash: u64, - statediffnew: Arc, - _statediffremoved: Arc, - state_lock: &RoomMutexGuard, - ) -> Result { - let event_ids = statediffnew - .iter() - .stream() - .map(|&new| parse_compressed_state_event(new).1) - .then(|shorteventid| { - self.services - .short - .get_eventid_from_short::>(shorteventid) - }) - .ignore_err(); - - pin_mut!(event_ids); - while let Some(event_id) = event_ids.next().await { - let Ok(pdu) = self.services.timeline.get_pdu(&event_id).await else { + statediffnew: Arc>, + _statediffremoved: Arc>, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + for event_id in statediffnew.iter().filter_map(|new| { + services() 
+ .rooms + .state_compressor + .parse_compressed_state_event(new) + .ok() + .map(|(_, id)| id) + }) { + let Some(pdu) = services().rooms.timeline.get_pdu_json(&event_id)? else { continue; }; + let pdu: PduEvent = match serde_json::from_str( + &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), + ) { + Ok(pdu) => pdu, + Err(_) => continue, + }; + match pdu.kind { - | TimelineEventType::RoomMember => { - let Some(user_id) = pdu.state_key.as_ref().map(UserId::parse).flat_ok() - else { + TimelineEventType::RoomMember => { + let Ok(membership_event) = serde_json::from_str::(pdu.content.get()) else { continue; }; - let Ok(membership_event) = pdu.get_content::() else { + let Some(state_key) = pdu.state_key else { continue; }; - self.services - .state_cache - .update_membership( - room_id, - user_id, - membership_event, - &pdu.sender, - None, - None, - false, - ) - .await?; + let Ok(user_id) = UserId::parse(state_key) else { + continue; + }; + + services().rooms.state_cache.update_membership( + room_id, + &user_id, + membership_event, + &pdu.sender, + None, + None, + false, + )?; }, - | TimelineEventType::SpaceChild => { - self.services + TimelineEventType::SpaceChild => { + services() + .rooms .spaces .roomid_spacehierarchy_cache .lock() .await .remove(&pdu.room_id); }, - | _ => continue, + _ => continue, } } - self.services.state_cache.update_joined_count(room_id).await; + services().rooms.state_cache.update_joined_count(room_id)?; - self.set_room_state(room_id, shortstatehash, state_lock); + self.db + .set_room_state(room_id, shortstatehash, state_lock)?; Ok(()) } @@ -165,60 +103,57 @@ impl Service { /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
- #[tracing::instrument(skip(self, state_ids_compressed), level = "debug")] - pub async fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: Arc, - ) -> Result { - const KEY_LEN: usize = size_of::(); - const VAL_LEN: usize = size_of::(); - - let shorteventid = self - .services + #[tracing::instrument(skip(self, state_ids_compressed))] + pub fn set_event_state( + &self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: Arc>, + ) -> Result { + let shorteventid = services() + .rooms .short - .get_or_create_shorteventid(event_id) - .await; + .get_or_create_shorteventid(event_id)?; - let previous_shortstatehash = self.get_room_shortstatehash(room_id).await; + let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; - let state_hash = calculate_hash(state_ids_compressed.iter().map(|s| &s[..])); + let state_hash = calculate_hash( + &state_ids_compressed + .iter() + .map(|s| &s[..]) + .collect::>(), + ); - let (shortstatehash, already_existed) = self - .services + let (shortstatehash, already_existed) = services() + .rooms .short - .get_or_create_shortstatehash(&state_hash) - .await; + .get_or_create_shortstatehash(&state_hash)?; if !already_existed { - let states_parents = match previous_shortstatehash { - | Ok(p) => - self.services + let states_parents = previous_shortstatehash.map_or_else( + || Ok(Vec::new()), + |p| { + services() + .rooms .state_compressor .load_shortstatehash_info(p) - .await?, - | _ => Vec::new(), + }, + )?; + + let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew: HashSet<_> = state_ids_compressed + .difference(&parent_stateinfo.1) + .copied() + .collect(); + + let statediffremoved: HashSet<_> = parent_stateinfo + .1 + .difference(&state_ids_compressed) + .copied() + .collect(); + + (Arc::new(statediffnew), Arc::new(statediffremoved)) + } else { + (state_ids_compressed, Arc::new(HashSet::new())) }; - - let (statediffnew, 
statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: CompressedState = state_ids_compressed - .difference(&parent_stateinfo.full_state) - .copied() - .collect(); - - let statediffremoved: CompressedState = parent_stateinfo - .full_state - .difference(&state_ids_compressed) - .copied() - .collect(); - - (Arc::new(statediffnew), Arc::new(statediffremoved)) - } else { - (state_ids_compressed, Arc::new(CompressedState::new())) - }; - self.services.state_compressor.save_state_from_diff( + services().rooms.state_compressor.save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -227,9 +162,7 @@ impl Service { )?; } - self.db - .shorteventid_shortstatehash - .aput::(shorteventid, shortstatehash); + self.db.set_event_state(shorteventid, shortstatehash)?; Ok(shortstatehash) } @@ -238,241 +171,237 @@ impl Service { /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
- #[tracing::instrument(skip(self, new_pdu), level = "debug")] - pub async fn append_to_state(&self, new_pdu: &PduEvent) -> Result { - const BUFSIZE: usize = size_of::(); - - let shorteventid = self - .services + #[tracing::instrument(skip(self, new_pdu))] + pub fn append_to_state(&self, new_pdu: &PduEvent) -> Result { + let shorteventid = services() + .rooms .short - .get_or_create_shorteventid(&new_pdu.event_id) - .await; + .get_or_create_shorteventid(&new_pdu.event_id)?; - let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id).await; + let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; - if let Ok(p) = previous_shortstatehash { - self.db - .shorteventid_shortstatehash - .aput::(shorteventid, p); + if let Some(p) = previous_shortstatehash { + self.db.set_event_state(shorteventid, p)?; } - match &new_pdu.state_key { - | Some(state_key) => { - let states_parents = match previous_shortstatehash { - | Ok(p) => - self.services - .state_compressor - .load_shortstatehash_info(p) - .await?, - | _ => Vec::new(), - }; + if let Some(state_key) = &new_pdu.state_key { + let states_parents = previous_shortstatehash.map_or_else( + || Ok(Vec::new()), + |p| { + services() + .rooms + .state_compressor + .load_shortstatehash_info(p) + }, + )?; - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) - .await; + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key)?; - let new = self - .services - .state_compressor - .compress_state_event(shortstatekey, &new_pdu.event_id) - .await; + let new = services() + .rooms + .state_compressor + .compress_state_event(shortstatekey, &new_pdu.event_id)?; - let replaces = states_parents - .last() - .map(|info| { - info.full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); + let replaces = 
states_parents + .last() + .map(|info| { + info.1 + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + }) + .unwrap_or_default(); - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } + if Some(&new) == replaces { + return Ok(previous_shortstatehash.expect("must exist")); + } - // TODO: statehash with deterministic inputs - let shortstatehash = self.services.globals.next_count()?; + // TODO: statehash with deterministic inputs + let shortstatehash = services().globals.next_count()?; - let mut statediffnew = CompressedState::new(); - statediffnew.insert(new); + let mut statediffnew = HashSet::new(); + statediffnew.insert(new); - let mut statediffremoved = CompressedState::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } + let mut statediffremoved = HashSet::new(); + if let Some(replaces) = replaces { + statediffremoved.insert(*replaces); + } - self.services.state_compressor.save_state_from_diff( - shortstatehash, - Arc::new(statediffnew), - Arc::new(statediffremoved), - 2, - states_parents, - )?; + services().rooms.state_compressor.save_state_from_diff( + shortstatehash, + Arc::new(statediffnew), + Arc::new(statediffremoved), + 2, + states_parents, + )?; - Ok(shortstatehash) - }, - | _ => - Ok(previous_shortstatehash.expect("first event in room must be a state event")), + Ok(shortstatehash) + } else { + Ok(previous_shortstatehash.expect("first event in room must be a state event")) } } - #[tracing::instrument(skip_all, level = "debug")] - pub async fn summary_stripped(&self, event: &PduEvent) -> Vec> { - let cells = [ - (&StateEventType::RoomCreate, ""), - (&StateEventType::RoomJoinRules, ""), - (&StateEventType::RoomCanonicalAlias, ""), - (&StateEventType::RoomName, ""), - (&StateEventType::RoomAvatar, ""), - (&StateEventType::RoomMember, event.sender.as_str()), // Add recommended events - (&StateEventType::RoomEncryption, ""), - (&StateEventType::RoomTopic, ""), - 
]; - - let fetches = cells.iter().map(|(event_type, state_key)| { - self.services + #[tracing::instrument(skip(self, invite_event))] + pub fn calculate_invite_state(&self, invite_event: &PduEvent) -> Result>> { + let mut state = Vec::new(); + // Add recommended events + if let Some(e) = + services() + .rooms .state_accessor - .room_state_get(&event.room_id, event_type, state_key) - }); + .room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + services() + .rooms + .state_accessor + .room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomCanonicalAlias, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + services() + .rooms + .state_accessor + .room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + services() + .rooms + .state_accessor + .room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomMember, + invite_event.sender.as_str(), + )? { + state.push(e.to_stripped_state_event()); + } - join_all(fetches) - .await - .into_iter() - .filter_map(Result::ok) - .map(PduEvent::into_stripped_state_event) - .chain(once(event.to_stripped_state_event())) - .collect() + state.push(invite_event.to_stripped_state_event()); + Ok(state) } /// Set the state hash to a new version, but does not update state_cache. 
- #[tracing::instrument(skip(self, _mutex_lock), level = "debug")] + #[tracing::instrument(skip(self))] pub fn set_room_state( &self, room_id: &RoomId, shortstatehash: u64, - _mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room - * state mutex */ - ) { - const BUFSIZE: usize = size_of::(); - - self.db - .roomid_shortstatehash - .raw_aput::(room_id, shortstatehash); + mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + self.db.set_room_state(room_id, shortstatehash, mutex_lock) } /// Returns the room's version. - #[tracing::instrument(skip(self), level = "debug")] - pub async fn get_room_version(&self, room_id: &RoomId) -> Result { - self.services + #[tracing::instrument(skip(self))] + pub fn get_room_version(&self, room_id: &RoomId) -> Result { + let create_event = services() + .rooms .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomCreate, "") - .await - .map(|content: RoomCreateEventContent| content.room_version) - .map_err(|e| err!(Request(NotFound("No create event found: {e:?}")))) + .room_state_get(room_id, &StateEventType::RoomCreate, "")?; + + let create_event_content: RoomCreateEventContent = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()? 
+ .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "No create event found"))?; + + Ok(create_event_content.room_version) } - pub async fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result { - self.db - .roomid_shortstatehash - .get(room_id) - .await - .deserialized() + pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.db.get_room_shortstatehash(room_id) } - pub fn get_forward_extremities<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + '_ { - let prefix = (room_id, Interfix); - - self.db - .roomid_pduleaves - .keys_prefix(&prefix) - .map_ok(|(_, event_id): (Ignore, &EventId)| event_id) - .ignore_err() + pub fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { + self.db.get_forward_extremities(room_id) } - pub async fn set_forward_extremities<'a, I>( - &'a self, - room_id: &'a RoomId, - event_ids: I, - _state_lock: &'a RoomMutexGuard, - ) where - I: Iterator + Send + 'a, - { - let prefix = (room_id, Interfix); + pub fn set_forward_extremities( + &self, + room_id: &RoomId, + event_ids: Vec, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { self.db - .roomid_pduleaves - .keys_prefix_raw(&prefix) - .ignore_err() - .ready_for_each(|key| self.db.roomid_pduleaves.remove(key)) - .await; - - for event_id in event_ids { - let key = (room_id, event_id); - self.db.roomid_pduleaves.put_raw(key, event_id); - } + .set_forward_extremities(room_id, event_ids, state_lock) } /// This fetches auth events from the current state. 
- #[tracing::instrument(skip(self, content), level = "debug")] - pub async fn get_auth_events( - &self, - room_id: &RoomId, - kind: &TimelineEventType, - sender: &UserId, - state_key: Option<&str>, + #[tracing::instrument(skip(self))] + pub fn get_auth_events( + &self, room_id: &RoomId, kind: &TimelineEventType, sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, - ) -> Result> { - let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else { + ) -> Result>> { + let Some(shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? else { return Ok(HashMap::new()); }; - let auth_types = state_res::auth_types_for_event(kind, sender, state_key, content)?; + let auth_events = + state_res::auth_types_for_event(kind, sender, state_key, content).expect("content is a valid JSON object"); - let sauthevents: HashMap<_, _> = auth_types - .iter() - .stream() - .broad_filter_map(|(event_type, state_key)| { - self.services + let mut sauthevents = auth_events + .into_iter() + .filter_map(|(event_type, state_key)| { + services() + .rooms .short - .get_shortstatekey(event_type, state_key) - .map_ok(move |ssk| (ssk, (event_type, state_key))) - .map(Result::ok) + .get_shortstatekey(&event_type.to_string().into(), &state_key) + .ok() + .flatten() + .map(|s| (s, (event_type, state_key))) }) - .collect() - .await; + .collect::>(); - let (state_keys, event_ids): (Vec<_>, Vec<_>) = self - .services - .state_accessor - .state_full_shortids(shortstatehash) - .ready_filter_map(Result::ok) - .ready_filter_map(|(shortstatekey, shorteventid)| { - sauthevents - .get(&shortstatekey) - .map(|(ty, sk)| ((ty, sk), shorteventid)) - }) - .unzip() - .await; + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? 
+ .pop() + .expect("there is always one layer") + .1; - self.services - .short - .multi_get_eventid_from_short(event_ids.into_iter().stream()) - .zip(state_keys.into_iter().stream()) - .ready_filter_map(|(event_id, (ty, sk))| Some(((ty, sk), event_id.ok()?))) - .broad_filter_map(|((ty, sk), event_id): (_, OwnedEventId)| async move { - self.services - .timeline - .get_pdu(&event_id) - .await - .map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu)) + Ok(full_state + .iter() + .filter_map(|compressed| { + services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed) .ok() }) - .collect() - .map(Ok) - .await + .filter_map(|(shortstatekey, event_id)| sauthevents.remove(&shortstatekey).map(|k| (k, event_id))) + .filter_map(|(k, event_id)| { + services() + .rooms + .timeline + .get_pdu(&event_id) + .ok() + .flatten() + .map(|pdu| (k, pdu)) + }) + .collect()) } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs new file mode 100644 index 00000000..5fd58864 --- /dev/null +++ b/src/service/rooms/state_accessor/data.rs @@ -0,0 +1,48 @@ +use std::{collections::HashMap, sync::Arc}; + +use async_trait::async_trait; +use ruma::{events::StateEventType, EventId, RoomId}; + +use crate::{PduEvent, Result}; + +#[async_trait] +pub trait Data: Send + Sync { + /// Builds a StateMap by iterating over all keys that start + /// with state_hash, this gives the full state for the given state_hash. + #[allow(unused_qualifications)] // async traits + async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; + + #[allow(unused_qualifications)] // async traits + async fn state_full(&self, shortstatehash: u64) -> Result>>; + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + fn state_get_id( + &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + ) -> Result>>; + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). 
+ fn state_get( + &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + ) -> Result>>; + + /// Returns the state hash for this pdu. + fn pdu_shortstatehash(&self, event_id: &EventId) -> Result>; + + /// Returns the full room state. + #[allow(unused_qualifications)] // async traits + async fn room_state_full(&self, room_id: &RoomId) -> Result>>; + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + fn room_state_get_id( + &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + ) -> Result>>; + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + fn room_state_get( + &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + ) -> Result>>; +} diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index f719fc7b..e03e5464 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,165 +1,313 @@ -mod room_state; -mod server_can; -mod state; -mod user_can; - -use std::sync::Arc; - -use async_trait::async_trait; -use conduwuit::{Result, err}; -use database::Map; -use ruma::{ - EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, RoomId, UserId, - events::{ - StateEventType, - room::{ - avatar::RoomAvatarEventContent, - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - encryption::RoomEncryptionEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::RoomMemberEventContent, - name::RoomNameEventContent, - topic::RoomTopicEventContent, - }, - }, - room::RoomType, +mod data; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, }; -use crate::{Dep, rooms}; +pub use data::Data; +use lru_cache::LruCache; +use ruma::{ + events::{ + room::{ + avatar::RoomAvatarEventContent, + 
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + member::{MembershipState, RoomMemberEventContent}, + name::RoomNameEventContent, + }, + StateEventType, + }, + EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, +}; +use serde_json::value::to_raw_value; +use tokio::sync::MutexGuard; +use tracing::{error, warn}; + +use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result}; pub struct Service { - services: Services, - db: Data, -} - -struct Services { - short: Dep, - state: Dep, - state_compressor: Dep, - state_cache: Dep, - timeline: Dep, -} - -struct Data { - shorteventid_shortstatehash: Arc, -} - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - state_cache: args.depend::("rooms::state_cache"), - timeline: args.depend::("rooms::timeline"), - short: args.depend::("rooms::short"), - state: args.depend::("rooms::state"), - state_compressor: args - .depend::("rooms::state_compressor"), - }, - db: Data { - shorteventid_shortstatehash: args.db["shorteventid_shortstatehash"].clone(), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, + pub server_visibility_cache: Mutex>, + pub user_visibility_cache: Mutex>, } impl Service { - pub async fn get_name(&self, room_id: &RoomId) -> Result { - self.room_state_get_content(room_id, &StateEventType::RoomName, "") - .await - .map(|c: RoomNameEventContent| c.name) + /// Builds a StateMap by iterating over all keys that start + /// with state_hash, this gives the full state for the given state_hash. 
+ #[tracing::instrument(skip(self))] + pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + self.db.state_full_ids(shortstatehash).await } - pub async fn get_avatar(&self, room_id: &RoomId) -> JsOption { - let content = self - .room_state_get_content(room_id, &StateEventType::RoomAvatar, "") - .await - .ok(); - - JsOption::from_option(content) + pub async fn state_full(&self, shortstatehash: u64) -> Result>> { + self.db.state_full(shortstatehash).await } - pub async fn get_member( - &self, - room_id: &RoomId, - user_id: &UserId, - ) -> Result { - self.room_state_get_content(room_id, &StateEventType::RoomMember, user_id.as_str()) - .await + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self))] + pub fn state_get_id( + &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + ) -> Result>> { + self.db.state_get_id(shortstatehash, event_type, state_key) } - /// Checks if guests are able to view room content without joining - pub async fn is_world_readable(&self, room_id: &RoomId) -> bool { - self.room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") - .await - .map(|c: RoomHistoryVisibilityEventContent| { - c.history_visibility == HistoryVisibility::WorldReadable - }) - .unwrap_or(false) + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). 
+ pub fn state_get( + &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, + ) -> Result>> { + self.db.state_get(shortstatehash, event_type, state_key) } - /// Checks if guests are able to join a given room - pub async fn guest_can_join(&self, room_id: &RoomId) -> bool { - self.room_state_get_content(room_id, &StateEventType::RoomGuestAccess, "") - .await - .map(|c: RoomGuestAccessEventContent| c.guest_access == GuestAccess::CanJoin) - .unwrap_or(false) - } - - /// Gets the primary alias from canonical alias event - pub async fn get_canonical_alias(&self, room_id: &RoomId) -> Result { - self.room_state_get_content(room_id, &StateEventType::RoomCanonicalAlias, "") - .await - .and_then(|c: RoomCanonicalAliasEventContent| { - c.alias - .ok_or_else(|| err!(Request(NotFound("No alias found in event content.")))) + /// Get membership for given user in state + fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> Result { + self.state_get(shortstatehash, &StateEventType::RoomMember, user_id.as_str())? + .map_or(Ok(MembershipState::Leave), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomMemberEventContent| c.membership) + .map_err(|_| Error::bad_database("Invalid room membership event in database.")) }) } - /// Gets the room topic - pub async fn get_room_topic(&self, room_id: &RoomId) -> Result { - self.room_state_get_content(room_id, &StateEventType::RoomTopic, "") - .await - .map(|c: RoomTopicEventContent| c.topic) + /// The user was a joined member at this state (potentially in the past) + fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id) + .is_ok_and(|s| s == MembershipState::Join) + // Return sensible default, i.e. + // false } - /// Returns the join rules for a given room (`JoinRule` type). 
Will default - /// to Invite if doesnt exist or invalid - pub async fn get_join_rules(&self, room_id: &RoomId) -> JoinRule { - self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule) + /// The user was an invited or joined room member at this state (potentially + /// in the past) + fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id) + .is_ok_and(|s| s == MembershipState::Join || s == MembershipState::Invite) + // Return sensible default, i.e. false } - pub async fn get_room_type(&self, room_id: &RoomId) -> Result { - self.room_state_get_content(room_id, &StateEventType::RoomCreate, "") - .await - .and_then(|content: RoomCreateEventContent| { - content - .room_type - .ok_or_else(|| err!(Request(NotFound("No type found in event content")))) + /// Whether a server is allowed to see an event through federation, based on + /// the room's history_visibility at that event's state. + #[tracing::instrument(skip(self, origin, room_id, event_id))] + pub fn server_can_see_event(&self, origin: &ServerName, room_id: &RoomId, event_id: &EventId) -> Result { + let Some(shortstatehash) = self.pdu_shortstatehash(event_id)? else { + return Ok(true); + }; + + if let Some(visibility) = self + .server_visibility_cache + .lock() + .unwrap() + .get_mut(&(origin.to_owned(), shortstatehash)) + { + return Ok(*visibility); + } + + let history_visibility = self + .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? 
+ .map_or(Ok(HistoryVisibility::Shared), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) + .map_err(|e| { + error!( + "Invalid history visibility event in database for room {room_id}, assuming is \"shared\": \ + {e}" + ); + Error::bad_database("Invalid history visibility event in database.") + }) + }) + .unwrap_or(HistoryVisibility::Shared); + + let mut current_server_members = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .filter(|member| member.server_name() == origin); + + let visibility = match history_visibility { + HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, + HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + current_server_members.any(|member| self.user_was_invited(shortstatehash, &member)) + }, + HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + current_server_members.any(|member| self.user_was_joined(shortstatehash, &member)) + }, + _ => { + error!("Unknown history visibility {history_visibility}"); + false + }, + }; + + self.server_visibility_cache + .lock() + .unwrap() + .insert((origin.to_owned(), shortstatehash), visibility); + + Ok(visibility) + } + + /// Whether a user is allowed to see an event, based on + /// the room's history_visibility at that event's state. + #[tracing::instrument(skip(self, user_id, room_id, event_id))] + pub fn user_can_see_event(&self, user_id: &UserId, room_id: &RoomId, event_id: &EventId) -> Result { + let Some(shortstatehash) = self.pdu_shortstatehash(event_id)? 
else { + return Ok(true); + }; + + if let Some(visibility) = self + .user_visibility_cache + .lock() + .unwrap() + .get_mut(&(user_id.to_owned(), shortstatehash)) + { + return Ok(*visibility); + } + + let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; + + let history_visibility = self + .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? + .map_or(Ok(HistoryVisibility::Shared), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) + .map_err(|e| { + error!( + "Invalid history visibility event in database for room {room_id}, assuming is \"shared\": \ + {e}" + ); + Error::bad_database("Invalid history visibility event in database.") + }) + }) + .unwrap_or(HistoryVisibility::Shared); + + let visibility = match history_visibility { + HistoryVisibility::WorldReadable => true, + HistoryVisibility::Shared => currently_member, + HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + self.user_was_invited(shortstatehash, user_id) + }, + HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + self.user_was_joined(shortstatehash, user_id) + }, + _ => { + error!("Unknown history visibility {history_visibility}"); + false + }, + }; + + self.user_visibility_cache + .lock() + .unwrap() + .insert((user_id.to_owned(), shortstatehash), visibility); + + Ok(visibility) + } + + /// Whether a user is allowed to see an event, based on + /// the room's history_visibility at that event's state. + #[tracing::instrument(skip(self, user_id, room_id))] + pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; + + let history_visibility = self + .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? 
+ .map_or(Ok(HistoryVisibility::Shared), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) + .map_err(|e| { + error!( + "Invalid history visibility event in database for room {room_id}, assuming is \"shared\": \ + {e}" + ); + Error::bad_database("Invalid history visibility event in database.") + }) + }) + .unwrap_or(HistoryVisibility::Shared); + + Ok(currently_member || history_visibility == HistoryVisibility::WorldReadable) + } + + /// Returns the state hash for this pdu. + pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { self.db.pdu_shortstatehash(event_id) } + + /// Returns the full room state. + #[tracing::instrument(skip(self))] + pub async fn room_state_full(&self, room_id: &RoomId) -> Result>> { + self.db.room_state_full(room_id).await + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self))] + pub fn room_state_get_id( + &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + ) -> Result>> { + self.db.room_state_get_id(room_id, event_type, state_key) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self))] + pub fn room_state_get( + &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, + ) -> Result>> { + self.db.room_state_get(room_id, event_type, state_key) + } + + pub fn get_name(&self, room_id: &RoomId) -> Result> { + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomName, "")? 
+ .map_or(Ok(None), |s| { + Ok(serde_json::from_str(s.content.get()).map_or_else(|_| None, |c: RoomNameEventContent| Some(c.name))) }) } - /// Gets the room's encryption algorithm if `m.room.encryption` state event - /// is found - pub async fn get_room_encryption( - &self, - room_id: &RoomId, - ) -> Result { - self.room_state_get_content(room_id, &StateEventType::RoomEncryption, "") - .await - .map(|content: RoomEncryptionEventContent| content.algorithm) + pub fn get_avatar(&self, room_id: &RoomId) -> Result> { + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomAvatar, "")? + .map_or(Ok(ruma::JsOption::Undefined), |s| { + serde_json::from_str(s.content.get()) + .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) + }) } - pub async fn is_encrypted_room(&self, room_id: &RoomId) -> bool { - self.room_state_get(room_id, &StateEventType::RoomEncryption, "") - .await - .is_ok() + pub fn get_member(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? 
+ .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map_err(|_| Error::bad_database("Invalid room member event in database.")) + }) + } + + pub async fn user_can_invite( + &self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &MutexGuard<'_, ()>, + ) -> Result { + let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite)) + .expect("Event content always serializes"); + + let new_event = PduBuilder { + event_type: ruma::events::TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(target_user.into()), + redacts: None, + }; + + Ok(services() + .rooms + .timeline + .create_hash_and_sign_event(new_event, sender, room_id, state_lock) + .is_ok()) } } diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs deleted file mode 100644 index 89fa2a83..00000000 --- a/src/service/rooms/state_accessor/room_state.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::borrow::Borrow; - -use conduwuit::{ - Result, err, implement, - matrix::{PduEvent, StateKey}, -}; -use futures::{Stream, StreamExt, TryFutureExt}; -use ruma::{EventId, RoomId, events::StateEventType}; -use serde::Deserialize; - -/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). -#[implement(super::Service)] -pub async fn room_state_get_content( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, -) -> Result -where - T: for<'de> Deserialize<'de>, -{ - self.room_state_get(room_id, event_type, state_key) - .await - .and_then(|event| event.get_content()) -} - -/// Returns the full room state. 
-#[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn room_state_full<'a>( - &'a self, - room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok).boxed()) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() -} - -/// Returns the full room state pdus -#[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn room_state_full_pdus<'a>( - &'a self, - room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok).boxed()) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() -} - -/// Returns a single EventId from `room_id` with key (`event_type`, -/// `state_key`). -#[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, -) -> Result -where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, -{ - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await -} - -/// Returns a single PDU from `room_id` with key (`event_type`, -/// `state_key`). 
-#[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, -) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await -} diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs deleted file mode 100644 index 2befec22..00000000 --- a/src/service/rooms/state_accessor/server_can.rs +++ /dev/null @@ -1,53 +0,0 @@ -use conduwuit::{implement, utils::stream::ReadyExt}; -use futures::StreamExt; -use ruma::{ - EventId, RoomId, ServerName, - events::{ - StateEventType, - room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - }, -}; - -/// Whether a server is allowed to see an event through federation, based on -/// the room's history_visibility at that event's state. -#[implement(super::Service)] -#[tracing::instrument(skip_all, level = "trace")] -pub async fn server_can_see_event( - &self, - origin: &ServerName, - room_id: &RoomId, - event_id: &EventId, -) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - let current_server_members = self - .services - .state_cache - .room_members(room_id) - .ready_filter(|member| member.server_name() == origin); - - match history_visibility { - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - current_server_members - .any(|member| self.user_was_invited(shortstatehash, member)) - .await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was 
joined, else deny - current_server_members - .any(|member| self.user_was_joined(shortstatehash, member)) - .await - }, - | HistoryVisibility::WorldReadable | HistoryVisibility::Shared | _ => true, - } -} diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs deleted file mode 100644 index 169e69e9..00000000 --- a/src/service/rooms/state_accessor/state.rs +++ /dev/null @@ -1,428 +0,0 @@ -use std::{borrow::Borrow, ops::Deref, sync::Arc}; - -use conduwuit::{ - Result, at, err, implement, - matrix::{PduEvent, StateKey}, - pair_of, - utils::{ - result::FlatOk, - stream::{BroadbandExt, IterStream, ReadyExt, TryIgnore}, - }, -}; -use conduwuit_database::Deserialized; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt, future::try_join, pin_mut}; -use ruma::{ - EventId, OwnedEventId, UserId, - events::{ - StateEventType, - room::member::{MembershipState, RoomMemberEventContent}, - }, -}; -use serde::Deserialize; - -use crate::rooms::{ - short::{ShortEventId, ShortStateHash, ShortStateKey}, - state_compressor::{CompressedState, compress_state_event, parse_compressed_state_event}, -}; - -/// The user was a joined member at this state (potentially in the past) -#[implement(super::Service)] -#[inline] -pub async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id).await == MembershipState::Join -} - -/// The user was an invited or joined room member at this state (potentially -/// in the past) -#[implement(super::Service)] -#[inline] -pub async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - let s = self.user_membership(shortstatehash, user_id).await; - s == MembershipState::Join || s == MembershipState::Invite -} - -/// Get membership for given user in state -#[implement(super::Service)] -pub async fn user_membership( - &self, - shortstatehash: ShortStateHash, - user_id: &UserId, -) -> MembershipState { - 
self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .await - .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) -} - -/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). -#[implement(super::Service)] -pub async fn state_get_content( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, -) -> Result -where - T: for<'de> Deserialize<'de>, -{ - self.state_get(shortstatehash, event_type, state_key) - .await - .and_then(|event| event.get_content()) -} - -#[implement(super::Service)] -pub async fn state_contains( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, -) -> bool { - let Ok(shortstatekey) = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await - else { - return false; - }; - - self.state_contains_shortstatekey(shortstatehash, shortstatekey) - .await -} - -#[implement(super::Service)] -pub async fn state_contains_type( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, -) -> bool { - let state_keys = self.state_keys(shortstatehash, event_type); - - pin_mut!(state_keys); - state_keys.next().await.is_some() -} - -#[implement(super::Service)] -pub async fn state_contains_shortstatekey( - &self, - shortstatehash: ShortStateHash, - shortstatekey: ShortStateKey, -) -> bool { - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - - self.load_full_state(shortstatehash) - .map_ok(|full_state| full_state.range(start..=end).next().copied()) - .await - .flat_ok() - .is_some() -} - -/// Returns a single PDU from `room_id` with key (`event_type`, -/// `state_key`). 
-#[implement(super::Service)] -pub async fn state_get( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, -) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await - }) - .await -} - -/// Returns a single EventId from `room_id` with key (`event_type`, -/// `state_key`). -#[implement(super::Service)] -pub async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, -) -> Result -where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, -{ - let shorteventid = self - .state_get_shortid(shortstatehash, event_type, state_key) - .await?; - - self.services - .short - .get_eventid_from_short(shorteventid) - .await -} - -/// Returns a single EventId from `room_id` with key (`event_type`, -/// `state_key`). -#[implement(super::Service)] -pub async fn state_get_shortid( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, -) -> Result { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - self.load_full_state(shortstatehash) - .map_ok(|full_state| { - full_state - .range(start..=end) - .next() - .copied() - .map(parse_compressed_state_event) - .map(at!(1)) - .ok_or(err!(Request(NotFound("Not found in room state")))) - }) - .await? -} - -/// Iterates the state_keys for an event_type in the state; current state -/// event_id included. 
-#[implement(super::Service)] -pub fn state_keys_with_ids<'a, Id>( - &'a self, - shortstatehash: ShortStateHash, - event_type: &'a StateEventType, -) -> impl Stream + Send + 'a -where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, - ::Owned: Borrow, -{ - let state_keys_with_short_ids = self - .state_keys_with_shortids(shortstatehash, event_type) - .unzip() - .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) - .shared(); - - let state_keys = state_keys_with_short_ids - .clone() - .map(at!(0)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - let shorteventids = state_keys_with_short_ids - .map(at!(1)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - self.services - .short - .multi_get_eventid_from_short(shorteventids) - .zip(state_keys) - .ready_filter_map(|(eid, sk)| eid.map(move |eid| (sk, eid)).ok()) -} - -/// Iterates the state_keys for an event_type in the state; current state -/// event_id included. -#[implement(super::Service)] -pub fn state_keys_with_shortids<'a>( - &'a self, - shortstatehash: ShortStateHash, - event_type: &'a StateEventType, -) -> impl Stream + Send + 'a { - let short_ids = self - .state_full_shortids(shortstatehash) - .ignore_err() - .unzip() - .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) - .boxed() - .shared(); - - let shortstatekeys = short_ids - .clone() - .map(at!(0)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - let shorteventids = short_ids - .map(at!(1)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - self.services - .short - .multi_get_statekey_from_short(shortstatekeys) - .zip(shorteventids) - .ready_filter_map(|(res, id)| res.map(|res| (res, id)).ok()) - .ready_filter_map(move |((event_type_, state_key), event_id)| { - event_type_.eq(event_type).then_some((state_key, event_id)) - }) -} - -/// Iterates the state_keys for an event_type in the state -#[implement(super::Service)] -pub fn state_keys<'a>( - &'a self, 
- shortstatehash: ShortStateHash, - event_type: &'a StateEventType, -) -> impl Stream + Send + 'a { - let short_ids = self - .state_full_shortids(shortstatehash) - .ignore_err() - .map(at!(0)); - - self.services - .short - .multi_get_statekey_from_short(short_ids) - .ready_filter_map(Result::ok) - .ready_filter_map(move |(event_type_, state_key)| { - event_type_.eq(event_type).then_some(state_key) - }) -} - -/// Returns the state events removed between the interval (present in .0 but -/// not in .1) -#[implement(super::Service)] -#[inline] -pub fn state_removed( - &self, - shortstatehash: pair_of!(ShortStateHash), -) -> impl Stream + Send + '_ { - self.state_added((shortstatehash.1, shortstatehash.0)) -} - -/// Returns the state events added between the interval (present in .1 but -/// not in .0) -#[implement(super::Service)] -pub fn state_added( - &self, - shortstatehash: pair_of!(ShortStateHash), -) -> impl Stream + Send + '_ { - let a = self.load_full_state(shortstatehash.0); - let b = self.load_full_state(shortstatehash.1); - try_join(a, b) - .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) - .map_ok(IterStream::try_stream) - .try_flatten_stream() - .ignore_err() - .map(parse_compressed_state_event) -} - -#[implement(super::Service)] -pub fn state_full( - &self, - shortstatehash: ShortStateHash, -) -> impl Stream + Send + '_ { - self.state_full_pdus(shortstatehash) - .ready_filter_map(|pdu| { - Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) - }) -} - -#[implement(super::Service)] -pub fn state_full_pdus( - &self, - shortstatehash: ShortStateHash, -) -> impl Stream + Send + '_ { - let short_ids = self - .state_full_shortids(shortstatehash) - .ignore_err() - .map(at!(1)); - - self.services - .short - .multi_get_eventid_from_short(short_ids) - .ready_filter_map(Result::ok) - .broad_filter_map(move |event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await.ok() - }) -} - -/// Builds a StateMap by iterating 
over all keys that start -/// with state_hash, this gives the full state for the given state_hash. -#[implement(super::Service)] -pub fn state_full_ids<'a, Id>( - &'a self, - shortstatehash: ShortStateHash, -) -> impl Stream + Send + 'a -where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, - ::Owned: Borrow, -{ - let shortids = self - .state_full_shortids(shortstatehash) - .ignore_err() - .unzip() - .shared(); - - let shortstatekeys = shortids - .clone() - .map(at!(0)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - let shorteventids = shortids - .map(at!(1)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - self.services - .short - .multi_get_eventid_from_short(shorteventids) - .zip(shortstatekeys) - .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) -} - -#[implement(super::Service)] -pub fn state_full_shortids( - &self, - shortstatehash: ShortStateHash, -) -> impl Stream> + Send + '_ { - self.load_full_state(shortstatehash) - .map_ok(|full_state| { - full_state - .deref() - .iter() - .copied() - .map(parse_compressed_state_event) - .collect() - }) - .map_ok(Vec::into_iter) - .map_ok(IterStream::try_stream) - .try_flatten_stream() - .boxed() -} - -#[implement(super::Service)] -#[tracing::instrument(name = "load", level = "debug", skip(self))] -async fn load_full_state(&self, shortstatehash: ShortStateHash) -> Result> { - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_err(|e| err!(Database("Missing state IDs: {e}"))) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .await -} - -/// Returns the state hash for this pdu. 
-#[implement(super::Service)] -pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.db - .shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() -} diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs deleted file mode 100644 index 67e0b52b..00000000 --- a/src/service/rooms/state_accessor/user_can.rs +++ /dev/null @@ -1,169 +0,0 @@ -use conduwuit::{Err, Result, implement, pdu::PduBuilder}; -use ruma::{ - EventId, RoomId, UserId, - events::{ - StateEventType, TimelineEventType, - room::{ - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - member::{MembershipState, RoomMemberEventContent}, - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - }, - }, -}; - -use crate::rooms::state::RoomMutexGuard; - -/// Checks if a given user can redact a given event -/// -/// If federation is true, it allows redaction events from any user of the -/// same server as the original event sender -#[implement(super::Service)] -pub async fn user_can_redact( - &self, - redacts: &EventId, - sender: &UserId, - room_id: &RoomId, - federation: bool, -) -> Result { - let redacting_event = self.services.timeline.get_pdu(redacts).await; - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomCreate) - { - return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); - } - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) - { - return Err!(Request(Forbidden( - "Redacting m.room.server_acl will result in the room being inaccessible for \ - everyone (empty allow key), forbidding." 
- ))); - } - - match self - .room_state_get_content::( - room_id, - &StateEventType::RoomPowerLevels, - "", - ) - .await - { - | Ok(pl_event_content) => { - let pl_event: RoomPowerLevels = pl_event_content.into(); - Ok(pl_event.user_can_redact_event_of_other(sender) - || pl_event.user_can_redact_own_event(sender) - && match redacting_event { - | Ok(redacting_event) => - if federation { - redacting_event.sender.server_name() == sender.server_name() - } else { - redacting_event.sender == sender - }, - | _ => false, - }) - }, - | _ => { - // Falling back on m.room.create to judge power level - match self - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - | Ok(room_create) => Ok(room_create.sender == sender - || redacting_event - .as_ref() - .is_ok_and(|redacting_event| redacting_event.sender == sender)), - | _ => Err!(Database( - "No m.room.power_levels or m.room.create events in database for room" - )), - } - }, - } -} - -/// Whether a user is allowed to see an event, based on -/// the room's history_visibility at that event's state. 
-#[implement(super::Service)] -#[tracing::instrument(skip_all, level = "trace")] -pub async fn user_can_see_event( - &self, - user_id: &UserId, - room_id: &RoomId, - event_id: &EventId, -) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - match history_visibility { - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, user_id).await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, user_id).await - }, - | HistoryVisibility::WorldReadable => true, - | HistoryVisibility::Shared | _ => currently_member, - } -} - -/// Whether a user is allowed to see an event, based on -/// the room's history_visibility at that event's state. 
-#[implement(super::Service)] -#[tracing::instrument(skip_all, level = "trace")] -pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { - if self.services.state_cache.is_joined(user_id, room_id).await { - return true; - } - - let history_visibility = self - .room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - match history_visibility { - | HistoryVisibility::Invited => - self.services.state_cache.is_invited(user_id, room_id).await, - | HistoryVisibility::WorldReadable => true, - | _ => false, - } -} - -#[implement(super::Service)] -pub async fn user_can_invite( - &self, - room_id: &RoomId, - sender: &UserId, - target_user: &UserId, - state_lock: &RoomMutexGuard, -) -> bool { - self.services - .timeline - .create_hash_and_sign_event( - PduBuilder::state( - target_user.as_str(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - sender, - room_id, - state_lock, - ) - .await - .is_ok() -} diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs new file mode 100644 index 00000000..4cd9c8fa --- /dev/null +++ b/src/service/rooms/state_cache/data.rs @@ -0,0 +1,87 @@ +use std::{collections::HashSet, sync::Arc}; + +use ruma::{ + events::{AnyStrippedStateEvent, AnySyncStateEvent}, + serde::Raw, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, +}; + +use crate::{service::appservice::RegistrationInfo, Result}; + +type StrippedStateEventIter<'a> = Box>)>> + 'a>; + +type AnySyncStateEventIter<'a> = Box>)>> + 'a>; + +pub trait Data: Send + Sync { + fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_invited( + &self, user_id: &UserId, room_id: &RoomId, last_state: Option>>, + invite_via: Option>, + ) -> 
Result<()>; + fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + + fn update_joined_count(&self, room_id: &RoomId) -> Result<()>; + + fn get_our_real_users(&self, room_id: &RoomId) -> Result>>; + + fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result; + + /// Makes a user forget a room. + fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>; + + /// Returns an iterator of all servers participating in this room. + fn room_servers<'a>(&'a self, room_id: &RoomId) -> Box> + 'a>; + + fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result; + + /// Returns an iterator of all rooms a server participates in (as far as we + /// know). + fn server_rooms<'a>(&'a self, server: &ServerName) -> Box> + 'a>; + + /// Returns an iterator over all joined members of a room. + fn room_members<'a>(&'a self, room_id: &RoomId) -> Box> + 'a>; + + fn room_joined_count(&self, room_id: &RoomId) -> Result>; + + fn room_invited_count(&self, room_id: &RoomId) -> Result>; + + /// Returns an iterator over all User IDs who ever joined a room. + fn room_useroncejoined<'a>(&'a self, room_id: &RoomId) -> Box> + 'a>; + + /// Returns an iterator over all invited members of a room. + fn room_members_invited<'a>(&'a self, room_id: &RoomId) -> Box> + 'a>; + + fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + /// Returns an iterator over all rooms this user joined. + fn rooms_joined<'a>(&'a self, user_id: &UserId) -> Box> + 'a>; + + /// Returns an iterator over all rooms a user was invited to. + fn rooms_invited<'a>(&'a self, user_id: &UserId) -> StrippedStateEventIter<'a>; + + fn invite_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>>>; + + fn left_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>>>; + + /// Returns an iterator over all rooms a user left. 
+ fn rooms_left<'a>(&'a self, user_id: &UserId) -> AnySyncStateEventIter<'a>; + + fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + /// Gets the servers to either accept or decline invites via for a given + /// room. + fn servers_invite_via(&self, room_id: &RoomId) -> Result>>; + + /// Add the given servers the list to accept or decline invites via for a + /// given room. + fn add_servers_invite_via(&self, room_id: &RoomId, servers: &[OwnedServerName]) -> Result<()>; +} diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index d3dbc143..d6f1ff07 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -1,127 +1,39 @@ -use std::{ - collections::{HashMap, HashSet}, - sync::{Arc, RwLock}, -}; +use std::{collections::HashSet, sync::Arc}; -use conduwuit::{ - Result, is_not_empty, - result::LogErr, - utils::{ReadyExt, StreamTools, stream::TryIgnore}, - warn, -}; -use database::{Deserialized, Ignore, Interfix, Json, Map, serialize_key}; -use futures::{Stream, StreamExt, future::join5, pin_mut, stream::iter}; +pub use data::Data; use itertools::Itertools; use ruma::{ - OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, events::{ - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, StateEventType, direct::DirectEvent, + ignored_user_list::IgnoredUserListEvent, room::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, }, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, RoomAccountDataEventType, StateEventType, }, int, serde::Raw, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; +use 
tracing::{error, warn}; -use crate::{Dep, account_data, appservice::RegistrationInfo, config, globals, rooms, users}; +use crate::{service::appservice::RegistrationInfo, services, Error, Result}; + +mod data; pub struct Service { - appservice_in_room_cache: AppServiceInRoomCache, - services: Services, - db: Data, -} - -struct Services { - account_data: Dep, - config: Dep, - globals: Dep, - metadata: Dep, - state_accessor: Dep, - users: Dep, -} - -struct Data { - roomid_invitedcount: Arc, - roomid_inviteviaservers: Arc, - roomid_joinedcount: Arc, - roomserverids: Arc, - roomuserid_invitecount: Arc, - roomuserid_joined: Arc, - roomuserid_leftcount: Arc, - roomuserid_knockedcount: Arc, - roomuseroncejoinedids: Arc, - serverroomids: Arc, - userroomid_invitestate: Arc, - userroomid_joined: Arc, - userroomid_leftstate: Arc, - userroomid_knockedstate: Arc, -} - -type AppServiceInRoomCache = RwLock>>; -type StrippedStateEventItem = (OwnedRoomId, Vec>); -type SyncStateEventItem = (OwnedRoomId, Vec>); - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - appservice_in_room_cache: RwLock::new(HashMap::new()), - services: Services { - account_data: args.depend::("account_data"), - config: args.depend::("config"), - globals: args.depend::("globals"), - metadata: args.depend::("rooms::metadata"), - state_accessor: args - .depend::("rooms::state_accessor"), - users: args.depend::("users"), - }, - db: Data { - roomid_invitedcount: args.db["roomid_invitedcount"].clone(), - roomid_inviteviaservers: args.db["roomid_inviteviaservers"].clone(), - roomid_joinedcount: args.db["roomid_joinedcount"].clone(), - roomserverids: args.db["roomserverids"].clone(), - roomuserid_invitecount: args.db["roomuserid_invitecount"].clone(), - roomuserid_joined: args.db["roomuserid_joined"].clone(), - roomuserid_leftcount: args.db["roomuserid_leftcount"].clone(), - roomuserid_knockedcount: args.db["roomuserid_knockedcount"].clone(), - 
roomuseroncejoinedids: args.db["roomuseroncejoinedids"].clone(), - serverroomids: args.db["serverroomids"].clone(), - userroomid_invitestate: args.db["userroomid_invitestate"].clone(), - userroomid_joined: args.db["userroomid_joined"].clone(), - userroomid_leftstate: args.db["userroomid_leftstate"].clone(), - userroomid_knockedstate: args.db["userroomid_knockedstate"].clone(), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, } impl Service { /// Update current membership data. - #[tracing::instrument( - level = "debug", - skip_all, - fields( - %room_id, - %user_id, - %sender, - ?membership_event, - ), - )] + #[tracing::instrument(skip(self, last_state))] #[allow(clippy::too_many_arguments)] - pub async fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership_event: RoomMemberEventContent, - sender: &UserId, - last_state: Option>>, - invite_via: Option>, + pub fn update_membership( + &self, room_id: &RoomId, user_id: &UserId, membership_event: RoomMemberEventContent, sender: &UserId, + last_state: Option>>, invite_via: Option>, update_joined_count: bool, ) -> Result<()> { let membership = membership_event.membership; @@ -131,19 +43,19 @@ impl Service { // TODO: use futures to update remote profiles without blocking the membership // update #[allow(clippy::collapsible_if)] - if !self.services.globals.user_is_local(user_id) { - if !self.services.users.exists(user_id).await { - self.services.users.create(user_id, None)?; + if user_id.server_name() != services().globals.server_name() { + if !services().users.exists(user_id)? { + services().users.create(user_id, None)?; } /* // Try to update our local copy of the user if ours does not match - if ((self.services.users.displayname(user_id)? != membership_event.displayname) - || (self.services.users.avatar_url(user_id)? != membership_event.avatar_url) - || (self.services.users.blurhash(user_id)? 
!= membership_event.blurhash)) + if ((services().users.displayname(user_id)? != membership_event.displayname) + || (services().users.avatar_url(user_id)? != membership_event.avatar_url) + || (services().users.blurhash(user_id)? != membership_event.blurhash)) && (membership != MembershipState::Leave) { - let response = self.services + let response = services() .sending .send_federation_request( user_id.server_name(), @@ -154,27 +66,27 @@ impl Service { ) .await; - self.services.users.set_displayname(user_id, response.displayname.clone()).await?; - self.services.users.set_avatar_url(user_id, response.avatar_url).await?; - self.services.users.set_blurhash(user_id, response.blurhash).await?; + services().users.set_displayname(user_id, response.displayname.clone()).await?; + services().users.set_avatar_url(user_id, response.avatar_url).await?; + services().users.set_blurhash(user_id, response.blurhash).await?; }; */ } match &membership { - | MembershipState::Join => { + MembershipState::Join => { // Check if the user never joined this room - if !self.once_joined(user_id, room_id).await { + if !self.once_joined(user_id, room_id)? { // Add the user ID to the join list then - self.mark_as_once_joined(user_id, room_id); + self.db.mark_as_once_joined(user_id, room_id)?; // Check if the room has a predecessor - if let Ok(Some(predecessor)) = self - .services + if let Some(predecessor) = services() + .rooms .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomCreate, "") - .await - .map(|content: RoomCreateEventContent| content.predecessor) + .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
+ .and_then(|create| serde_json::from_str(create.content.get()).ok()) + .and_then(|content: RoomCreateEventContent| content.predecessor) { // Copy user settings from predecessor to the current room: // - Push rules @@ -202,39 +114,34 @@ impl Service { // .ok(); // Copy old tags to new room - if let Ok(tag_event) = self - .services + if let Some(tag_event) = services() .account_data - .get_room( - &predecessor.room_id, - user_id, - RoomAccountDataEventType::Tag, - ) - .await - { - self.services + .get(Some(&predecessor.room_id), user_id, RoomAccountDataEventType::Tag)? + .map(|event| { + serde_json::from_str(event.get()).map_err(|e| { + warn!("Invalid account data event in db: {e:?}"); + Error::BadDatabase("Invalid account data event in db.") + }) + }) { + services() .account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - ) - .await + .update(Some(room_id), user_id, RoomAccountDataEventType::Tag, &tag_event?) .ok(); - } + }; // Copy direct chat flag - if let Ok(mut direct_event) = self - .services + if let Some(direct_event) = services() .account_data - .get_global::( - user_id, - GlobalAccountDataEventType::Direct, - ) - .await - { + .get(None, user_id, GlobalAccountDataEventType::Direct.to_string().into())? 
+ .map(|event| { + serde_json::from_str::(event.get()).map_err(|e| { + warn!("Invalid account data event in db: {e:?}"); + Error::BadDatabase("Invalid account data event in db.") + }) + }) { + let mut direct_event = direct_event?; let mut room_ids_updated = false; + for room_ids in direct_event.content.0.values_mut() { if room_ids.iter().any(|r| r == &predecessor.room_id) { room_ids.push(room_id.to_owned()); @@ -243,759 +150,263 @@ impl Service { } if room_ids_updated { - self.services - .account_data - .update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &serde_json::to_value(&direct_event) - .expect("to json always works"), - ) - .await?; + services().account_data.update( + None, + user_id, + GlobalAccountDataEventType::Direct.to_string().into(), + &serde_json::to_value(&direct_event).expect("to json always works"), + )?; } - } + }; } } - self.mark_as_joined(user_id, room_id); + self.db.mark_as_joined(user_id, room_id)?; }, - | MembershipState::Invite => { + MembershipState::Invite => { // We want to know if the sender is ignored by the receiver - if self.services.users.user_is_ignored(sender, user_id).await { + let is_ignored = services() + .account_data + .get( + None, // Ignored users are in global account data + user_id, // Receiver + GlobalAccountDataEventType::IgnoredUserList + .to_string() + .into(), + )? + .map(|event| { + serde_json::from_str::(event.get()).map_err(|e| { + warn!("Invalid account data event in db: {e:?}"); + Error::BadDatabase("Invalid account data event in db.") + }) + }) + .transpose()? 
+ .map_or(false, |ignored| { + ignored + .content + .ignored_users + .iter() + .any(|(user, _details)| user == sender) + }); + + if is_ignored { return Ok(()); } - self.mark_as_invited(user_id, room_id, last_state, invite_via) - .await; + self.db + .mark_as_invited(user_id, room_id, last_state, invite_via)?; }, - | MembershipState::Leave | MembershipState::Ban => { - self.mark_as_left(user_id, room_id); - - if self.services.globals.user_is_local(user_id) - && (self.services.config.forget_forced_upon_leave - || self.services.metadata.is_banned(room_id).await - || self.services.metadata.is_disabled(room_id).await) - { - self.forget(room_id, user_id); - } + MembershipState::Leave | MembershipState::Ban => { + self.db.mark_as_left(user_id, room_id)?; }, - | _ => {}, + _ => {}, } if update_joined_count { - self.update_joined_count(room_id).await; + self.update_joined_count(room_id)?; } Ok(()) } - #[tracing::instrument(level = "trace", skip_all)] - pub async fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &RegistrationInfo, - ) -> bool { - if let Some(cached) = self - .appservice_in_room_cache - .read() - .expect("locked") - .get(room_id) - .and_then(|map| map.get(&appservice.registration.id)) - .copied() - { - return cached; - } + #[tracing::instrument(skip(self, room_id))] + pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { self.db.update_joined_count(room_id) } - let bridge_user_id = UserId::parse_with_server_name( - appservice.registration.sender_localpart.as_str(), - self.services.globals.server_name(), - ); - - let Ok(bridge_user_id) = bridge_user_id.log_err() else { - return false; - }; - - let in_room = self.is_joined(&bridge_user_id, room_id).await - || self - .room_members(room_id) - .ready_any(|user_id| appservice.users.is_match(user_id.as_str())) - .await; - - self.appservice_in_room_cache - .write() - .expect("locked") - .entry(room_id.into()) - .or_default() - .insert(appservice.registration.id.clone(), in_room); - - 
in_room + #[tracing::instrument(skip(self, room_id))] + pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { + self.db.get_our_real_users(room_id) } - /// Direct DB function to directly mark a user as joined. It is not - /// recommended to use this directly. You most likely should use - /// `update_membership` instead - #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { - let userroom_id = (user_id, room_id); - let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); - - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); - - self.db.userroomid_joined.insert(&userroom_id, []); - self.db.roomuserid_joined.insert(&roomuser_id, []); - - self.db.userroomid_invitestate.remove(&userroom_id); - self.db.roomuserid_invitecount.remove(&roomuser_id); - - self.db.userroomid_leftstate.remove(&userroom_id); - self.db.roomuserid_leftcount.remove(&roomuser_id); - - self.db.userroomid_knockedstate.remove(&userroom_id); - self.db.roomuserid_knockedcount.remove(&roomuser_id); - - self.db.roomid_inviteviaservers.remove(room_id); - } - - /// Direct DB function to directly mark a user as left. It is not - /// recommended to use this directly. 
You most likely should use - /// `update_membership` instead - #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { - let userroom_id = (user_id, room_id); - let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); - - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); - - // (timo) TODO - let leftstate = Vec::>::new(); - - self.db - .userroomid_leftstate - .raw_put(&userroom_id, Json(leftstate)); - self.db - .roomuserid_leftcount - .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); - - self.db.userroomid_joined.remove(&userroom_id); - self.db.roomuserid_joined.remove(&roomuser_id); - - self.db.userroomid_invitestate.remove(&userroom_id); - self.db.roomuserid_invitecount.remove(&roomuser_id); - - self.db.userroomid_knockedstate.remove(&userroom_id); - self.db.roomuserid_knockedcount.remove(&roomuser_id); - - self.db.roomid_inviteviaservers.remove(room_id); - } - - /// Direct DB function to directly mark a user as knocked. It is not - /// recommended to use this directly. 
You most likely should use - /// `update_membership` instead - #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_knocked( - &self, - user_id: &UserId, - room_id: &RoomId, - knocked_state: Option>>, - ) { - let userroom_id = (user_id, room_id); - let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); - - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); - - self.db - .userroomid_knockedstate - .raw_put(&userroom_id, Json(knocked_state.unwrap_or_default())); - self.db - .roomuserid_knockedcount - .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); - - self.db.userroomid_joined.remove(&userroom_id); - self.db.roomuserid_joined.remove(&roomuser_id); - - self.db.userroomid_invitestate.remove(&userroom_id); - self.db.roomuserid_invitecount.remove(&roomuser_id); - - self.db.userroomid_leftstate.remove(&userroom_id); - self.db.roomuserid_leftcount.remove(&roomuser_id); - - self.db.roomid_inviteviaservers.remove(room_id); + #[tracing::instrument(skip(self, room_id, appservice))] + pub fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result { + self.db.appservice_in_room(room_id, appservice) } /// Makes a user forget a room. - #[tracing::instrument(skip(self), level = "debug")] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) { - let userroom_id = (user_id, room_id); - let roomuser_id = (room_id, user_id); - - self.db.userroomid_leftstate.del(userroom_id); - self.db.roomuserid_leftcount.del(roomuser_id); - } + #[tracing::instrument(skip(self))] + pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { self.db.forget(room_id, user_id) } /// Returns an iterator of all servers participating in this room. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn room_servers<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomserverids - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, server): (Ignore, &ServerName)| server) + #[tracing::instrument(skip(self))] + pub fn room_servers<'a>(&'a self, room_id: &RoomId) -> impl Iterator> + 'a { + self.db.room_servers(room_id) } - #[tracing::instrument(skip(self), level = "trace")] - pub async fn server_in_room<'a>( - &'a self, - server: &'a ServerName, - room_id: &'a RoomId, - ) -> bool { - let key = (server, room_id); - self.db.serverroomids.qry(&key).await.is_ok() + #[tracing::instrument(skip(self))] + pub fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result { + self.db.server_in_room(server, room_id) } /// Returns an iterator of all rooms a server participates in (as far as we /// know). - #[tracing::instrument(skip(self), level = "debug")] - pub fn server_rooms<'a>( - &'a self, - server: &'a ServerName, - ) -> impl Stream + Send + 'a { - let prefix = (server, Interfix); - self.db - .serverroomids - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, room_id): (Ignore, &RoomId)| room_id) + #[tracing::instrument(skip(self))] + pub fn server_rooms<'a>(&'a self, server: &ServerName) -> impl Iterator> + 'a { + self.db.server_rooms(server) } /// Returns true if server can see user by sharing at least one room. 
- #[tracing::instrument(skip(self), level = "trace")] - pub async fn server_sees_user(&self, server: &ServerName, user_id: &UserId) -> bool { - self.server_rooms(server) - .any(|room_id| self.is_joined(user_id, room_id)) - .await + #[tracing::instrument(skip(self))] + pub fn server_sees_user(&self, server: &ServerName, user_id: &UserId) -> Result { + Ok(self + .server_rooms(server) + .filter_map(Result::ok) + .any(|room_id: OwnedRoomId| self.is_joined(user_id, &room_id).unwrap_or(false))) } /// Returns true if user_a and user_b share at least one room. - #[tracing::instrument(skip(self), level = "trace")] - pub async fn user_sees_user(&self, user_a: &UserId, user_b: &UserId) -> bool { - let get_shared_rooms = self.get_shared_rooms(user_a, user_b); + #[tracing::instrument(skip(self))] + pub fn user_sees_user(&self, user_a: &UserId, user_b: &UserId) -> Result { + // Minimize number of point-queries by iterating user with least nr rooms + let (a, b) = if self.rooms_joined(user_a).count() < self.rooms_joined(user_b).count() { + (user_a, user_b) + } else { + (user_b, user_a) + }; - pin_mut!(get_shared_rooms); - get_shared_rooms.next().await.is_some() + Ok(self + .rooms_joined(a) + .filter_map(Result::ok) + .any(|room_id| self.is_joined(b, &room_id).unwrap_or(false))) } - /// List the rooms common between two users - #[tracing::instrument(skip(self), level = "debug")] - pub fn get_shared_rooms<'a>( - &'a self, - user_a: &'a UserId, - user_b: &'a UserId, - ) -> impl Stream + Send + 'a { - use conduwuit::utils::set; - - let a = self.rooms_joined(user_a); - let b = self.rooms_joined(user_b); - set::intersection_sorted_stream2(a, b) + /// Returns an iterator over all joined members of a room. + #[tracing::instrument(skip(self))] + pub fn room_members<'a>(&'a self, room_id: &RoomId) -> impl Iterator> + 'a { + self.db.room_members(room_id) } - /// Returns an iterator of all joined members of a room. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn room_members<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomuserid_joined - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, user_id): (Ignore, &UserId)| user_id) - } + #[tracing::instrument(skip(self))] + pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { self.db.room_joined_count(room_id) } - /// Returns the number of users which are currently in a room - #[tracing::instrument(skip(self), level = "trace")] - pub async fn room_joined_count(&self, room_id: &RoomId) -> Result { - self.db.roomid_joinedcount.get(room_id).await.deserialized() - } - - #[tracing::instrument(skip(self), level = "debug")] - /// Returns an iterator of all our local users in the room, even if they're - /// deactivated/guests - pub fn local_users_in_room<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - self.room_members(room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - } - - /// Returns an iterator of all our local joined users in a room who are - /// active (not deactivated, not guest) - #[tracing::instrument(skip(self), level = "trace")] - pub fn active_local_users_in_room<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - self.local_users_in_room(room_id) - .filter(|user| self.services.users.is_active(user)) - } - - /// Returns the number of users which are currently invited to a room - #[tracing::instrument(skip(self), level = "trace")] - pub async fn room_invited_count(&self, room_id: &RoomId) -> Result { - self.db - .roomid_invitedcount - .get(room_id) - .await - .deserialized() - } + #[tracing::instrument(skip(self))] + pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { self.db.room_invited_count(room_id) } /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomuseroncejoinedids - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, user_id): (Ignore, &UserId)| user_id) + #[tracing::instrument(skip(self))] + pub fn room_useroncejoined<'a>(&'a self, room_id: &RoomId) -> impl Iterator> + 'a { + self.db.room_useroncejoined(room_id) } /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_members_invited<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomuserid_invitecount - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, user_id): (Ignore, &UserId)| user_id) + #[tracing::instrument(skip(self))] + pub fn room_members_invited<'a>(&'a self, room_id: &RoomId) -> impl Iterator> + 'a { + self.db.room_members_invited(room_id) } - /// Returns an iterator over all knocked members of a room. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn room_members_knocked<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomuserid_knockedcount - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, user_id): (Ignore, &UserId)| user_id) + #[tracing::instrument(skip(self))] + pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + self.db.get_invite_count(room_id, user_id) } - #[tracing::instrument(skip(self), level = "trace")] - pub async fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { - let key = (room_id, user_id); - self.db - .roomuserid_invitecount - .qry(&key) - .await - .deserialized() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn get_knock_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { - let key = (room_id, user_id); - self.db - .roomuserid_knockedcount - .qry(&key) - .await - .deserialized() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { - let key = (room_id, user_id); - self.db.roomuserid_leftcount.qry(&key).await.deserialized() + #[tracing::instrument(skip(self))] + pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + self.db.get_left_count(room_id, user_id) } /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_joined<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - self.db - .userroomid_joined - .keys_raw_prefix(user_id) - .ignore_err() - .map(|(_, room_id): (Ignore, &RoomId)| room_id) + #[tracing::instrument(skip(self))] + pub fn rooms_joined<'a>(&'a self, user_id: &UserId) -> impl Iterator> + 'a { + self.db.rooms_joined(user_id) } /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self))] pub fn rooms_invited<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - type KeyVal<'a> = (Key<'a>, Raw>); - type Key<'a> = (&'a UserId, &'a RoomId); - - let prefix = (user_id, Interfix); - self.db - .userroomid_invitestate - .stream_prefix(&prefix) - .ignore_err() - .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) - .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) - .ignore_err() + &'a self, user_id: &UserId, + ) -> impl Iterator>)>> + 'a { + self.db.rooms_invited(user_id) } - /// Returns an iterator over all rooms a user is currently knocking. - #[tracing::instrument(skip(self), level = "trace")] - pub fn rooms_knocked<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - type KeyVal<'a> = (Key<'a>, Raw>); - type Key<'a> = (&'a UserId, &'a RoomId); - - let prefix = (user_id, Interfix); - self.db - .userroomid_knockedstate - .stream_prefix(&prefix) - .ignore_err() - .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) - .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) - .ignore_err() + #[tracing::instrument(skip(self))] + pub fn invite_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>>> { + self.db.invite_state(user_id, room_id) } - #[tracing::instrument(skip(self), level = "trace")] - pub async fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>> { - let key = (user_id, room_id); - self.db - .userroomid_invitestate - .qry(&key) - .await - .deserialized() - .and_then(|val: Raw>| { - val.deserialize_as().map_err(Into::into) - }) - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn knock_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>> { - let key = (user_id, room_id); - self.db - .userroomid_knockedstate - .qry(&key) - .await - .deserialized() - .and_then(|val: Raw>| { - 
val.deserialize_as().map_err(Into::into) - }) - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>> { - let key = (user_id, room_id); - self.db - .userroomid_leftstate - .qry(&key) - .await - .deserialized() - .and_then(|val: Raw>| { - val.deserialize_as().map_err(Into::into) - }) + #[tracing::instrument(skip(self))] + pub fn left_state(&self, user_id: &UserId, room_id: &RoomId) -> Result>>> { + self.db.left_state(user_id, room_id) } /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self), level = "debug")] + #[tracing::instrument(skip(self))] pub fn rooms_left<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - type KeyVal<'a> = (Key<'a>, Raw>>); - type Key<'a> = (&'a UserId, &'a RoomId); - - let prefix = (user_id, Interfix); - self.db - .userroomid_leftstate - .stream_prefix(&prefix) - .ignore_err() - .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) - .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) - .ignore_err() + &'a self, user_id: &UserId, + ) -> impl Iterator>)>> + 'a { + self.db.rooms_left(user_id) } - #[tracing::instrument(skip(self), level = "debug")] - pub async fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> bool { - let key = (user_id, room_id); - self.db.roomuseroncejoinedids.qry(&key).await.is_ok() + #[tracing::instrument(skip(self))] + pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.once_joined(user_id, room_id) } - #[tracing::instrument(skip(self), level = "trace")] - pub async fn is_joined<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { - let key = (user_id, room_id); - self.db.userroomid_joined.qry(&key).await.is_ok() + #[tracing::instrument(skip(self))] + pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { self.db.is_joined(user_id, room_id) } + + 
#[tracing::instrument(skip(self))] + pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.is_invited(user_id, room_id) } - #[tracing::instrument(skip(self), level = "trace")] - pub async fn is_knocked<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { - let key = (user_id, room_id); - self.db.userroomid_knockedstate.qry(&key).await.is_ok() + #[tracing::instrument(skip(self))] + pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { self.db.is_left(user_id, room_id) } + + #[tracing::instrument(skip(self))] + pub fn servers_invite_via(&self, room_id: &RoomId) -> Result>> { + self.db.servers_invite_via(room_id) } - #[tracing::instrument(skip(self), level = "trace")] - pub async fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> bool { - let key = (user_id, room_id); - self.db.userroomid_invitestate.qry(&key).await.is_ok() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> bool { - let key = (user_id, room_id); - self.db.userroomid_leftstate.qry(&key).await.is_ok() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn user_membership( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Option { - let states = join5( - self.is_joined(user_id, room_id), - self.is_left(user_id, room_id), - self.is_knocked(user_id, room_id), - self.is_invited(user_id, room_id), - self.once_joined(user_id, room_id), - ) - .await; - - match states { - | (true, ..) => Some(MembershipState::Join), - | (_, true, ..) => Some(MembershipState::Leave), - | (_, _, true, ..) => Some(MembershipState::Knock), - | (_, _, _, true, ..) 
=> Some(MembershipState::Invite), - | (false, false, false, false, true) => Some(MembershipState::Ban), - | _ => None, - } - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn servers_invite_via<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - type KeyVal<'a> = (Ignore, Vec<&'a ServerName>); - - self.db - .roomid_inviteviaservers - .stream_raw_prefix(room_id) - .ignore_err() - .map(|(_, servers): KeyVal<'_>| *servers.last().expect("at least one server")) - } - - /// Gets up to five servers that are likely to be in the room in the + /// Gets up to three servers that are likely to be in the room in the /// distant future. /// - /// See - #[tracing::instrument(skip(self), level = "trace")] - pub async fn servers_route_via(&self, room_id: &RoomId) -> Result> { - let most_powerful_user_server = self - .services + /// See + #[tracing::instrument(skip(self))] + pub fn servers_route_via(&self, room_id: &RoomId) -> Result> { + let most_powerful_user_server = services() + .rooms .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") - .await - .map(|content: RoomPowerLevelsEventContent| { - content - .users - .iter() - .max_by_key(|(_, power)| *power) - .and_then(|x| (x.1 >= &int!(50)).then_some(x)) - .map(|(user, _power)| user.server_name().to_owned()) - }); + .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? + .map(|pdu| { + serde_json::from_str(pdu.content.get()).map(|conent: RoomPowerLevelsEventContent| { + conent + .users + .iter() + .max_by_key(|(_, power)| *power) + .and_then(|x| { + if x.1 >= &int!(50) { + Some(x) + } else { + None + } + }) + .map(|(user, _power)| user.server_name().to_owned()) + }) + }) + .transpose() + .map_err(|e| { + error!("Invalid power levels event content in database: {e}"); + Error::bad_database("Invalid power levels event content in database") + })? 
+ .flatten(); - let mut servers: Vec = self + let mut servers: Vec = services() + .rooms + .state_cache .room_members(room_id) + .filter_map(Result::ok) .counts_by(|user| user.server_name().to_owned()) - .await - .into_iter() + .iter() .sorted_by_key(|(_, users)| *users) - .map(|(server, _)| server) + .map(|(server, _)| server.to_owned()) .rev() - .take(5) - .collect(); + .take(3) + .collect_vec(); - if let Ok(Some(server)) = most_powerful_user_server { + if let Some(server) = most_powerful_user_server { servers.insert(0, server); - servers.truncate(5); + servers.truncate(3); } Ok(servers) } - - pub fn get_appservice_in_room_cache_usage(&self) -> (usize, usize) { - let cache = self.appservice_in_room_cache.read().expect("locked"); - - (cache.len(), cache.capacity()) - } - - #[tracing::instrument(level = "debug", skip_all)] - pub fn clear_appservice_in_room_cache(&self) { - self.appservice_in_room_cache - .write() - .expect("locked") - .clear(); - } - - #[tracing::instrument(level = "debug", skip(self))] - pub async fn update_joined_count(&self, room_id: &RoomId) { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut knockedcount = 0_u64; - let mut joined_servers = HashSet::new(); - - self.room_members(room_id) - .ready_for_each(|joined| { - joined_servers.insert(joined.server_name().to_owned()); - joinedcount = joinedcount.saturating_add(1); - }) - .await; - - invitedcount = invitedcount.saturating_add( - self.room_members_invited(room_id) - .count() - .await - .try_into() - .unwrap_or(0), - ); - - knockedcount = knockedcount.saturating_add( - self.room_members_knocked(room_id) - .count() - .await - .try_into() - .unwrap_or(0), - ); - - self.db.roomid_joinedcount.raw_put(room_id, joinedcount); - self.db.roomid_invitedcount.raw_put(room_id, invitedcount); - self.db - .roomuserid_knockedcount - .raw_put(room_id, knockedcount); - - self.room_servers(room_id) - .ready_for_each(|old_joined_server| { - if joined_servers.remove(old_joined_server) { - 
return; - } - - // Server not in room anymore - let roomserver_id = (room_id, old_joined_server); - let serverroom_id = (old_joined_server, room_id); - - self.db.roomserverids.del(roomserver_id); - self.db.serverroomids.del(serverroom_id); - }) - .await; - - // Now only new servers are in joined_servers anymore - for server in &joined_servers { - let roomserver_id = (room_id, server); - let serverroom_id = (server, room_id); - - self.db.roomserverids.put_raw(roomserver_id, []); - self.db.serverroomids.put_raw(serverroom_id, []); - } - - self.appservice_in_room_cache - .write() - .expect("locked") - .remove(room_id); - } - - #[tracing::instrument(level = "debug", skip(self))] - fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) { - let key = (user_id, room_id); - self.db.roomuseroncejoinedids.put_raw(key, []); - } - - #[tracing::instrument(level = "debug", skip(self, last_state, invite_via))] - pub async fn mark_as_invited( - &self, - user_id: &UserId, - room_id: &RoomId, - last_state: Option>>, - invite_via: Option>, - ) { - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); - - let userroom_id = (user_id, room_id); - let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); - - self.db - .userroomid_invitestate - .raw_put(&userroom_id, Json(last_state.unwrap_or_default())); - self.db - .roomuserid_invitecount - .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); - - self.db.userroomid_joined.remove(&userroom_id); - self.db.roomuserid_joined.remove(&roomuser_id); - - self.db.userroomid_leftstate.remove(&userroom_id); - self.db.roomuserid_leftcount.remove(&roomuser_id); - - self.db.userroomid_knockedstate.remove(&userroom_id); - self.db.roomuserid_knockedcount.remove(&roomuser_id); - - if let Some(servers) = invite_via.filter(is_not_empty!()) { - self.add_servers_invite_via(room_id, servers).await; - } - } - - 
#[tracing::instrument(level = "debug", skip(self, servers))] - pub async fn add_servers_invite_via(&self, room_id: &RoomId, servers: Vec) { - let mut servers: Vec<_> = self - .servers_invite_via(room_id) - .map(ToOwned::to_owned) - .chain(iter(servers.into_iter())) - .collect() - .await; - - servers.sort_unstable(); - servers.dedup(); - - let servers = servers - .iter() - .map(|server| server.as_bytes()) - .collect_vec() - .join(&[0xFF][..]); - - self.db - .roomid_inviteviaservers - .insert(room_id.as_bytes(), &servers); - } } diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs new file mode 100644 index 00000000..eddc8716 --- /dev/null +++ b/src/service/rooms/state_compressor/data.rs @@ -0,0 +1,15 @@ +use std::{collections::HashSet, sync::Arc}; + +use super::CompressedStateEvent; +use crate::Result; + +pub struct StateDiff { + pub parent: Option, + pub added: Arc>, + pub removed: Arc>, +} + +pub trait Data: Send + Sync { + fn get_statediff(&self, shortstatehash: u64) -> Result; + fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()>; +} diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 56a91d0e..36252897 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,232 +1,122 @@ +pub mod data; use std::{ - collections::{BTreeSet, HashMap}, - fmt::{Debug, Write}, + collections::HashSet, mem::size_of, sync::{Arc, Mutex}, }; -use async_trait::async_trait; -use conduwuit::{ - Result, - arrayvec::ArrayVec, - at, checked, err, expected, utils, - utils::{bytes, math::usize_from_f64, stream::IterStream}, -}; -use database::Map; -use futures::{Stream, StreamExt}; +pub use data::Data; use lru_cache::LruCache; use ruma::{EventId, RoomId}; -use crate::{ - Dep, rooms, - rooms::short::{ShortEventId, ShortId, ShortStateHash, ShortStateKey}, -}; +use self::data::StateDiff; +use crate::{services, utils, 
Result}; + +type StateInfoLruCache = Mutex< + LruCache< + u64, + Vec<( + u64, // sstatehash + Arc>, // full state + Arc>, // added + Arc>, // removed + )>, + >, +>; + +type ShortStateInfoResult = Result< + Vec<( + u64, // sstatehash + Arc>, // full state + Arc>, // added + Arc>, // removed + )>, +>; + +type ParentStatesVec = Vec<( + u64, // sstatehash + Arc>, // full state + Arc>, // added + Arc>, // removed +)>; + +type HashSetCompressStateEvent = Result<(u64, Arc>, Arc>)>; pub struct Service { - pub stateinfo_cache: Mutex, - db: Data, - services: Services, + pub db: &'static dyn Data, + + pub stateinfo_cache: StateInfoLruCache, } -struct Services { - short: Dep, - state: Dep, -} - -struct Data { - shortstatehash_statediff: Arc, -} - -#[derive(Clone)] -struct StateDiff { - parent: Option, - added: Arc, - removed: Arc, -} - -#[derive(Clone, Default)] -pub struct ShortStateInfo { - pub shortstatehash: ShortStateHash, - pub full_state: Arc, - pub added: Arc, - pub removed: Arc, -} - -#[derive(Clone, Default)] -pub struct HashSetCompressStateEvent { - pub shortstatehash: ShortStateHash, - pub added: Arc, - pub removed: Arc, -} - -type StateInfoLruCache = LruCache; -type ShortStateInfoVec = Vec; -type ParentStatesVec = Vec; - -pub type CompressedState = BTreeSet; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - let config = &args.server.config; - let cache_capacity = - f64::from(config.stateinfo_cache_capacity) * config.cache_capacity_modifier; - Ok(Arc::new(Self { - stateinfo_cache: LruCache::new(usize_from_f64(cache_capacity)?).into(), - db: Data { - shortstatehash_statediff: args.db["shortstatehash_statediff"].clone(), - }, - services: Services { - short: args.depend::("rooms::short"), - state: args.depend::("rooms::state"), - }, - })) - } - - async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { - let (cache_len, ents) = { - let cache = 
self.stateinfo_cache.lock().expect("locked"); - let ents = cache.iter().map(at!(1)).flat_map(|vec| vec.iter()).fold( - HashMap::new(), - |mut ents, ssi| { - for cs in &[&ssi.added, &ssi.removed, &ssi.full_state] { - ents.insert(Arc::as_ptr(cs), compressed_state_size(cs)); - } - - ents - }, - ); - - (cache.len(), ents) - }; - - let ents_len = ents.len(); - let bytes = ents.values().copied().fold(0_usize, usize::saturating_add); - - let bytes = bytes::pretty(bytes); - writeln!(out, "stateinfo_cache: {cache_len} {ents_len} ({bytes})")?; - - Ok(()) - } - - async fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} +pub type CompressedStateEvent = [u8; 2 * size_of::()]; impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and /// removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(name = "load", level = "debug", skip(self))] - pub async fn load_shortstatehash_info( - &self, - shortstatehash: ShortStateHash, - ) -> Result { - if let Some(r) = self.stateinfo_cache.lock()?.get_mut(&shortstatehash) { + #[tracing::instrument(skip(self))] + pub fn load_shortstatehash_info(&self, shortstatehash: u64) -> ShortStateInfoResult { + if let Some(r) = self + .stateinfo_cache + .lock() + .unwrap() + .get_mut(&shortstatehash) + { return Ok(r.clone()); } - let stack = self.new_shortstatehash_info(shortstatehash).await?; - - self.cache_shortstatehash_info(shortstatehash, stack.clone()) - .await?; - - Ok(stack) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and - /// removed diff for the selected shortstatehash and each parent layer. 
- #[tracing::instrument( - name = "cache", - level = "debug", - skip_all, - fields( - ?shortstatehash, - stack = stack.len(), - ), - )] - async fn cache_shortstatehash_info( - &self, - shortstatehash: ShortStateHash, - stack: ShortStateInfoVec, - ) -> Result { - self.stateinfo_cache.lock()?.insert(shortstatehash, stack); - - Ok(()) - } - - async fn new_shortstatehash_info( - &self, - shortstatehash: ShortStateHash, - ) -> Result { - let StateDiff { parent, added, removed } = self.get_statediff(shortstatehash).await?; - - let Some(parent) = parent else { - return Ok(vec![ShortStateInfo { - shortstatehash, - full_state: added.clone(), - added, - removed, - }]); - }; - - let mut stack = Box::pin(self.load_shortstatehash_info(parent)).await?; - let top = stack.last().expect("at least one frame"); - - let mut full_state = (*top.full_state).clone(); - full_state.extend(added.iter().copied()); - - let removed = (*removed).clone(); - for r in &removed { - full_state.remove(r); - } - - stack.push(ShortStateInfo { - shortstatehash, + let StateDiff { + parent, added, - removed: Arc::new(removed), - full_state: Arc::new(full_state), - }); + removed, + } = self.db.get_statediff(shortstatehash)?; - Ok(stack) + if let Some(parent) = parent { + let mut response = self.load_shortstatehash_info(parent)?; + let mut state = (*response.last().unwrap().1).clone(); + state.extend(added.iter().copied()); + let removed = (*removed).clone(); + for r in &removed { + state.remove(r); + } + + response.push((shortstatehash, Arc::new(state), added, Arc::new(removed))); + + self.stateinfo_cache + .lock() + .unwrap() + .insert(shortstatehash, response.clone()); + + Ok(response) + } else { + let response = vec![(shortstatehash, added.clone(), added, removed)]; + self.stateinfo_cache + .lock() + .unwrap() + .insert(shortstatehash, response.clone()); + Ok(response) + } } - pub fn compress_state_events<'a, I>( - &'a self, - state: I, - ) -> impl Stream + Send + 'a - where - I: Iterator + Clone + Debug 
+ Send + 'a, - { - let event_ids = state.clone().map(at!(1)); - - let short_event_ids = self - .services - .short - .multi_get_or_create_shorteventid(event_ids); - - state - .stream() - .map(at!(0)) - .zip(short_event_ids) - .map(|(shortstatekey, shorteventid)| { - compress_state_event(*shortstatekey, shorteventid) - }) + pub fn compress_state_event(&self, shortstatekey: u64, event_id: &EventId) -> Result { + let mut v = shortstatekey.to_be_bytes().to_vec(); + v.extend_from_slice( + &services() + .rooms + .short + .get_or_create_shorteventid(event_id)? + .to_be_bytes(), + ); + Ok(v.try_into().expect("we checked the size above")) } - pub async fn compress_state_event( - &self, - shortstatekey: ShortStateKey, - event_id: &EventId, - ) -> CompressedStateEvent { - let shorteventid = self - .services - .short - .get_or_create_shorteventid(event_id) - .await; - - compress_state_event(shortstatekey, shorteventid) + /// Returns shortstatekey, event id + pub fn parse_compressed_state_event(&self, compressed_event: &CompressedStateEvent) -> Result<(u64, Arc)> { + Ok(( + utils::u64_from_bytes(&compressed_event[0..size_of::()]).expect("bytes have right length"), + services().rooms.short.get_eventid_from_short( + utils::u64_from_bytes(&compressed_event[size_of::()..]).expect("bytes have right length"), + )?, + )) } /// Creates a new shortstatehash that often is just a diff to an already @@ -247,25 +137,21 @@ impl Service { /// for this layer /// * `parent_states` - A stack with info on shortstatehash, full state, /// added diff and removed diff for each parent layer + #[tracing::instrument(skip(self, statediffnew, statediffremoved, diff_to_sibling, parent_states))] pub fn save_state_from_diff( - &self, - shortstatehash: ShortStateHash, - statediffnew: Arc, - statediffremoved: Arc, - diff_to_sibling: usize, + &self, shortstatehash: u64, statediffnew: Arc>, + statediffremoved: Arc>, diff_to_sibling: usize, mut parent_states: ParentStatesVec, - ) -> Result { - let statediffnew_len 
= statediffnew.len(); - let statediffremoved_len = statediffremoved.len(); - let diffsum = checked!(statediffnew_len + statediffremoved_len)?; + ) -> Result<()> { + let diffsum = statediffnew.len() + statediffremoved.len(); if parent_states.len() > 3 { // Number of layers // To many layers, we have to go deeper - let parent = parent_states.pop().expect("parent must have a state"); + let parent = parent_states.pop().unwrap(); - let mut parent_new = (*parent.added).clone(); - let mut parent_removed = (*parent.removed).clone(); + let mut parent_new = (*parent.2).clone(); + let mut parent_removed = (*parent.3).clone(); for removed in statediffremoved.iter() { if !parent_new.remove(removed) { @@ -298,28 +184,29 @@ impl Service { if parent_states.is_empty() { // There is no parent layer, create a new state - self.save_statediff(shortstatehash, &StateDiff { - parent: None, - added: statediffnew, - removed: statediffremoved, - }); + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: None, + added: statediffnew, + removed: statediffremoved, + }, + )?; return Ok(()); - } + }; // Else we have two options. // 1. We add the current diff on top of the parent layer. // 2. We replace a layer above - let parent = parent_states.pop().expect("parent must have a state"); - let parent_added_len = parent.added.len(); - let parent_removed_len = parent.removed.len(); - let parent_diff = checked!(parent_added_len + parent_removed_len)?; + let parent = parent_states.pop().unwrap(); + let parent_diff = parent.2.len() + parent.3.len(); - if checked!(diffsum * diffsum)? >= checked!(2 * diff_to_sibling * parent_diff)? 
{ + if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { // Diff too big, we replace above layer(s) - let mut parent_new = (*parent.added).clone(); - let mut parent_removed = (*parent.removed).clone(); + let mut parent_new = (*parent.2).clone(); + let mut parent_removed = (*parent.3).clone(); for removed in statediffremoved.iter() { if !parent_new.remove(removed) { @@ -348,11 +235,14 @@ impl Service { )?; } else { // Diff small enough, we add diff as layer on top of parent - self.save_statediff(shortstatehash, &StateDiff { - parent: Some(parent.shortstatehash), - added: statediffnew, - removed: statediffremoved, - }); + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: Some(parent.0), + added: statediffnew, + removed: statediffremoved, + }, + )?; } Ok(()) @@ -360,59 +250,47 @@ impl Service { /// Returns the new shortstatehash, and the state diff from the previous /// room state - #[tracing::instrument(skip(self, new_state_ids_compressed), level = "debug")] - pub async fn save_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: Arc, - ) -> Result { - let previous_shortstatehash = self - .services - .state - .get_room_shortstatehash(room_id) - .await - .ok(); + pub fn save_state( + &self, room_id: &RoomId, new_state_ids_compressed: Arc>, + ) -> HashSetCompressStateEvent { + let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; - let state_hash = - utils::calculate_hash(new_state_ids_compressed.iter().map(|bytes| &bytes[..])); + let state_hash = utils::calculate_hash( + &new_state_ids_compressed + .iter() + .map(|bytes| &bytes[..]) + .collect::>(), + ); - let (new_shortstatehash, already_existed) = self - .services + let (new_shortstatehash, already_existed) = services() + .rooms .short - .get_or_create_shortstatehash(&state_hash) - .await; + .get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(HashSetCompressStateEvent { - shortstatehash: 
new_shortstatehash, - ..Default::default() - }); + return Ok((new_shortstatehash, Arc::new(HashSet::new()), Arc::new(HashSet::new()))); } - let states_parents = if let Some(p) = previous_shortstatehash { - self.load_shortstatehash_info(p).await.unwrap_or_default() + let states_parents = + previous_shortstatehash.map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + + let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew: HashSet<_> = new_state_ids_compressed + .difference(&parent_stateinfo.1) + .copied() + .collect(); + + let statediffremoved: HashSet<_> = parent_stateinfo + .1 + .difference(&new_state_ids_compressed) + .copied() + .collect(); + + (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - ShortStateInfoVec::new() + (new_state_ids_compressed, Arc::new(HashSet::new())) }; - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: CompressedState = new_state_ids_compressed - .difference(&parent_stateinfo.full_state) - .copied() - .collect(); - - let statediffremoved: CompressedState = parent_stateinfo - .full_state - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (Arc::new(statediffnew), Arc::new(statediffremoved)) - } else { - (new_state_ids_compressed, Arc::new(CompressedState::new())) - }; - if !already_existed { self.save_state_from_diff( new_shortstatehash, @@ -421,122 +299,8 @@ impl Service { 2, // every state change is 2 event changes on average states_parents, )?; - } + }; - Ok(HashSetCompressStateEvent { - shortstatehash: new_shortstatehash, - added: statediffnew, - removed: statediffremoved, - }) - } - - #[tracing::instrument(skip(self), level = "debug", name = "get")] - async fn get_statediff(&self, shortstatehash: ShortStateHash) -> Result { - const BUFSIZE: usize = size_of::(); - const STRIDE: usize = size_of::(); - - let value = self - .db - .shortstatehash_statediff - 
.aqry::(&shortstatehash) - .await - .map_err(|e| { - err!(Database("Failed to find StateDiff from short {shortstatehash:?}: {e}")) - })?; - - let parent = utils::u64_from_bytes(&value[0..size_of::()]) - .ok() - .take_if(|parent| *parent != 0); - - debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride"); - let _num_values = value.len() / STRIDE; - - let mut add_mode = true; - let mut added = CompressedState::new(); - let mut removed = CompressedState::new(); - - let mut i = STRIDE; - while let Some(v) = value.get(i..expected!(i + 2 * STRIDE)) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i = expected!(i + STRIDE); - continue; - } - if add_mode { - added.insert(v.try_into()?); - } else { - removed.insert(v.try_into()?); - } - i = expected!(i + 2 * STRIDE); - } - - Ok(StateDiff { - parent, - added: Arc::new(added), - removed: Arc::new(removed), - }) - } - - fn save_statediff(&self, shortstatehash: ShortStateHash, diff: &StateDiff) { - let mut value = Vec::::with_capacity( - 2_usize - .saturating_add(diff.added.len()) - .saturating_add(diff.removed.len()), - ); - - let parent = diff.parent.unwrap_or(0_u64); - value.extend_from_slice(&parent.to_be_bytes()); - - for new in diff.added.iter() { - value.extend_from_slice(&new[..]); - } - - if !diff.removed.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in diff.removed.iter() { - value.extend_from_slice(&removed[..]); - } - } - - self.db - .shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value); + Ok((new_shortstatehash, statediffnew, statediffremoved)) } } - -#[inline] -#[must_use] -pub(crate) fn compress_state_event( - shortstatekey: ShortStateKey, - shorteventid: ShortEventId, -) -> CompressedStateEvent { - const SIZE: usize = size_of::(); - - let mut v = ArrayVec::::new(); - v.extend(shortstatekey.to_be_bytes()); - v.extend(shorteventid.to_be_bytes()); - v.as_ref() - .try_into() - .expect("failed to create 
CompressedStateEvent") -} - -#[inline] -#[must_use] -pub(crate) fn parse_compressed_state_event( - compressed_event: CompressedStateEvent, -) -> (ShortStateKey, ShortEventId) { - use utils::u64_from_u8; - - let shortstatekey = u64_from_u8(&compressed_event[0..size_of::()]); - let shorteventid = u64_from_u8(&compressed_event[size_of::()..]); - - (shortstatekey, shorteventid) -} - -#[inline] -fn compressed_state_size(compressed_state: &CompressedState) -> usize { - compressed_state - .len() - .checked_mul(size_of::()) - .expect("CompressedState size overflow") -} diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs new file mode 100644 index 00000000..b18f4b79 --- /dev/null +++ b/src/service/rooms/threads/data.rs @@ -0,0 +1,14 @@ +use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; + +use crate::{PduEvent, Result}; + +type PduEventIterResult<'a> = Result> + 'a>>; + +pub trait Data: Send + Sync { + fn threads_until<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: u64, include: &'a IncludeThreads, + ) -> PduEventIterResult<'a>; + + fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()>; + fn get_participants(&self, root_id: &[u8]) -> Result>>; +} diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index a680df55..e4f7b5dd 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -1,77 +1,46 @@ -use std::{collections::BTreeMap, sync::Arc}; +mod data; -use conduwuit::{ - Result, err, - matrix::pdu::{PduCount, PduEvent, PduId, RawPduId}, - utils::{ - ReadyExt, - stream::{TryIgnore, WidebandExt}, - }, -}; -use conduwuit_database::{Deserialized, Map}; -use futures::{Stream, StreamExt}; +use std::collections::BTreeMap; + +pub use data::Data; use ruma::{ - CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, - api::client::threads::get_threads::v1::IncludeThreads, 
events::relation::BundledThread, uint, + api::client::{error::ErrorKind, threads::get_threads::v1::IncludeThreads}, + events::relation::BundledThread, + uint, CanonicalJsonValue, EventId, RoomId, UserId, }; use serde_json::json; -use crate::{Dep, rooms, rooms::short::ShortRoomId}; +use crate::{services, Error, PduEvent, Result}; pub struct Service { - db: Data, - services: Services, -} - -struct Services { - short: Dep, - timeline: Dep, -} - -pub(super) struct Data { - threadid_userids: Arc, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - threadid_userids: args.db["threadid_userids"].clone(), - }, - services: Services { - short: args.depend::("rooms::short"), - timeline: args.depend::("rooms::timeline"), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, } impl Service { - pub async fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { - let root_id = self - .services - .timeline - .get_pdu_id(root_event_id) - .await - .map_err(|e| { - err!(Request(InvalidParam("Invalid event_id in thread message: {e:?}"))) - })?; + pub fn threads_until<'a>( + &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: u64, include: &'a IncludeThreads, + ) -> Result> + 'a> { + self.db.threads_until(user_id, room_id, until, include) + } - let root_pdu = self - .services + pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { + let root_id = &services() + .rooms .timeline - .get_pdu_from_id(&root_id) - .await - .map_err(|e| err!(Request(InvalidParam("Thread root not found: {e:?}"))))?; + .get_pdu_id(root_event_id)? 
+ .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Invalid event id in thread message"))?; - let mut root_pdu_json = self - .services + let root_pdu = services() + .rooms .timeline - .get_pdu_json_from_id(&root_id) - .await - .map_err(|e| err!(Request(InvalidParam("Thread root pdu not found: {e:?}"))))?; + .get_pdu_from_id(root_id)? + .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found"))?; + + let mut root_pdu_json = services() + .rooms + .timeline + .get_pdu_json_from_id(root_id)? + .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found"))?; if let CanonicalJsonValue::Object(unsigned) = root_pdu_json .entry("unsigned".to_owned()) @@ -81,11 +50,10 @@ impl Service { .get("m.relations") .and_then(|r| r.as_object()) .and_then(|r| r.get("m.thread")) - .and_then(|relations| { - serde_json::from_value::(relations.clone().into()).ok() - }) { + .and_then(|relations| serde_json::from_value::(relations.clone().into()).ok()) + { // Thread already existed - relations.count = relations.count.saturating_add(uint!(1)); + relations.count += uint!(1); relations.latest_event = pdu.to_message_like_event(); let content = serde_json::to_value(relations).expect("to_value always works"); @@ -114,79 +82,21 @@ impl Service { ); } - self.services + services() + .rooms .timeline - .replace_pdu(&root_id, &root_pdu_json, &root_pdu) - .await?; + .replace_pdu(root_id, &root_pdu_json, &root_pdu)?; } let mut users = Vec::new(); - match self.get_participants(&root_id).await { - | Ok(userids) => { - users.extend_from_slice(&userids); - }, - | _ => { - users.push(root_pdu.sender); - }, + if let Some(userids) = self.db.get_participants(root_id)? 
{ + users.extend_from_slice(&userids); + users.push(pdu.sender.clone()); + } else { + users.push(root_pdu.sender); + users.push(pdu.sender.clone()); } - users.push(pdu.sender.clone()); - self.update_participants(&root_id, &users) - } - - pub async fn threads_until<'a>( - &'a self, - user_id: &'a UserId, - room_id: &'a RoomId, - shorteventid: PduCount, - _inc: &'a IncludeThreads, - ) -> Result + Send + 'a> { - let shortroomid: ShortRoomId = self.services.short.get_shortroomid(room_id).await?; - - let current: RawPduId = PduId { - shortroomid, - shorteventid: shorteventid.saturating_sub(1), - } - .into(); - - let stream = self - .db - .threadid_userids - .rev_raw_keys_from(¤t) - .ignore_err() - .map(RawPduId::from) - .ready_take_while(move |pdu_id| pdu_id.shortroomid() == shortroomid.to_be_bytes()) - .wide_filter_map(move |pdu_id| async move { - let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; - let pdu_id: PduId = pdu_id.into(); - - if pdu.sender != user_id { - pdu.remove_transaction_id().ok(); - } - - Some((pdu_id.shorteventid, pdu)) - }); - - Ok(stream) - } - - pub(super) fn update_participants( - &self, - root_id: &RawPduId, - participants: &[OwnedUserId], - ) -> Result { - let users = participants - .iter() - .map(|user| user.as_bytes()) - .collect::>() - .join(&[0xFF][..]); - - self.db.threadid_userids.insert(root_id, &users); - - Ok(()) - } - - pub(super) async fn get_participants(&self, root_id: &RawPduId) -> Result> { - self.db.threadid_userids.get(root_id).await.deserialized() + self.db.update_participants(root_id, &users) } } diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 94c78bb0..a036b455 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,323 +1,68 @@ -use std::{borrow::Borrow, sync::Arc}; +use std::sync::Arc; -use conduwuit::{ - Err, PduCount, PduEvent, Result, at, err, - result::{LogErr, NotFound}, - utils, - utils::stream::TryReadyExt, -}; 
-use database::{Database, Deserialized, Json, KeyVal, Map}; -use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut}; -use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, api::Direction}; +use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; -use super::{PduId, RawPduId}; -use crate::{Dep, rooms, rooms::short::ShortRoomId}; +use super::PduCount; +use crate::{PduEvent, Result}; -pub(super) struct Data { - eventid_outlierpdu: Arc, - eventid_pduid: Arc, - pduid_pdu: Arc, - userroomid_highlightcount: Arc, - userroomid_notificationcount: Arc, - pub(super) db: Arc, - services: Services, -} - -struct Services { - short: Dep, -} - -pub type PdusIterItem = (PduCount, PduEvent); - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - eventid_outlierpdu: db["eventid_outlierpdu"].clone(), - eventid_pduid: db["eventid_pduid"].clone(), - pduid_pdu: db["pduid_pdu"].clone(), - userroomid_highlightcount: db["userroomid_highlightcount"].clone(), - userroomid_notificationcount: db["userroomid_notificationcount"].clone(), - db: args.db.clone(), - services: Services { - short: args.depend::("rooms::short"), - }, - } - } - - #[inline] - pub(super) async fn last_timeline_count( - &self, - sender_user: Option<&UserId>, - room_id: &RoomId, - ) -> Result { - let pdus_rev = self.pdus_rev(sender_user, room_id, PduCount::max()); - - pin_mut!(pdus_rev); - let last_count = pdus_rev - .try_next() - .await? - .map(at!(0)) - .filter(|&count| matches!(count, PduCount::Normal(_))) - .unwrap_or_else(PduCount::max); - - Ok(last_count) - } - - #[inline] - pub(super) async fn latest_pdu_in_room( - &self, - sender_user: Option<&UserId>, - room_id: &RoomId, - ) -> Result { - let pdus_rev = self.pdus_rev(sender_user, room_id, PduCount::max()); - - pin_mut!(pdus_rev); - pdus_rev - .try_next() - .await? 
- .map(at!(1)) - .ok_or_else(|| err!(Request(NotFound("no PDU's found in room")))) - } +pub trait Data: Send + Sync { + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. - pub(super) async fn get_pdu_count(&self, event_id: &EventId) -> Result { - self.get_pdu_id(event_id) - .await - .map(|pdu_id| pdu_id.pdu_count()) - } + fn get_pdu_count(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - pub(super) async fn get_pdu_json(&self, event_id: &EventId) -> Result { - let accepted = self.get_non_outlier_pdu_json(event_id).boxed(); - let outlier = self - .eventid_outlierpdu - .get(event_id) - .map(Deserialized::deserialized) - .boxed(); - - select_ok([accepted, outlier]).await.map(at!(0)) - } + fn get_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - pub(super) async fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result { - let pduid = self.get_pdu_id(event_id).await?; - - self.pduid_pdu.get(&pduid).await.deserialized() - } + fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the pdu's id. - #[inline] - pub(super) async fn get_pdu_id(&self, event_id: &EventId) -> Result { - self.eventid_pduid - .get(event_id) - .await - .map(|handle| RawPduId::from(&*handle)) - } - - /// Returns the pdu directly from `eventid_pduid` only. - pub(super) async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { - let pduid = self.get_pdu_id(event_id).await?; - - self.pduid_pdu.get(&pduid).await.deserialized() - } - - /// Like get_non_outlier_pdu(), but without the expense of fetching and - /// parsing the PduEvent - pub(super) async fn non_outlier_pdu_exists(&self, event_id: &EventId) -> Result { - let pduid = self.get_pdu_id(event_id).await?; - - self.pduid_pdu.exists(&pduid).await - } + fn get_pdu_id(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. 
/// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub(super) async fn get_pdu(&self, event_id: &EventId) -> Result { - let accepted = self.get_non_outlier_pdu(event_id).boxed(); - let outlier = self - .eventid_outlierpdu - .get(event_id) - .map(Deserialized::deserialized) - .boxed(); + fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; - select_ok([accepted, outlier]).await.map(at!(0)) - } - - /// Like get_non_outlier_pdu(), but without the expense of fetching and - /// parsing the PduEvent - #[inline] - pub(super) async fn outlier_pdu_exists(&self, event_id: &EventId) -> Result { - self.eventid_outlierpdu.exists(event_id).await - } - - /// Like get_pdu(), but without the expense of fetching and parsing the data - pub(super) async fn pdu_exists(&self, event_id: &EventId) -> Result { - let non_outlier = self.non_outlier_pdu_exists(event_id).boxed(); - let outlier = self.outlier_pdu_exists(event_id).boxed(); - - select_ok([non_outlier, outlier]).await.map(at!(0)) - } + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + fn get_pdu(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub(super) async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result { - self.pduid_pdu.get(pdu_id).await.deserialized() - } + fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; /// Returns the pdu as a `BTreeMap`. 
- pub(super) async fn get_pdu_json_from_id( - &self, - pdu_id: &RawPduId, - ) -> Result { - self.pduid_pdu.get(pdu_id).await.deserialized() - } + fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; - pub(super) async fn append_pdu( - &self, - pdu_id: &RawPduId, - pdu: &PduEvent, - json: &CanonicalJsonObject, - count: PduCount, - ) { - debug_assert!(matches!(count, PduCount::Normal(_)), "PduCount not Normal"); + /// Adds a new pdu to the timeline + fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()>; - self.pduid_pdu.raw_put(pdu_id, Json(json)); - self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id); - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes()); - } - - pub(super) fn prepend_backfill_pdu( - &self, - pdu_id: &RawPduId, - event_id: &EventId, - json: &CanonicalJsonObject, - ) { - self.pduid_pdu.raw_put(pdu_id, Json(json)); - self.eventid_pduid.insert(event_id, pdu_id); - self.eventid_outlierpdu.remove(event_id); - } + // Adds a new pdu to the backfilled timeline + fn prepend_backfill_pdu(&self, pdu_id: &[u8], event_id: &EventId, json: &CanonicalJsonObject) -> Result<()>; /// Removes a pdu and creates a new one with the same id. - pub(super) async fn replace_pdu( - &self, - pdu_id: &RawPduId, - pdu_json: &CanonicalJsonObject, - _pdu: &PduEvent, - ) -> Result { - if self.pduid_pdu.get(pdu_id).await.is_not_found() { - return Err!(Request(NotFound("PDU does not exist."))); - } - - self.pduid_pdu.raw_put(pdu_id, Json(pdu_json)); - - Ok(()) - } + fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, pdu: &PduEvent) -> Result<()>; /// Returns an iterator over all events and their tokens in a room that /// happened before the event with id `until` in reverse-chronological /// order. 
- pub(super) fn pdus_rev<'a>( - &'a self, - user_id: Option<&'a UserId>, - room_id: &'a RoomId, - until: PduCount, - ) -> impl Stream> + Send + 'a { - self.count_to_id(room_id, until, Direction::Backward) - .map_ok(move |current| { - let prefix = current.shortroomid(); - self.pduid_pdu - .rev_raw_stream_from(¤t) - .ready_try_take_while(move |(key, _)| Ok(key.starts_with(&prefix))) - .ready_and_then(move |item| Self::each_pdu(item, user_id)) - }) - .try_flatten_stream() - } + #[allow(clippy::type_complexity)] + fn pdus_until<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, until: PduCount, + ) -> Result> + 'a>>; - pub(super) fn pdus<'a>( - &'a self, - user_id: Option<&'a UserId>, - room_id: &'a RoomId, - from: PduCount, - ) -> impl Stream> + Send + 'a { - self.count_to_id(room_id, from, Direction::Forward) - .map_ok(move |current| { - let prefix = current.shortroomid(); - self.pduid_pdu - .raw_stream_from(¤t) - .ready_try_take_while(move |(key, _)| Ok(key.starts_with(&prefix))) - .ready_and_then(move |item| Self::each_pdu(item, user_id)) - }) - .try_flatten_stream() - } + /// Returns an iterator over all events in a room that happened after the + /// event with id `from` in chronological order. 
+ #[allow(clippy::type_complexity)] + fn pdus_after<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, from: PduCount, + ) -> Result> + 'a>>; - fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: Option<&UserId>) -> Result { - let pdu_id: RawPduId = pdu_id.into(); - - let mut pdu = serde_json::from_slice::(pdu)?; - - if Some(pdu.sender.borrow()) != user_id { - pdu.remove_transaction_id().log_err().ok(); - } - - pdu.add_age().log_err().ok(); - - Ok((pdu_id.pdu_count(), pdu)) - } - - pub(super) fn increment_notification_counts( - &self, - room_id: &RoomId, - notifies: Vec, - highlights: Vec, - ) { - let _cork = self.db.cork(); - - for user in notifies { - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - increment(&self.userroomid_notificationcount, &userroom_id); - } - - for user in highlights { - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xFF); - userroom_id.extend_from_slice(room_id.as_bytes()); - increment(&self.userroomid_highlightcount, &userroom_id); - } - } - - async fn count_to_id( - &self, - room_id: &RoomId, - shorteventid: PduCount, - dir: Direction, - ) -> Result { - let shortroomid: ShortRoomId = self - .services - .short - .get_shortroomid(room_id) - .await - .map_err(|e| err!(Request(NotFound("Room {room_id:?} not found: {e:?}"))))?; - - // +1 so we don't send the base event - let pdu_id = PduId { - shortroomid, - shorteventid: shorteventid.saturating_inc(dir), - }; - - Ok(pdu_id.into()) - } -} - -//TODO: this is an ABA -fn increment(db: &Arc, key: &[u8]) { - let old = db.get_blocking(key); - let new = utils::increment(old.ok().as_deref()); - db.insert(key, new); + fn increment_notification_counts( + &self, room_id: &RoomId, notifies: Vec, highlights: Vec, + ) -> Result<()>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 947e1c38..3639c56b 100644 --- a/src/service/rooms/timeline/mod.rs +++ 
b/src/service/rooms/timeline/mod.rs @@ -1,38 +1,17 @@ -mod data; +pub(crate) mod data; use std::{ - borrow::Borrow, - cmp, - collections::{BTreeMap, HashSet}, - fmt::Write, - iter::once, + cmp::Ordering, + collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; -use async_trait::async_trait; -pub use conduwuit::matrix::pdu::{PduId, RawPduId}; -use conduwuit::{ - Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, - matrix::{ - Event, - pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, - state_res::{self, RoomVersion}, - }, - utils::{ - self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, - }, - validated, warn, -}; -use futures::{ - Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, -}; +pub use data::Data; +use rand::prelude::SliceRandom; use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - RoomId, RoomVersionId, ServerName, UserId, - api::federation, + api::{client::error::ErrorKind, federation}, canonical_json::to_canonical_value, events::{ - GlobalAccountDataEventType, StateEventType, TimelineEventType, push_rules::PushRulesEvent, room::{ create::RoomCreateEventContent, @@ -41,23 +20,73 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent, }, + GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - uint, + serde::Base64, + state_res::{self, Event, RoomVersion}, + uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; -use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use tokio::sync::{Mutex, MutexGuard, RwLock}; +use tracing::{debug, error, info, warn}; -use self::data::Data; -pub use 
self::data::PdusIterItem; +use super::state_compressor::CompressedStateEvent; use crate::{ - Dep, account_data, admin, appservice, - appservice::NamespaceRegex, - globals, pusher, rooms, - rooms::{short::ShortRoomId, state_compressor::CompressedState}, - sending, server_keys, users, + api::server_server, + service::{ + self, + appservice::NamespaceRegex, + pdu::{EventHash, PduBuilder}, + }, + services, utils, Error, PduEvent, Result, }; +#[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] +pub enum PduCount { + Backfilled(u64), + Normal(u64), +} + +impl PduCount { + pub fn min() -> Self { Self::Backfilled(u64::MAX) } + + pub fn max() -> Self { Self::Normal(u64::MAX) } + + pub fn try_from_string(token: &str) -> Result { + if let Some(stripped_token) = token.strip_prefix('-') { + stripped_token.parse().map(PduCount::Backfilled) + } else { + token.parse().map(PduCount::Normal) + } + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid pagination token.")) + } + + pub fn stringify(&self) -> String { + match self { + PduCount::Backfilled(x) => format!("-{x}"), + PduCount::Normal(x) => x.to_string(), + } + } +} + +impl PartialOrd for PduCount { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } +} + +impl Ord for PduCount { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (PduCount::Normal(s), PduCount::Normal(o)) => s.cmp(o), + (PduCount::Backfilled(s), PduCount::Backfilled(o)) => o.cmp(s), + (PduCount::Normal(_), PduCount::Backfilled(_)) => Ordering::Greater, + (PduCount::Backfilled(_), PduCount::Normal(_)) => Ordering::Less, + } + } +} + // Update Relationships #[derive(Deserialize)] struct ExtractRelatesTo { @@ -75,189 +104,111 @@ struct ExtractRelatesToEventId { relates_to: ExtractEventId, } -#[derive(Deserialize)] -struct ExtractBody { - body: Option, -} - pub struct Service { - services: Services, - db: Data, - pub mutex_insert: RoomMutexMap, -} + pub db: &'static dyn Data, -struct Services { - server: Arc, - 
account_data: Dep, - appservice: Dep, - admin: Dep, - alias: Dep, - globals: Dep, - short: Dep, - state: Dep, - state_cache: Dep, - state_accessor: Dep, - pdu_metadata: Dep, - read_receipt: Dep, - sending: Dep, - server_keys: Dep, - user: Dep, - users: Dep, - pusher: Dep, - threads: Dep, - search: Dep, - spaces: Dep, - event_handler: Dep, -} - -type RoomMutexMap = MutexMap; -pub type RoomMutexGuard = MutexMapGuard; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - server: args.server.clone(), - account_data: args.depend::("account_data"), - appservice: args.depend::("appservice"), - admin: args.depend::("admin"), - alias: args.depend::("rooms::alias"), - globals: args.depend::("globals"), - short: args.depend::("rooms::short"), - state: args.depend::("rooms::state"), - state_cache: args.depend::("rooms::state_cache"), - state_accessor: args - .depend::("rooms::state_accessor"), - pdu_metadata: args.depend::("rooms::pdu_metadata"), - read_receipt: args.depend::("rooms::read_receipt"), - sending: args.depend::("sending"), - server_keys: args.depend::("server_keys"), - user: args.depend::("rooms::user"), - users: args.depend::("users"), - pusher: args.depend::("pusher"), - threads: args.depend::("rooms::threads"), - search: args.depend::("rooms::search"), - spaces: args.depend::("rooms::spaces"), - event_handler: args - .depend::("rooms::event_handler"), - }, - db: Data::new(&args), - mutex_insert: RoomMutexMap::new(), - })) - } - - async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { - let mutex_insert = self.mutex_insert.len(); - writeln!(out, "insert_mutex: {mutex_insert}")?; - - Ok(()) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub lasttimelinecount_cache: Mutex>, } impl Service { - #[tracing::instrument(skip(self), level = "debug")] - pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result { - 
self.first_item_in_room(room_id).await.map(at!(1)) + #[tracing::instrument(skip(self))] + pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { + self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? + .next() + .map(|o| o.map(|(_, p)| Arc::new(p))) + .transpose() } - #[tracing::instrument(skip(self), level = "debug")] - pub async fn first_item_in_room(&self, room_id: &RoomId) -> Result<(PduCount, PduEvent)> { - let pdus = self.pdus(None, room_id, None); - - pin_mut!(pdus); - pdus.try_next() - .await? - .ok_or_else(|| err!(Request(NotFound("No PDU found in room")))) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result { - self.db.latest_pdu_in_room(None, room_id).await - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn last_timeline_count( - &self, - sender_user: Option<&UserId>, - room_id: &RoomId, - ) -> Result { - self.db.last_timeline_count(sender_user, room_id).await + #[tracing::instrument(skip(self))] + pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + self.db.last_timeline_count(sender_user, room_id) } /// Returns the `count` of this pdu's id. - pub async fn get_pdu_count(&self, event_id: &EventId) -> Result { - self.db.get_pdu_count(event_id).await + pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.db.get_pdu_count(event_id) } + + // TODO Is this the same as the function above? + /* + #[tracing::instrument(skip(self))] + pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { + let prefix = self + .get_shortroomid(room_id)? 
+ .expect("room exists") + .to_be_bytes() + .to_vec(); + + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.pduid_pdu + .iter_from(&last_possible_key, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .next() + .map(|b| self.pdu_count(&b.0)) + .transpose() + .map(|op| op.unwrap_or_default()) + } + */ + + /// Returns the version of a room, if known + pub fn get_room_version(&self, room_id: &RoomId) -> Result> { + let create_event = services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "")?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + + Ok(create_event_content.map(|content| content.room_version)) } /// Returns the json of a pdu. - pub async fn get_pdu_json(&self, event_id: &EventId) -> Result { - self.db.get_pdu_json(event_id).await + pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { + self.db.get_pdu_json(event_id) } /// Returns the json of a pdu. - #[inline] - pub async fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result { - self.db.get_non_outlier_pdu_json(event_id).await + pub fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.db.get_non_outlier_pdu_json(event_id) } /// Returns the pdu's id. - #[inline] - pub async fn get_pdu_id(&self, event_id: &EventId) -> Result { - self.db.get_pdu_id(event_id).await + pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { self.db.get_pdu_id(event_id) } + + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { + self.db.get_non_outlier_pdu(event_id) } /// Returns the pdu. 
/// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - #[inline] - pub async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { - self.db.get_non_outlier_pdu(event_id).await - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub async fn get_pdu(&self, event_id: &EventId) -> Result { - self.db.get_pdu(event_id).await - } + pub fn get_pdu(&self, event_id: &EventId) -> Result>> { self.db.get_pdu(event_id) } /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result { - self.db.get_pdu_from_id(pdu_id).await - } + pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { self.db.get_pdu_from_id(pdu_id) } /// Returns the pdu as a `BTreeMap`. - pub async fn get_pdu_json_from_id(&self, pdu_id: &RawPduId) -> Result { - self.db.get_pdu_json_from_id(pdu_id).await - } - - /// Checks if pdu exists - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn pdu_exists<'a>( - &'a self, - event_id: &'a EventId, - ) -> impl Future + Send + 'a { - self.db.pdu_exists(event_id).is_ok() + pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { + self.db.get_pdu_json_from_id(pdu_id) } /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self), level = "debug")] - pub async fn replace_pdu( - &self, - pdu_id: &RawPduId, - pdu_json: &CanonicalJsonObject, - pdu: &PduEvent, - ) -> Result<()> { - self.db.replace_pdu(pdu_id, pdu_json, pdu).await + #[tracing::instrument(skip(self))] + pub fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, pdu: &PduEvent) -> Result<()> { + self.db.replace_pdu(pdu_id, pdu_json, pdu) } /// Creates a new persisted data unit and adds it to a room. @@ -266,26 +217,22 @@ impl Service { /// happens in `append_pdu`. 
/// /// Returns pdu id - #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_pdu<'a, Leafs>( - &'a self, - pdu: &'a PduEvent, + #[tracing::instrument(skip(self, pdu, pdu_json, leaves))] + pub async fn append_pdu( + &self, + pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - leafs: Leafs, - state_lock: &'a RoomMutexGuard, - ) -> Result - where - Leafs: Iterator + Send + 'a, - { + leaves: Vec, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result> { // Coalesce database writes for the remainder of this scope. - let _cork = self.db.db.cork_and_flush(); + let _cork = services().globals.db.cork_and_flush()?; - let shortroomid = self - .services + let shortroomid = services() + .rooms .short - .get_shortroomid(&pdu.room_id) - .await - .map_err(|_| err!(Database("Room does not exist")))?; + .get_shortroomid(&pdu.room_id)? + .expect("room exists"); // Make unsigned fields correct. This is not properly documented in the spec, // but state events need to have previous content in the unsigned field, so @@ -295,41 +242,27 @@ impl Service { .entry("unsigned".to_owned()) .or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default())) { - if let Ok(shortstatehash) = self - .services + if let Some(shortstatehash) = services() + .rooms .state_accessor .pdu_shortstatehash(&pdu.event_id) - .await + .unwrap() { - if let Ok(prev_state) = self - .services + if let Some(prev_state) = services() + .rooms .state_accessor .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .await + .unwrap() { unsigned.insert( "prev_content".to_owned(), CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()).map_err( - |e| { - error!( - "Failed to convert prev_state to canonical JSON: {e}" - ); - Error::bad_database( - "Failed to convert prev_state to canonical JSON.", - ) - }, - )?, + utils::to_canonical_object(prev_state.content.clone()).map_err(|e| { + error!("Failed to 
convert prev_state to canonical JSON: {}", e); + Error::bad_database("Failed to convert prev_state to canonical JSON.") + })?, ), ); - unsigned.insert( - String::from("prev_sender"), - CanonicalJsonValue::String(prev_state.sender.to_string()), - ); - unsigned.insert( - String::from("replaces_state"), - CanonicalJsonValue::String(prev_state.event_id.to_string()), - ); } } } else { @@ -338,101 +271,129 @@ impl Service { } // We must keep track of all events that have been referenced. - self.services + services() + .rooms .pdu_metadata - .mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); - - self.services + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + services() + .rooms .state - .set_forward_extremities(&pdu.room_id, leafs, state_lock) - .await; + .set_forward_extremities(&pdu.room_id, leaves, state_lock)?; - let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .await + .entry(pdu.room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().await; - let count1 = self.services.globals.next_count().unwrap(); + let count1 = services().globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if // appending fails - self.services + services() + .rooms .read_receipt - .private_read_set(&pdu.room_id, &pdu.sender, count1); - self.services + .private_read_set(&pdu.room_id, &pdu.sender, count1)?; + services() + .rooms .user - .reset_notification_counts(&pdu.sender, &pdu.room_id); + .reset_notification_counts(&pdu.sender, &pdu.room_id)?; - let count2 = PduCount::Normal(self.services.globals.next_count().unwrap()); - let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count2 }.into(); + let count2 = services().globals.next_count()?; + let mut pdu_id = shortroomid.to_be_bytes().to_vec(); + pdu_id.extend_from_slice(&count2.to_be_bytes()); + + // 
https://spec.matrix.org/v1.9/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property + // For backwards-compatibility with older clients, + // servers should add a redacts property to the top level of m.room.redaction + // events in when serving such events over the Client-Server API. + if pdu.kind == TimelineEventType::RoomRedaction + && services().rooms.state.get_room_version(&pdu.room_id)? == RoomVersionId::V11 + { + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?; + + if let Some(redact_id) = &content.redacts { + pdu_json.insert("redacts".to_owned(), CanonicalJsonValue::String(redact_id.to_string())); + } + } // Insert pdu - self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2).await; + self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2)?; drop(insert_lock); - // See if the event matches any known pushers via power level - let power_levels: RoomPowerLevelsEventContent = self - .services + // See if the event matches any known pushers + let power_levels: RoomPowerLevelsEventContent = services() + .rooms .state_accessor - .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") - .await + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_str(ev.content.get()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? 
.unwrap_or_default(); let sync_pdu = pdu.to_sync_room_event(); - let mut push_target: HashSet<_> = self - .services - .state_cache - .active_local_users_in_room(&pdu.room_id) - .map(ToOwned::to_owned) - // Don't notify the sender of their own events, and dont send from ignored users - .ready_filter(|user| *user != pdu.sender) - .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, &recipient_user).await).then_some(recipient_user) }) - .collect() - .await; + let mut notifies = Vec::new(); + let mut highlights = Vec::new(); - let mut notifies = Vec::with_capacity(push_target.len().saturating_add(1)); - let mut highlights = Vec::with_capacity(push_target.len().saturating_add(1)); + let mut push_target = services() + .rooms + .state_cache + .get_our_real_users(&pdu.room_id)?; if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { - let target_user_id = UserId::parse(state_key)?; + let target_user_id = UserId::parse(state_key.clone()).expect("This state_key was previously validated"); - if self.services.users.is_active_local(target_user_id).await { - push_target.insert(target_user_id.to_owned()); + if !push_target.contains(&target_user_id) { + let mut target = push_target.as_ref().clone(); + target.insert(target_user_id); + push_target = Arc::new(target); } } } - for user in &push_target { - let rules_for_user = self - .services + for user in push_target.iter() { + // Don't notify the user of their own events + if user == &pdu.sender { + continue; + } + + let rules_for_user = services() .account_data - .get_global(user, GlobalAccountDataEventType::PushRules) - .await - .map_or_else( - |_| Ruleset::server_default(user), - |ev: PushRulesEvent| ev.content.global, - ); + .get(None, user, GlobalAccountDataEventType::PushRules.to_string().into())? 
+ .map(|event| { + serde_json::from_str::(event.get()).map_err(|e| { + warn!("Invalid push rules event in db for user ID {user}: {e}"); + Error::bad_database("Invalid push rules event in db.") + }) + }) + .transpose()? + .map_or_else(|| Ruleset::server_default(user), |ev: PushRulesEvent| ev.content.global); let mut highlight = false; let mut notify = false; - for action in self - .services - .pusher - .get_actions(user, &rules_for_user, &power_levels, &sync_pdu, &pdu.room_id) - .await + for action in + services() + .pusher + .get_actions(user, &rules_for_user, &power_levels, &sync_pdu, &pdu.room_id)? { match action { - | Action::Notify => notify = true, - | Action::SetTweak(Tweak::Highlight(true)) => { + Action::Notify => notify = true, + Action::SetTweak(Tweak::Highlight(true)) => { highlight = true; }, - | _ => {}, - } - - // Break early if both conditions are true - if notify && highlight { - break; - } + _ => {}, + }; } if notify { @@ -443,148 +404,181 @@ impl Service { highlights.push(user.clone()); } - self.services - .pusher - .get_pushkeys(user) - .ready_for_each(|push_key| { - self.services - .sending - .send_pdu_push(&pdu_id, user, push_key.to_owned()) - .expect("TODO: replace with future"); - }) - .await; + for push_key in services().pusher.get_pushkeys(user) { + services().sending.send_pdu_push(&pdu_id, user, push_key?)?; + } } self.db - .increment_notification_counts(&pdu.room_id, notifies, highlights); + .increment_notification_counts(&pdu.room_id, notifies, highlights)?; match pdu.kind { - | TimelineEventType::RoomRedaction => { - use RoomVersionId::*; - - let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; + TimelineEventType::RoomRedaction => { + let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?; match room_version_id { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | 
RoomVersionId::V6 + | RoomVersionId::V7 + | RoomVersionId::V8 + | RoomVersionId::V9 + | RoomVersionId::V10 => { if let Some(redact_id) = &pdu.redacts { - if self - .services - .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) - .await? - { - self.redact_pdu(redact_id, pdu, shortroomid).await?; - } + self.redact_pdu(redact_id, pdu)?; } }, - | _ => { - let content: RoomRedactionEventContent = pdu.get_content()?; + RoomVersionId::V11 => { + let content = + serde_json::from_str::(pdu.content.get()).map_err(|e| { + warn!("Invalid content in redaction pdu: {e}"); + Error::bad_database("Invalid content in redaction pdu.") + })?; if let Some(redact_id) = &content.redacts { - if self - .services - .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) - .await? - { - self.redact_pdu(redact_id, pdu, shortroomid).await?; - } + self.redact_pdu(redact_id, pdu)?; } }, - } + _ => { + warn!("Unexpected or unsupported room version {}", room_version_id); + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Unexpected or unsupported room version found", + )); + }, + }; }, - | TimelineEventType::SpaceChild => + TimelineEventType::SpaceChild => { if let Some(_state_key) = &pdu.state_key { - self.services + services() + .rooms .spaces .roomid_spacehierarchy_cache .lock() .await .remove(&pdu.room_id); - }, - | TimelineEventType::RoomMember => { + } + }, + TimelineEventType::RoomMember => { if let Some(state_key) = &pdu.state_key { // if the state_key fails - let target_user_id = UserId::parse(state_key) - .expect("This state_key was previously validated"); + let target_user_id = + UserId::parse(state_key.clone()).expect("This state_key was previously validated"); - let content: RoomMemberEventContent = pdu.get_content()?; - let stripped_state = match content.membership { - | MembershipState::Invite | MembershipState::Knock => - self.services.state.summary_stripped(pdu).await.into(), - | _ => None, + let content = 
serde_json::from_str::(pdu.content.get()).map_err(|e| { + error!("Invalid room member event content in pdu: {e}"); + Error::bad_database("Invalid room member event content in pdu.") + })?; + + let invite_state = match content.membership { + MembershipState::Invite => { + let state = services().rooms.state.calculate_invite_state(pdu)?; + Some(state) + }, + _ => None, }; - // Update our membership info, we do this here incase a user is invited or - // knocked and immediately leaves we need the DB to record the invite or - // knock event for auth - self.services - .state_cache - .update_membership( - &pdu.room_id, - target_user_id, - content, - &pdu.sender, - stripped_state, - None, - true, - ) - .await?; + // Update our membership info, we do this here incase a user is invited + // and immediately leaves we need the DB to record the invite event for auth + services().rooms.state_cache.update_membership( + &pdu.room_id, + &target_user_id, + content, + &pdu.sender, + invite_state, + None, + true, + )?; } }, - | TimelineEventType::RoomMessage => { - let content: ExtractBody = pdu.get_content()?; - if let Some(body) = content.body { - self.services.search.index_pdu(shortroomid, &pdu_id, &body); + TimelineEventType::RoomMessage => { + #[derive(Deserialize)] + struct ExtractBody { + body: Option, + } - if self.services.admin.is_admin_command(pdu, &body).await { - self.services - .admin - .command(body, Some((*pdu.event_id).into()))?; + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + if let Some(body) = content.body { + services() + .rooms + .search + .index_pdu(shortroomid, &pdu_id, &body)?; + + let server_user = format!("@conduit:{}", services().globals.server_name()); + + let to_conduit = body.starts_with(&format!("{server_user}: ")) + || body.starts_with(&format!("{server_user} ")) + || body.starts_with("!admin") + || body == format!("{server_user}:") + || body == server_user; + + // This will 
evaluate to false if the emergency password is set up so that + // the administrator can execute commands as conduit + let from_conduit = pdu.sender == server_user && services().globals.emergency_password().is_none(); + + if let Some(admin_room) = service::admin::Service::get_admin_room()? { + if to_conduit && !from_conduit && admin_room == pdu.room_id { + services().admin.process_message(body, pdu.event_id.clone()); + } } } }, - | _ => {}, + _ => {}, } - if let Ok(content) = pdu.get_content::() { - if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await { - self.services + if let Ok(content) = serde_json::from_str::(pdu.content.get()) { + if let Some(related_pducount) = services() + .rooms + .timeline + .get_pdu_count(&content.relates_to.event_id)? + { + services() + .rooms .pdu_metadata - .add_relation(count2, related_pducount); + .add_relation(PduCount::Normal(count2), related_pducount)?; } } - if let Ok(content) = pdu.get_content::() { + if let Ok(content) = serde_json::from_str::(pdu.content.get()) { match content.relates_to { - | Relation::Reply { in_reply_to } => { + Relation::Reply { + in_reply_to, + } => { // We need to do it again here, because replies don't have // event_id as a top level field - if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await + if let Some(related_pducount) = services() + .rooms + .timeline + .get_pdu_count(&in_reply_to.event_id)? 
{ - self.services + services() + .rooms .pdu_metadata - .add_relation(count2, related_pducount); + .add_relation(PduCount::Normal(count2), related_pducount)?; } }, - | Relation::Thread(thread) => { - self.services + Relation::Thread(thread) => { + services() + .rooms .threads - .add_to_thread(&thread.event_id, pdu) - .await?; + .add_to_thread(&thread.event_id, pdu)?; }, - | _ => {}, // TODO: Aggregate other types + _ => {}, // TODO: Aggregate other types } } - for appservice in self.services.appservice.read().await.values() { - if self - .services + for appservice in services().appservice.read().await.values() { + if services() + .rooms .state_cache - .appservice_in_room(&pdu.room_id, appservice) - .await + .appservice_in_room(&pdu.room_id, appservice)? { - self.services + services() .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; continue; } @@ -597,10 +591,10 @@ impl Service { .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) { let appservice_uid = appservice.registration.sender_localpart.as_str(); - if state_key_uid == &appservice_uid { - self.services + if state_key_uid == appservice_uid { + services() .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; continue; } } @@ -612,35 +606,36 @@ impl Service { && pdu .state_key .as_ref() - .is_some_and(|state_key| users.is_match(state_key)) + .map_or(false, |state_key| users.is_match(state_key)) }; - let matching_aliases = |aliases: NamespaceRegex| { - self.services + let matching_aliases = |aliases: &NamespaceRegex| { + services() + .rooms .alias .local_aliases_for_room(&pdu.room_id) - .ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) + .filter_map(Result::ok) + .any(|room_alias| aliases.is_match(room_alias.as_str())) }; - if matching_aliases(appservice.aliases.clone()).await + if 
matching_aliases(&appservice.aliases) || appservice.rooms.is_match(pdu.room_id.as_str()) || matching_users(&appservice.users) { - self.services + services() .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; } } Ok(pdu_id) } - pub async fn create_hash_and_sign_event( + pub fn create_hash_and_sign_event( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - _mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room - * state mutex */ + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<(PduEvent, CanonicalJsonObject)> { let PduBuilder { event_type, @@ -648,27 +643,25 @@ impl Service { unsigned, state_key, redacts, - timestamp, } = pdu_builder; - let prev_events: Vec = self - .services + let prev_events: Vec<_> = services() + .rooms .state - .get_forward_extremities(room_id) + .get_forward_extremities(room_id)? 
+ .into_iter() .take(20) - .map(Into::into) - .collect() - .await; + .collect(); // If there was no create event yet, assume we are creating a room - let room_version_id = self - .services + let room_version_id = services() + .rooms .state .get_room_version(room_id) - .await .or_else(|_| { if event_type == TimelineEventType::RoomCreate { - let content: RoomCreateEventContent = serde_json::from_str(content.get())?; + let content = serde_json::from_str::(content.get()) + .expect("Invalid content in RoomCreate pdu."); Ok(content.room_version) } else { Err(Error::InconsistentRoomState( @@ -680,42 +673,36 @@ impl Service { let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - let auth_events = self - .services - .state - .get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content) - .await?; + let auth_events = + services() + .rooms + .state + .get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .stream() - .map(Ok) - .and_then(|event_id| self.get_pdu(event_id)) - .and_then(|pdu| future::ok(pdu.depth)) - .ignore_err() - .ready_fold(uint!(0), cmp::max) - .await - .saturating_add(uint!(1)); + .filter_map(|event_id| Some(services().rooms.timeline.get_pdu(event_id).ok()??.depth)) + .max() + .unwrap_or_else(|| uint!(0)) + + uint!(1); let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Ok(prev_pdu) = self - .services - .state_accessor - .room_state_get(room_id, &event_type.to_string().into(), state_key) - .await + if let Some(prev_pdu) = + services() + .rooms + .state_accessor + .room_state_get(room_id, &event_type.to_string().into(), state_key)? 
{ - unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value()); unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender) - .expect("UserId::to_value always works"), + "prev_content".to_owned(), + serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), ); unsigned.insert( - "replaces_state".to_owned(), - serde_json::to_value(&prev_pdu.event_id).expect("EventId is valid json"), + "prev_sender".to_owned(), + serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), ); } } @@ -725,14 +712,9 @@ impl Service { room_id: room_id.to_owned(), sender: sender.to_owned(), origin: None, - origin_server_ts: timestamp.map_or_else( - || { - utils::millis_since_unix_epoch() - .try_into() - .expect("u64 fits into UInt") - }, - |ts| ts.get(), - ), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), kind: event_type, content, state_key, @@ -748,73 +730,81 @@ impl Service { } else { Some(to_raw_value(&unsigned).expect("to_raw_value always works")) }, - hashes: EventHash { sha256: "aaa".to_owned() }, + hashes: EventHash { + sha256: "aaa".to_owned(), + }, signatures: None, }; - let auth_fetch = |k: &StateEventType, s: &str| { - let key = (k.clone(), s.into()); - ready(auth_events.get(&key)) - }; - let auth_check = state_res::auth_check( &room_version, &pdu, - None, // TODO: third_party_invite - auth_fetch, + None::, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) - .await - .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; + .map_err(|e| { + error!("Auth check failed: {:?}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed.") + })?; if !auth_check { - return Err!(Request(Forbidden("Event is not authorized."))); + return Err(Error::BadRequest(ErrorKind::forbidden(), "Event is not authorized.")); } // Hash and sign let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| { - 
err!(Request(BadJson(warn!("Failed to convert PDU to canonical JSON: {e}")))) + error!("Failed to convert PDU to canonical JSON: {}", e); + Error::bad_database("Failed to convert PDU to canonical JSON.") })?; // room v3 and above removed the "event_id" field from remote PDU format match room_version_id { - | RoomVersionId::V1 | RoomVersionId::V2 => {}, - | _ => { + RoomVersionId::V1 | RoomVersionId::V2 => {}, + _ => { pdu_json.remove("event_id"); }, - } + }; // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(self.services.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), + to_canonical_value(services().globals.server_name()).expect("server name is a valid CanonicalJsonValue"), ); - if let Err(e) = self - .services - .server_keys - .hash_and_sign_event(&mut pdu_json, &room_version_id) - { - return match e { - | Error::Signatures(ruma::signatures::Error::PduSize) => { - Err!(Request(TooLarge("Message/PDU is too long (exceeds 65535 bytes)"))) - }, - | _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))), - }; + match ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut pdu_json, + &room_version_id, + ) { + Ok(()) => {}, + Err(e) => { + return match e { + ruma::signatures::Error::PduSize => { + Err(Error::BadRequest(ErrorKind::TooLarge, "Message is too long")) + }, + _ => Err(Error::BadRequest(ErrorKind::Unknown, "Signing event failed")), + } + }, } // Generate event id - pdu.event_id = gen_event_id(&pdu_json, &room_version_id)?; + pdu.event_id = EventId::parse_arc(format!( + "${}", + ruma::signatures::reference_hash(&pdu_json, &room_version_id).expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); - pdu_json - .insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into())); + pdu_json.insert( + 
"event_id".to_owned(), + CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), + ); // Generate short event id - let _shorteventid = self - .services + let _shorteventid = services() + .rooms .short - .get_or_create_shorteventid(&pdu.event_id) - .await; + .get_or_create_shorteventid(&pdu.event_id)?; Ok((pdu, pdu_json)) } @@ -822,81 +812,97 @@ impl Service { /// Creates a new persisted data unit and adds it to a room. This function /// takes a roomid_mutex_state, meaning that only this function is able to /// mutate the room state. - #[tracing::instrument(skip(self, state_lock), level = "debug")] + #[tracing::instrument(skip(self, state_lock))] pub async fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - state_lock: &RoomMutexGuard, - ) -> Result { - let (pdu, pdu_json) = self - .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) - .await?; + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result> { + let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; - if self.services.admin.is_admin_room(&pdu.room_id).await { - self.check_pdu_for_admin_room(&pdu, sender).boxed().await?; - } + if let Some(admin_room) = service::admin::Service::get_admin_room()? 
{ + if admin_room == room_id { + match pdu.event_type() { + TimelineEventType::RoomEncryption => { + warn!("Encryption is not allowed in the admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Encryption is not allowed in the admins room.", + )); + }, + TimelineEventType::RoomMember => { + let target = pdu + .state_key() + .filter(|v| v.starts_with('@')) + .unwrap_or(sender.as_str()); + let server_name = services().globals.server_name(); + let server_user = format!("@conduit:{server_name}"); + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - // If redaction event is not authorized, do not append it to the timeline - if pdu.kind == TimelineEventType::RoomRedaction { - use RoomVersionId::*; - match self.services.state.get_room_version(&pdu.room_id).await? { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = &pdu.redacts { - if !self - .services - .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) - .await? 
- { - return Err!(Request(Forbidden("User cannot redact this event."))); + if content.membership == MembershipState::Leave { + if target == server_user { + warn!("Conduit user cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Conduit user cannot leave from admins room.", + )); + } + + let count = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .filter(|m| m.server_name() == server_name) + .filter(|m| m != target) + .count(); + if count < 2 { + warn!("Last admin cannot leave from admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Last admin cannot leave from admins room.", + )); + } } - } - }, - | _ => { - let content: RoomRedactionEventContent = pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - if !self - .services - .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) - .await? - { - return Err!(Request(Forbidden("User cannot redact this event."))); + + if content.membership == MembershipState::Ban && pdu.state_key().is_some() { + if target == server_user { + warn!("Conduit user cannot be banned in admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Conduit user cannot be banned in admins room.", + )); + } + + let count = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .filter(|m| m.server_name() == server_name) + .filter(|m| m != target) + .count(); + if count < 2 { + warn!("Last admin cannot be banned in admins room"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Last admin cannot be banned in admins room.", + )); + } } - } - }, - } - } - - if pdu.kind == TimelineEventType::RoomMember { - let content: RoomMemberEventContent = pdu.get_content()?; - - if content.join_authorized_via_users_server.is_some() - && content.membership != MembershipState::Join - { - return Err!(Request(BadJson( - "join_authorised_via_users_server is only for 
member joins" - ))); - } - - if content - .join_authorized_via_users_server - .as_ref() - .is_some_and(|authorising_user| { - !self.services.globals.user_is_local(authorising_user) - }) { - return Err!(Request(InvalidParam( - "Authorising user does not belong to this homeserver" - ))); + }, + _ => {}, + } } } // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. - let statehashid = self.services.state.append_to_state(&pdu).await?; + let statehashid = services().rooms.state.append_to_state(&pdu)?; let pdu_id = self .append_pdu( @@ -904,25 +910,24 @@ impl Service { pdu_json, // Since this PDU references all pdu_leaves we can update the leaves // of the room - once(pdu.event_id.borrow()), + vec![(*pdu.event_id).to_owned()], state_lock, ) - .boxed() .await?; // We set the room state after inserting the pdu, so that we never have a moment // in time where events in the current room state do not exist - self.services + services() + .rooms .state - .set_room_state(&pdu.room_id, statehashid, state_lock); + .set_room_state(room_id, statehashid, state_lock)?; - let mut servers: HashSet = self - .services + let mut servers: HashSet = services() + .rooms .state_cache - .room_servers(&pdu.room_id) - .map(ToOwned::to_owned) - .collect() - .await; + .room_servers(room_id) + .filter_map(Result::ok) + .collect(); // In case we are kicking or banning a user, we need to inform their server of // the change @@ -938,233 +943,210 @@ impl Service { // Remove our server from the server list since it will be added to it by // room_servers() and/or the if statement above - servers.remove(self.services.globals.server_name()); + servers.remove(services().globals.server_name()); - self.services + services() .sending - .send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id) - .await?; + .send_pdu_servers(servers.into_iter(), &pdu_id)?; Ok(pdu.event_id) } /// Append the 
incoming event setting the state snapshot to the state from /// the server that sent the event. - #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_incoming_pdu<'a, Leafs>( - &'a self, - pdu: &'a PduEvent, + #[tracing::instrument(skip_all)] + pub async fn append_incoming_pdu( + &self, + pdu: &PduEvent, pdu_json: CanonicalJsonObject, - new_room_leafs: Leafs, - state_ids_compressed: Arc, + new_room_leaves: Vec, + state_ids_compressed: Arc>, soft_fail: bool, - state_lock: &'a RoomMutexGuard, - ) -> Result> - where - Leafs: Iterator + Send + 'a, - { + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result>> { // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. - self.services + services() + .rooms .state - .set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed) - .await?; + .set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed)?; if soft_fail { - self.services + services() + .rooms .pdu_metadata - .mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); - - self.services + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + services() + .rooms .state - .set_forward_extremities(&pdu.room_id, new_room_leafs, state_lock) - .await; - + .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)?; return Ok(None); } - let pdu_id = self - .append_pdu(pdu, pdu_json, new_room_leafs, state_lock) + let pdu_id = services() + .rooms + .timeline + .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) .await?; Ok(Some(pdu_id)) } - /// Returns an iterator over all PDUs in a room. Unknown rooms produce no - /// items. - #[inline] + /// Returns an iterator over all PDUs in a room. 
pub fn all_pdus<'a>( - &'a self, - user_id: &'a UserId, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - self.pdus(Some(user_id), room_id, None).ignore_err() + &'a self, user_id: &UserId, room_id: &RoomId, + ) -> Result> + 'a> { + self.pdus_after(user_id, room_id, PduCount::min()) } - /// Reverse iteration starting at from. - #[tracing::instrument(skip(self), level = "debug")] - pub fn pdus_rev<'a>( - &'a self, - user_id: Option<&'a UserId>, - room_id: &'a RoomId, - until: Option, - ) -> impl Stream> + Send + 'a { - self.db - .pdus_rev(user_id, room_id, until.unwrap_or_else(PduCount::max)) + /// Returns an iterator over all events and their tokens in a room that + /// happened before the event with id `until` in reverse-chronological + /// order. + #[tracing::instrument(skip(self))] + pub fn pdus_until<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, until: PduCount, + ) -> Result> + 'a> { + self.db.pdus_until(user_id, room_id, until) } - /// Forward iteration starting at from. - #[tracing::instrument(skip(self), level = "debug")] - pub fn pdus<'a>( - &'a self, - user_id: Option<&'a UserId>, - room_id: &'a RoomId, - from: Option, - ) -> impl Stream> + Send + 'a { - self.db - .pdus(user_id, room_id, from.unwrap_or_else(PduCount::min)) + /// Returns an iterator over all events and their token in a room that + /// happened after the event with id `from` in chronological order. + #[tracing::instrument(skip(self))] + pub fn pdus_after<'a>( + &'a self, user_id: &UserId, room_id: &RoomId, from: PduCount, + ) -> Result> + 'a> { + self.db.pdus_after(user_id, room_id, from) } /// Replace a PDU with the redacted form. 
- #[tracing::instrument(name = "redact", level = "debug", skip(self))] - pub async fn redact_pdu( - &self, - event_id: &EventId, - reason: &PduEvent, - shortroomid: ShortRoomId, - ) -> Result { + #[tracing::instrument(skip(self, reason))] + pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { // TODO: Don't reserialize, keep original json - let Ok(pdu_id) = self.get_pdu_id(event_id).await else { - // If event does not exist, just noop - return Ok(()); - }; - - let mut pdu = self.get_pdu_from_id(&pdu_id).await.map_err(|e| { - err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))) - })?; - - if let Ok(content) = pdu.get_content::() { - if let Some(body) = content.body { - self.services - .search - .deindex_pdu(shortroomid, &pdu_id, &body); - } + if let Some(pdu_id) = self.get_pdu_id(event_id)? { + let mut pdu = self + .get_pdu_from_id(&pdu_id)? + .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; + let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?; + pdu.redact(room_version_id, reason)?; + self.replace_pdu( + &pdu_id, + &utils::to_canonical_object(&pdu).map_err(|e| { + error!("Failed to convert PDU to canonical JSON: {}", e); + Error::bad_database("Failed to convert PDU to canonical JSON.") + })?, + &pdu, + )?; } - - let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; - - pdu.redact(&room_version_id, reason)?; - - let obj = utils::to_canonical_object(&pdu).map_err(|e| { - err!(Database(error!(?event_id, ?e, "Failed to convert PDU to canonical JSON"))) - })?; - - self.replace_pdu(&pdu_id, &obj, &pdu).await + // If event does not exist, just noop + Ok(()) } - #[tracing::instrument(name = "backfill", level = "debug", skip(self))] + #[tracing::instrument(skip(self, room_id))] pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { - if self - .services - .state_cache - .room_joined_count(room_id) - .await - 
.is_ok_and(|count| count <= 1) - && !self - .services - .state_accessor - .is_world_readable(room_id) - .await - { - // Room is empty (1 user or none), there is no one that can backfill - return Ok(()); - } - let first_pdu = self - .first_item_in_room(room_id) - .await - .expect("Room is not empty"); + .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? + .next() + .expect("Room is not empty")?; if first_pdu.0 < from { // No backfill required, there are still events between them return Ok(()); } - let power_levels: RoomPowerLevelsEventContent = self - .services + let mut servers: Vec = vec![]; + + // add server names of any trusted key servers if they're in the room + servers.extend( + services() + .rooms + .state_cache + .room_servers(room_id) + .filter_map(Result::ok) + .filter(|server| services().globals.trusted_servers().contains(server)) + .filter(|server| server != services().globals.server_name()), + ); + + // add server names from room aliases on the room ID + let room_aliases = services() + .rooms + .alias + .local_aliases_for_room(room_id) + .collect::, _>>(); + if let Ok(aliases) = &room_aliases { + for alias in aliases { + if alias.server_name() != services().globals.server_name() { + servers.push(alias.server_name().to_owned()); + } + } + } + + // add room ID server name for backfill server + if let Some(server) = room_id.server_name() { + if server != services().globals.server_name() { + servers.push(server.to_owned()); + } + } + + let power_levels: RoomPowerLevelsEventContent = services() + .rooms .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") - .await + .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_str(ev.content.get()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? 
.unwrap_or_default(); - let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| { - if level > &power_levels.users_default - && !self.services.globals.user_is_local(user_id) - { - Some(user_id.server_name()) - } else { - None - } - }); + // add server names of the list of admins in the room for backfill server + servers.extend( + power_levels + .users + .iter() + .filter(|(_, level)| **level > power_levels.users_default) + .map(|(user_id, _)| user_id.server_name()) + .filter(|server| server != &services().globals.server_name()) + .map(ToOwned::to_owned), + ); - let canonical_room_alias_server = once( - self.services - .state_accessor - .get_canonical_alias(room_id) - .await, - ) - .filter_map(Result::ok) - .map(|alias| alias.server_name().to_owned()) - .stream(); + // don't backfill from ourselves (might be noop if we checked it above already) + if let Some(server_index) = servers + .clone() + .into_iter() + .position(|server| server == services().globals.server_name()) + { + servers.remove(server_index); + } - let mut servers = room_mods - .stream() - .map(ToOwned::to_owned) - .chain(canonical_room_alias_server) - .chain( - self.services - .server - .config - .trusted_servers - .iter() - .map(ToOwned::to_owned) - .stream(), - ) - .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)) - .filter_map(|server_name| async move { - self.services - .state_cache - .server_in_room(&server_name, room_id) - .await - .then_some(server_name) - }) - .boxed(); + servers.sort_unstable(); + servers.dedup(); + servers.shuffle(&mut rand::thread_rng()); - while let Some(ref backfill_server) = servers.next().await { + for backfill_server in servers { info!("Asking {backfill_server} for backfill"); - let response = self - .services + let response = services() .sending .send_federation_request( - backfill_server, + &backfill_server, federation::backfill::get_backfill::v1::Request { room_id: room_id.to_owned(), - v: 
vec![first_pdu.1.event_id.clone()], + v: vec![first_pdu.1.event_id.as_ref().to_owned()], limit: uint!(100), }, ) .await; match response { - | Ok(response) => { + Ok(response) => { + let pub_key_map = RwLock::new(BTreeMap::new()); for pdu in response.pdus { - if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await { - debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}"); + if let Err(e) = self.backfill_pdu(&backfill_server, pdu, &pub_key_map).await { + warn!("Failed to add backfilled pdu in room {room_id}: {e}"); } } return Ok(()); }, - | Err(e) => { + Err(e) => { warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}"); }, } @@ -1174,56 +1156,87 @@ impl Service { Ok(()) } - #[tracing::instrument(skip(self, pdu), level = "debug")] - pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box) -> Result<()> { - let (room_id, event_id, value) = - self.services.event_handler.parse_incoming_pdu(&pdu).await?; + #[tracing::instrument(skip(self, pdu, pub_key_map))] + pub async fn backfill_pdu( + &self, origin: &ServerName, pdu: Box, + pub_key_map: &RwLock>>, + ) -> Result<()> { + let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?; // Lock so we cannot backfill the same pdu twice at the same time - let mutex_lock = self - .services - .event_handler - .mutex_federation - .lock(&room_id) - .await; + let mutex = Arc::clone( + services() + .globals + .roomid_mutex_federation + .write() + .await + .entry(room_id.clone()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; // Skip the PDU if we already have it as a timeline event - if let Ok(pdu_id) = self.get_pdu_id(&event_id).await { - debug!("We already know {event_id} at {pdu_id:?}"); + if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(&event_id)? 
{ + info!("We already know {event_id} at {pdu_id:?}"); return Ok(()); } - self.services + services() + .rooms .event_handler - .handle_incoming_pdu(origin, &room_id, &event_id, value, false) - .boxed() + .fetch_required_signing_keys([&value], pub_key_map) .await?; - let value = self.get_pdu_json(&event_id).await?; + services() + .rooms + .event_handler + .handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map) + .await?; - let pdu = self.get_pdu(&event_id).await?; + let value = self.get_pdu_json(&event_id)?.expect("We just created it"); + let pdu = self.get_pdu(&event_id)?.expect("We just created it"); - let shortroomid = self.services.short.get_shortroomid(&room_id).await?; + let shortroomid = services() + .rooms + .short + .get_shortroomid(&room_id)? + .expect("room exists"); - let insert_lock = self.mutex_insert.lock(&room_id).await; + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .await + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().await; - let count: i64 = self.services.globals.next_count().unwrap().try_into()?; - - let pdu_id: RawPduId = PduId { - shortroomid, - shorteventid: PduCount::Backfilled(validated!(0 - count)), - } - .into(); + let count = services().globals.next_count()?; + let mut pdu_id = shortroomid.to_be_bytes().to_vec(); + pdu_id.extend_from_slice(&0_u64.to_be_bytes()); + pdu_id.extend_from_slice(&(u64::MAX - count).to_be_bytes()); // Insert pdu - self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value); + self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?; drop(insert_lock); if pdu.kind == TimelineEventType::RoomMessage { - let content: ExtractBody = pdu.get_content()?; + #[derive(Deserialize)] + struct ExtractBody { + body: Option, + } + + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + if let Some(body) = content.body { - 
self.services.search.index_pdu(shortroomid, &pdu_id, &body); + services() + .rooms + .search + .index_pdu(shortroomid, &pdu_id, &body)?; } } drop(mutex_lock); @@ -1232,76 +1245,15 @@ impl Service { Ok(()) } } +#[cfg(test)] +mod tests { + use super::*; -#[implement(Service)] -#[tracing::instrument(skip_all, level = "debug")] -async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Result<()> { - match &pdu.kind { - | TimelineEventType::RoomEncryption => { - return Err!(Request(Forbidden(error!("Encryption not supported in admins room.")))); - }, - | TimelineEventType::RoomMember => { - let target = pdu - .state_key() - .filter(|v| v.starts_with('@')) - .unwrap_or(sender.as_str()); - - let server_user = &self.services.globals.server_user.to_string(); - - let content: RoomMemberEventContent = pdu.get_content()?; - match content.membership { - | MembershipState::Leave => { - if target == server_user { - return Err!(Request(Forbidden(error!( - "Server user cannot leave the admins room." - )))); - } - - let count = self - .services - .state_cache - .room_members(&pdu.room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .ready_filter(|user| *user != target) - .boxed() - .count() - .await; - - if count < 2 { - return Err!(Request(Forbidden(error!( - "Last admin cannot leave the admins room." - )))); - } - }, - - | MembershipState::Ban if pdu.state_key().is_some() => { - if target == server_user { - return Err!(Request(Forbidden(error!( - "Server cannot be banned from admins room." - )))); - } - - let count = self - .services - .state_cache - .room_members(&pdu.room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .ready_filter(|user| *user != target) - .boxed() - .count() - .await; - - if count < 2 { - return Err!(Request(Forbidden(error!( - "Last admin cannot be banned from admins room." 
- )))); - } - }, - | _ => {}, - } - }, - | _ => {}, + #[test] + fn comparisons() { + assert!(PduCount::Normal(1) < PduCount::Normal(2)); + assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1)); + assert!(PduCount::Normal(1) > PduCount::Backfilled(1)); + assert!(PduCount::Backfilled(1) < PduCount::Normal(1)); } - - Ok(()) } diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index a81ee95c..6d98937f 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -1,63 +1,27 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; -use conduwuit::{ - Result, Server, debug_info, trace, - utils::{self, IterStream}, -}; -use futures::StreamExt; use ruma::{ - OwnedRoomId, OwnedUserId, RoomId, UserId, api::federation::transactions::edu::{Edu, TypingContent}, events::SyncEphemeralRoomEvent, + OwnedRoomId, OwnedUserId, RoomId, UserId, }; -use tokio::sync::{RwLock, broadcast}; +use tokio::sync::{broadcast, RwLock}; +use tracing::debug; -use crate::{Dep, globals, sending, sending::EduBuf, users}; +use crate::{services, utils, Result}; pub struct Service { - server: Arc, - services: Services, - /// u64 is unix timestamp of timeout - pub typing: RwLock>>, - /// timestamp of the last change to typing users - pub last_typing_update: RwLock>, + pub typing: RwLock>>, // u64 is unix timestamp of timeout + pub last_typing_update: RwLock>, /* timestamp of the last change to typing + * users */ pub typing_update_sender: broadcast::Sender, } -struct Services { - globals: Dep, - sending: Dep, - users: Dep, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - server: args.server.clone(), - services: Services { - globals: args.depend::("globals"), - sending: args.depend::("sending"), - users: args.depend::("users"), - }, - typing: RwLock::new(BTreeMap::new()), - last_typing_update: RwLock::new(BTreeMap::new()), - typing_update_sender: 
broadcast::channel(100).0, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - impl Service { /// Sets a user as typing until the timeout timestamp is reached or /// roomtyping_remove is called. - pub async fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { - debug_info!("typing started {user_id:?} in {room_id:?} timeout:{timeout:?}"); + pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { + debug!("typing add {:?} in {:?} timeout:{:?}", user_id, room_id, timeout); // update clients self.typing .write() @@ -65,19 +29,15 @@ impl Service { .entry(room_id.to_owned()) .or_default() .insert(user_id.to_owned(), timeout); - self.last_typing_update .write() .await - .insert(room_id.to_owned(), self.services.globals.next_count()?); - - if self.typing_update_sender.send(room_id.to_owned()).is_err() { - trace!("receiver found what it was looking for and is no longer interested"); - } + .insert(room_id.to_owned(), services().globals.next_count()?); + _ = self.typing_update_sender.send(room_id.to_owned()); // update federation - if self.services.globals.user_is_local(user_id) { - self.federation_send(room_id, user_id, true).await?; + if user_id.server_name() == services().globals.server_name() { + self.federation_send(room_id, user_id, true)?; } Ok(()) @@ -85,7 +45,7 @@ impl Service { /// Removes a user from typing before the timeout is reached. 
pub async fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - debug_info!("typing stopped {user_id:?} in {room_id:?}"); + debug!("typing remove {:?} in {:?}", user_id, room_id); // update clients self.typing .write() @@ -93,31 +53,29 @@ impl Service { .entry(room_id.to_owned()) .or_default() .remove(user_id); - self.last_typing_update .write() .await - .insert(room_id.to_owned(), self.services.globals.next_count()?); - - if self.typing_update_sender.send(room_id.to_owned()).is_err() { - trace!("receiver found what it was looking for and is no longer interested"); - } + .insert(room_id.to_owned(), services().globals.next_count()?); + _ = self.typing_update_sender.send(room_id.to_owned()); // update federation - if self.services.globals.user_is_local(user_id) { - self.federation_send(room_id, user_id, false).await?; + if user_id.server_name() == services().globals.server_name() { + self.federation_send(room_id, user_id, false)?; } Ok(()) } - pub async fn wait_for_update(&self, room_id: &RoomId) { + pub async fn wait_for_update(&self, room_id: &RoomId) -> Result<()> { let mut receiver = self.typing_update_sender.subscribe(); while let Ok(next) = receiver.recv().await { if next == room_id { break; } } + + Ok(()) } /// Makes sure that typing events with old timestamps get removed. 
@@ -136,30 +94,28 @@ impl Service { removable.push(user.clone()); } } + + drop(typing); }; if !removable.is_empty() { let typing = &mut self.typing.write().await; let room = typing.entry(room_id.to_owned()).or_default(); for user in &removable { - debug_info!("typing timeout {user:?} in {room_id:?}"); + debug!("typing maintain remove {:?} in {:?}", &user, room_id); room.remove(user); } - // update clients self.last_typing_update .write() .await - .insert(room_id.to_owned(), self.services.globals.next_count()?); - - if self.typing_update_sender.send(room_id.to_owned()).is_err() { - trace!("receiver found what it was looking for and is no longer interested"); - } + .insert(room_id.to_owned(), services().globals.next_count()?); + _ = self.typing_update_sender.send(room_id.to_owned()); // update federation - for user in &removable { - if self.services.globals.user_is_local(user) { - self.federation_send(room_id, user, false).await?; + for user in removable { + if user.server_name() == services().globals.server_name() { + self.federation_send(room_id, &user, false)?; } } } @@ -181,59 +137,35 @@ impl Service { /// Returns a new typing EDU. 
pub async fn typings_all( - &self, - room_id: &RoomId, - sender_user: &UserId, + &self, room_id: &RoomId, ) -> Result> { - let room_typing_indicators = self.typing.read().await.get(room_id).cloned(); - - let Some(typing_indicators) = room_typing_indicators else { - return Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { user_ids: Vec::new() }, - }); - }; - - let user_ids: Vec<_> = typing_indicators - .into_keys() - .stream() - .filter_map(|typing_user_id| async move { - (!self - .services - .users - .user_is_ignored(&typing_user_id, sender_user) - .await) - .then_some(typing_user_id) - }) - .collect() - .await; - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { user_ids }, + content: ruma::events::typing::TypingEventContent { + user_ids: self + .typing + .read() + .await + .get(room_id) + .map(|m| m.keys().cloned().collect()) + .unwrap_or_default(), + }, }) } - async fn federation_send( - &self, - room_id: &RoomId, - user_id: &UserId, - typing: bool, - ) -> Result<()> { + fn federation_send(&self, room_id: &RoomId, user_id: &UserId, typing: bool) -> Result<()> { debug_assert!( - self.services.globals.user_is_local(user_id), + user_id.server_name() == services().globals.server_name(), "tried to broadcast typing status of remote user", ); - - if !self.server.config.allow_outgoing_typing { + if !services().globals.config.allow_outgoing_typing { return Ok(()); } - let content = TypingContent::new(room_id.to_owned(), user_id.to_owned(), typing); - let edu = Edu::Typing(content); + let edu = Edu::Typing(TypingContent::new(room_id.to_owned(), user_id.to_owned(), typing)); - let mut buf = EduBuf::new(); - serde_json::to_writer(&mut buf, &edu).expect("Serialized Edu::Typing"); - - self.services.sending.send_edu_room(room_id, buf).await?; + services() + .sending + .send_edu_room(room_id, serde_json::to_vec(&edu).expect("Serialized Edu::Typing"))?; Ok(()) } diff --git a/src/service/rooms/user/data.rs 
b/src/service/rooms/user/data.rs new file mode 100644 index 00000000..2fd1c29e --- /dev/null +++ b/src/service/rooms/user/data.rs @@ -0,0 +1,22 @@ +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; + +use crate::Result; + +pub trait Data: Send + Sync { + fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + + fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + // Returns the count at which the last reset_notification_counts was called + fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) -> Result<()>; + + fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result>; + + fn get_shared_rooms<'a>( + &'a self, users: Vec, + ) -> Result> + 'a>>; +} diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index bd76f1f4..e1741782 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,129 +1,41 @@ -use std::sync::Arc; +mod data; -use conduwuit::{Result, implement}; -use database::{Database, Deserialized, Map}; -use ruma::{RoomId, UserId}; +pub use data::Data; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; -use crate::{Dep, globals, rooms, rooms::short::ShortStateHash}; +use crate::Result; pub struct Service { - db: Data, - services: Services, + pub db: &'static dyn Data, } -struct Data { - db: Arc, - userroomid_notificationcount: Arc, - userroomid_highlightcount: Arc, - roomuserid_lastnotificationread: Arc, - roomsynctoken_shortstatehash: Arc, -} - -struct Services { - globals: Dep, - short: Dep, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - db: args.db.clone(), - userroomid_notificationcount: args.db["userroomid_notificationcount"].clone(), - 
userroomid_highlightcount: args.db["userroomid_highlightcount"].clone(), - roomuserid_lastnotificationread: args.db["userroomid_highlightcount"].clone(), - roomsynctoken_shortstatehash: args.db["roomsynctoken_shortstatehash"].clone(), - }, - - services: Services { - globals: args.depend::("globals"), - short: args.depend::("rooms::short"), - }, - })) +impl Service { + pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + self.db.reset_notification_counts(user_id, room_id) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -#[implement(Service)] -pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) { - let userroom_id = (user_id, room_id); - self.db.userroomid_highlightcount.put(userroom_id, 0_u64); - self.db.userroomid_notificationcount.put(userroom_id, 0_u64); - - let roomuser_id = (room_id, user_id); - let count = self.services.globals.next_count().unwrap(); - self.db - .roomuserid_lastnotificationread - .put(roomuser_id, count); -} - -#[implement(Service)] -pub async fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - let key = (user_id, room_id); - self.db - .userroomid_notificationcount - .qry(&key) - .await - .deserialized() - .unwrap_or(0) -} - -#[implement(Service)] -pub async fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - let key = (user_id, room_id); - self.db - .userroomid_highlightcount - .qry(&key) - .await - .deserialized() - .unwrap_or(0) -} - -#[implement(Service)] -pub async fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> u64 { - let key = (room_id, user_id); - self.db - .roomuserid_lastnotificationread - .qry(&key) - .await - .deserialized() - .unwrap_or(0) -} - -#[implement(Service)] -pub async fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: ShortStateHash, -) { - let shortroomid = self - .services - .short - 
.get_shortroomid(room_id) - .await - .expect("room exists"); - - let _cork = self.db.db.cork(); - let key: &[u64] = &[shortroomid, token]; - self.db - .roomsynctoken_shortstatehash - .put(key, shortstatehash); -} - -#[implement(Service)] -pub async fn get_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, -) -> Result { - let shortroomid = self.services.short.get_shortroomid(room_id).await?; - - let key: &[u64] = &[shortroomid, token]; - self.db - .roomsynctoken_shortstatehash - .qry(key) - .await - .deserialized() + pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.notification_count(user_id, room_id) + } + + pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.highlight_count(user_id, room_id) + } + + pub fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.last_notification_read(user_id, room_id) + } + + pub fn associate_token_shortstatehash(&self, room_id: &RoomId, token: u64, shortstatehash: u64) -> Result<()> { + self.db + .associate_token_shortstatehash(room_id, token, shortstatehash) + } + + pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { + self.db.get_token_shortstatehash(room_id, token) + } + + pub fn get_shared_rooms(&self, users: Vec) -> Result>> { + self.db.get_shared_rooms(users) + } } diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index c7fae11f..20155c5f 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -1,53 +1,41 @@ -use std::{fmt::Debug, mem}; +use std::{fmt::Debug, mem, time::Duration}; use bytes::BytesMut; -use conduwuit::{Err, Result, debug_error, err, trace, utils, warn}; -use reqwest::Client; -use ruma::api::{ - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, appservice::Registration, -}; +use ruma::api::{appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, 
SendAccessToken}; +use tracing::warn; + +use crate::{services, utils, Error, Result}; /// Sends a request to an appservice /// -/// Only returns Ok(None) if there is no url specified in the appservice +/// Only returns None if there is no url specified in the appservice /// registration file -pub(crate) async fn send_request( - client: &Client, - registration: Registration, - request: T, -) -> Result> +pub(crate) async fn send_request(registration: Registration, request: T) -> Result> where - T: OutgoingRequest + Debug + Send, + T: OutgoingRequest + Debug, { - const VERSIONS: [MatrixVersion; 1] = [MatrixVersion::V1_7]; - - let Some(dest) = registration.url else { + let Some(destination) = registration.url else { return Ok(None); }; - if dest == *"null" || dest.is_empty() { - return Ok(None); - } - - trace!("Appservice URL \"{dest}\", Appservice ID: {}", registration.id); - let hs_token = registration.hs_token.as_str(); + let mut http_request = request - .try_into_http_request::( - &dest, - SendAccessToken::IfRequired(hs_token), - &VERSIONS, - ) + .try_into_http_request::(&destination, SendAccessToken::IfRequired(hs_token), &[MatrixVersion::V1_0]) .map_err(|e| { - err!(BadServerResponse( - warn!(appservice = %registration.id, "Failed to find destination {dest}: {e:?}") - )) - })? + warn!("Failed to find destination {}: {}", destination, e); + Error::BadServerResponse("Invalid destination") + }) + .unwrap() .map(BytesMut::freeze); let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); - let symbol = if old_path_and_query.contains('?') { "&" } else { "?" }; + let symbol = if old_path_and_query.contains('?') { + "&" + } else { + "?" 
+ }; parts.path_and_query = Some( (old_path_and_query + symbol + "access_token=" + hs_token) @@ -56,12 +44,25 @@ where ); *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); - let reqwest_request = reqwest::Request::try_from(http_request)?; + let mut reqwest_request = reqwest::Request::try_from(http_request)?; - let mut response = client.execute(reqwest_request).await.map_err(|e| { - warn!("Could not send request to appservice \"{}\" at {dest}: {e:?}", registration.id); - e - })?; + *reqwest_request.timeout_mut() = Some(Duration::from_secs(120)); + + let url = reqwest_request.url().clone(); + + let mut response = services() + .globals + .client + .appservice + .execute(reqwest_request) + .await + .map_err(|e| { + warn!( + "Could not send request to appservice {} at {}: {}", + registration.id, destination, e + ); + e + })?; // reqwest::Response -> http::Response conversion let status = response.status(); @@ -75,14 +76,19 @@ where .expect("http::response::Builder is usable"), ); - let body = response.bytes().await?; // TODO: handle timeout + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error: {}", e); + Vec::new().into() + }); // TODO: handle timeout if !status.is_success() { - debug_error!("Appservice response bytes: {:?}", utils::string_from_bytes(&body)); - return Err!(BadServerResponse(warn!( - "Appservice \"{}\" returned unsuccessful HTTP response {status} at {dest}", - registration.id - ))); + warn!( + "Appservice returned bad response {} {}\n{}\n{:?}", + destination, + status, + url, + utils::string_from_bytes(&body) + ); } let response = T::IncomingResponse::try_from_http_response( @@ -91,10 +97,8 @@ where .expect("reqwest body is valid http body"), ); - response.map(Some).map_err(|e| { - err!(BadServerResponse(warn!( - "Appservice \"{}\" returned invalid/malformed response bytes {dest}: {e}", - registration.id - ))) + response.map(Some).map_err(|_| { + warn!("Appservice returned invalid response 
bytes {}\n{}", destination, url); + Error::BadServerResponse("Server returned bad response.") }) } diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index a6bcc2b2..d5b0923e 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -1,258 +1,22 @@ -use std::{fmt::Debug, sync::Arc}; +use ruma::ServerName; -use conduwuit::{ - Error, Result, at, utils, - utils::{ReadyExt, stream::TryIgnore}, -}; -use database::{Database, Deserialized, Map}; -use futures::{Stream, StreamExt}; -use ruma::{OwnedServerName, ServerName, UserId}; +use super::{OutgoingDestination, SendingEventType}; +use crate::Result; -use super::{Destination, SendingEvent}; -use crate::{Dep, globals}; +type OutgoingSendingIter<'a> = Box, OutgoingDestination, SendingEventType)>> + 'a>; +type SendingEventTypeIter<'a> = Box, SendingEventType)>> + 'a>; -pub(super) type OutgoingItem = (Key, SendingEvent, Destination); -pub(super) type SendingItem = (Key, SendingEvent); -pub(super) type QueueItem = (Key, SendingEvent); -pub(super) type Key = Vec; - -pub struct Data { - servercurrentevent_data: Arc, - servernameevent_data: Arc, - servername_educount: Arc, - pub(super) db: Arc, - services: Services, -} - -struct Services { - globals: Dep, -} - -impl Data { - pub(super) fn new(args: &crate::Args<'_>) -> Self { - let db = &args.db; - Self { - servercurrentevent_data: db["servercurrentevent_data"].clone(), - servernameevent_data: db["servernameevent_data"].clone(), - servername_educount: db["servername_educount"].clone(), - db: args.db.clone(), - services: Services { - globals: args.depend::("globals"), - }, - } - } - - pub(super) fn delete_active_request(&self, key: &[u8]) { - self.servercurrentevent_data.remove(key); - } - - pub(super) async fn delete_all_active_requests_for(&self, destination: &Destination) { - let prefix = destination.get_prefix(); - self.servercurrentevent_data - .raw_keys_prefix(&prefix) - .ignore_err() - .ready_for_each(|key| 
self.servercurrentevent_data.remove(key)) - .await; - } - - pub(super) async fn delete_all_requests_for(&self, destination: &Destination) { - let prefix = destination.get_prefix(); - self.servercurrentevent_data - .raw_keys_prefix(&prefix) - .ignore_err() - .ready_for_each(|key| self.servercurrentevent_data.remove(key)) - .await; - - self.servernameevent_data - .raw_keys_prefix(&prefix) - .ignore_err() - .ready_for_each(|key| self.servernameevent_data.remove(key)) - .await; - } - - pub(super) fn mark_as_active<'a, I>(&self, events: I) - where - I: Iterator, - { - events - .filter(|(key, _)| !key.is_empty()) - .for_each(|(key, val)| { - let val = if let SendingEvent::Edu(val) = &val { &**val } else { &[] }; - - self.servercurrentevent_data.insert(key, val); - self.servernameevent_data.remove(key); - }); - } - - #[inline] - pub fn active_requests(&self) -> impl Stream + Send + '_ { - self.servercurrentevent_data - .raw_stream() - .ignore_err() - .map(|(key, val)| { - let (dest, event) = - parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); - - (key.to_vec(), event, dest) - }) - } - - #[inline] - pub fn active_requests_for( - &self, - destination: &Destination, - ) -> impl Stream + Send + '_ + use<'_> { - let prefix = destination.get_prefix(); - self.servercurrentevent_data - .raw_stream_from(&prefix) - .ignore_err() - .ready_take_while(move |(key, _)| key.starts_with(&prefix)) - .map(|(key, val)| { - let (_, event) = - parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); - - (key.to_vec(), event) - }) - } - - pub(super) fn queue_requests<'a, I>(&self, requests: I) -> Vec> - where - I: Iterator + Clone + Debug + Send, - { - let keys: Vec<_> = requests - .clone() - .map(|(event, dest)| { - let mut key = dest.get_prefix(); - if let SendingEvent::Pdu(value) = event { - key.extend(value.as_ref()); - } else { - let count = self.services.globals.next_count().unwrap(); - key.extend(&count.to_be_bytes()); - } - - key - }) - .collect(); 
- - self.servernameevent_data.insert_batch( - keys.iter() - .map(Vec::as_slice) - .zip(requests.map(at!(0))) - .map(|(key, event)| { - let value = if let SendingEvent::Edu(value) = &event { - &**value - } else { - &[] - }; - - (key, value) - }), - ); - - keys - } - - pub fn queued_requests( - &self, - destination: &Destination, - ) -> impl Stream + Send + '_ + use<'_> { - let prefix = destination.get_prefix(); - self.servernameevent_data - .raw_stream_from(&prefix) - .ignore_err() - .ready_take_while(move |(key, _)| key.starts_with(&prefix)) - .map(|(key, val)| { - let (_, event) = - parse_servercurrentevent(key, val).expect("invalid servercurrentevent"); - - (key.to_vec(), event) - }) - } - - pub(super) fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) { - self.servername_educount.raw_put(server_name, last_count); - } - - pub async fn get_latest_educount(&self, server_name: &ServerName) -> u64 { - self.servername_educount - .get(server_name) - .await - .deserialized() - .unwrap_or(0) - } -} - -fn parse_servercurrentevent(key: &[u8], value: &[u8]) -> Result<(Destination, SendingEvent)> { - // Appservices start with a plus - Ok::<_, Error>(if key.starts_with(b"+") { - let mut parts = key[1..].splitn(2, |&b| b == 0xFF); - - let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - - ( - Destination::Appservice(server), - if value.is_empty() { - SendingEvent::Pdu(event.into()) - } else { - SendingEvent::Edu(value.into()) - }, - ) - } else if key.starts_with(b"$") { - let mut parts = key[1..].splitn(3, |&b| b == 0xFF); - - let user = parts.next().expect("splitn always returns one element"); - let user_string = utils::str_from_bytes(user) - .map_err(|_| Error::bad_database("Invalid 
user string in servercurrentevent"))?; - let user_id = UserId::parse(user_string) - .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; - - let pushkey = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let pushkey_string = utils::string_from_bytes(pushkey) - .map_err(|_| Error::bad_database("Invalid pushkey in servercurrentevent"))?; - - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - - ( - Destination::Push(user_id.to_owned(), pushkey_string), - if value.is_empty() { - SendingEvent::Pdu(event.into()) - } else { - // I'm pretty sure this should never be called - SendingEvent::Edu(value.into()) - }, - ) - } else { - let mut parts = key.splitn(2, |&b| b == 0xFF); - - let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - - ( - Destination::Federation(OwnedServerName::parse(&server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?), - if value.is_empty() { - SendingEvent::Pdu(event.into()) - } else { - SendingEvent::Edu(value.into()) - }, - ) - }) +pub trait Data: Send + Sync { + fn active_requests(&self) -> OutgoingSendingIter<'_>; + fn active_requests_for(&self, outgoing_kind: &OutgoingDestination) -> SendingEventTypeIter<'_>; + fn delete_active_request(&self, key: Vec) -> Result<()>; + fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingDestination) -> Result<()>; + fn delete_all_requests_for(&self, outgoing_kind: &OutgoingDestination) -> Result<()>; + fn queue_requests(&self, requests: &[(&OutgoingDestination, SendingEventType)]) -> Result>>; + fn queued_requests<'a>( + &'a self, outgoing_kind: 
&OutgoingDestination, + ) -> Box)>> + 'a>; + fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()>; + fn set_latest_educount(&self, server_name: &ServerName, educount: u64) -> Result<()>; + fn get_latest_educount(&self, server_name: &ServerName) -> Result; } diff --git a/src/service/sending/dest.rs b/src/service/sending/dest.rs deleted file mode 100644 index 4099d372..00000000 --- a/src/service/sending/dest.rs +++ /dev/null @@ -1,53 +0,0 @@ -use std::fmt::Debug; - -use conduwuit::implement; -use ruma::{OwnedServerName, OwnedUserId}; - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum Destination { - Appservice(String), - Push(OwnedUserId, String), // user and pushkey - Federation(OwnedServerName), -} - -#[implement(Destination)] -#[must_use] -pub(super) fn get_prefix(&self) -> Vec { - match self { - | Self::Federation(server) => { - let len = server.as_bytes().len().saturating_add(1); - - let mut p = Vec::with_capacity(len); - p.extend_from_slice(server.as_bytes()); - p.push(0xFF); - p - }, - | Self::Appservice(server) => { - let sigil = b"+"; - let len = sigil.len().saturating_add(server.len()).saturating_add(1); - - let mut p = Vec::with_capacity(len); - p.extend_from_slice(sigil); - p.extend_from_slice(server.as_bytes()); - p.push(0xFF); - p - }, - | Self::Push(user, pushkey) => { - let sigil = b"$"; - let len = sigil - .len() - .saturating_add(user.as_bytes().len()) - .saturating_add(1) - .saturating_add(pushkey.len()) - .saturating_add(1); - - let mut p = Vec::with_capacity(len); - p.extend_from_slice(sigil); - p.extend_from_slice(user.as_bytes()); - p.push(0xFF); - p.extend_from_slice(pushkey.as_bytes()); - p.push(0xFF); - p - }, - } -} diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 08ca7010..73cde12e 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -1,321 +1,265 @@ -mod appservice; -mod data; -mod dest; -mod sender; - use std::{ + cmp, + collections::{hash_map::Entry, BTreeMap, 
HashMap, HashSet}, fmt::Debug, - hash::{DefaultHasher, Hash, Hasher}, - iter::once, sync::Arc, + time::{Duration, Instant}, }; -use async_trait::async_trait; -use conduwuit::{ - Result, Server, debug, debug_warn, err, error, - smallvec::SmallVec, - utils::{ReadyExt, TryReadyExt, available_parallelism, math::usize_from_u64_truncated}, - warn, -}; -use futures::{FutureExt, Stream, StreamExt}; +use base64::{engine::general_purpose, Engine as _}; +pub use data::Data; +use federation::transactions::send_transaction_message; +use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ - RoomId, ServerName, UserId, - api::{OutgoingRequest, appservice::Registration}, + api::{ + appservice::Registration, + federation::{ + self, + transactions::edu::{ + DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, ReceiptData, ReceiptMap, + }, + }, + OutgoingRequest, + }, + device_id, + events::{push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType}, + push, uint, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedUserId, RoomId, ServerName, UInt, UserId, }; -use tokio::{task, task::JoinSet}; +use tokio::sync::{oneshot, Mutex, Semaphore}; +use tracing::{error, warn}; -use self::data::Data; -pub use self::{ - dest::Destination, - sender::{EDU_LIMIT, PDU_LIMIT}, -}; -use crate::{ - Dep, account_data, client, federation, globals, presence, pusher, rooms, - rooms::timeline::RawPduId, users, -}; +use crate::{service::presence::Presence, services, utils::calculate_hash, Config, Error, PduEvent, Result}; + +pub mod appservice; +pub mod data; +pub mod send; +pub use send::FedDest; + +const SELECT_EDU_LIMIT: usize = 16; pub struct Service { - pub db: Data, - server: Arc, - services: Services, - channels: Vec<(loole::Sender, loole::Receiver)>, + db: &'static dyn Data, + + /// The state for a given state hash. 
+ pub(super) maximum_requests: Arc, + pub sender: loole::Sender<(OutgoingDestination, SendingEventType, Vec)>, + receiver: Mutex)>>, + startup_netburst: bool, + startup_netburst_keep: i64, + timeout: u64, } -struct Services { - client: Dep, - globals: Dep, - state: Dep, - state_cache: Dep, - user: Dep, - users: Dep, - presence: Dep, - read_receipt: Dep, - timeline: Dep, - account_data: Dep, - appservice: Dep, - pusher: Dep, - federation: Dep, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -struct Msg { - dest: Destination, - event: SendingEvent, - queue_id: Vec, -} - -#[allow(clippy::module_name_repetitions)] #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum SendingEvent { - Pdu(RawPduId), // pduid - Edu(EduBuf), // edu json - Flush, // none +pub enum OutgoingDestination { + Appservice(String), + Push(OwnedUserId, String), // user and pushkey + Normal(OwnedServerName), } -pub type EduBuf = SmallVec<[u8; EDU_BUF_CAP]>; -pub type EduVec = SmallVec<[EduBuf; EDU_VEC_CAP]>; +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[allow(clippy::module_name_repetitions)] +pub enum SendingEventType { + Pdu(Vec), // pduid + Edu(Vec), // pdu json + Flush, // none +} -const EDU_BUF_CAP: usize = 128; -const EDU_VEC_CAP: usize = 1; +enum TransactionStatus { + /// Currently running (for the first time) + Running, + /// Failed, backing off for a retry + Failed { + failures: u32, + waker: Option>, + }, + /// Currently retrying + Retrying { + /// number of times failed + failures: u32, + }, +} -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - let num_senders = num_senders(&args); - Ok(Arc::new(Self { - db: Data::new(&args), - server: args.server.clone(), - services: Services { - client: args.depend::("client"), - globals: args.depend::("globals"), - state: args.depend::("rooms::state"), - state_cache: args.depend::("rooms::state_cache"), - user: args.depend::("rooms::user"), - users: args.depend::("users"), - presence: 
args.depend::("presence"), - read_receipt: args.depend::("rooms::read_receipt"), - timeline: args.depend::("rooms::timeline"), - account_data: args.depend::("account_data"), - appservice: args.depend::("appservice"), - pusher: args.depend::("pusher"), - federation: args.depend::("federation"), - }, - channels: (0..num_senders).map(|_| loole::unbounded()).collect(), - })) - } - - async fn worker(self: Arc) -> Result { - let mut senders = - self.channels - .iter() - .enumerate() - .fold(JoinSet::new(), |mut joinset, (id, _)| { - let self_ = self.clone(); - let worker = self_.sender(id); - let worker = if self.unconstrained() { - task::unconstrained(worker).boxed() - } else { - worker.boxed() - }; - - let runtime = self.server.runtime(); - let _abort = joinset.spawn_on(worker, runtime); - joinset - }); - - while let Some(ret) = senders.join_next_with_id().await { - match ret { - | Ok((id, _)) => { - debug!(?id, "sender worker finished"); - }, - | Err(error) => { - error!(id = ?error.id(), ?error, "sender worker finished"); - }, - } - } - - Ok(()) - } - - fn interrupt(&self) { - for (sender, _) in &self.channels { - if !sender.is_closed() { - sender.close(); - } - } - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } - - fn unconstrained(&self) -> bool { true } +/// A control-flow enum to dictate what the handler should do after (trying to) +/// prepare a transaction +enum TransactionPrepOutcome { + Send(Vec), + Wake(OutgoingDestination), + Nothing, } impl Service { - #[tracing::instrument(skip(self, pdu_id, user, pushkey), level = "debug")] - pub fn send_pdu_push(&self, pdu_id: &RawPduId, user: &UserId, pushkey: String) -> Result { - let dest = Destination::Push(user.to_owned(), pushkey); - let event = SendingEvent::Pdu(*pdu_id); - let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(once((&event, &dest))); - self.dispatch(Msg { - dest, - event, - queue_id: keys.into_iter().next().expect("request queue key"), + pub fn 
build(db: &'static dyn Data, config: &Config) -> Arc { + let (sender, receiver) = loole::unbounded(); + Arc::new(Self { + db, + sender, + receiver: Mutex::new(receiver), + maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), + startup_netburst: config.startup_netburst, + startup_netburst_keep: config.startup_netburst_keep, + timeout: config.sender_timeout, }) } - #[tracing::instrument(skip(self), level = "debug")] - pub fn send_pdu_appservice(&self, appservice_id: String, pdu_id: RawPduId) -> Result { - let dest = Destination::Appservice(appservice_id); - let event = SendingEvent::Pdu(pdu_id); - let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(once((&event, &dest))); - self.dispatch(Msg { - dest, - event, - queue_id: keys.into_iter().next().expect("request queue key"), - }) + #[tracing::instrument(skip(self, pdu_id, user, pushkey))] + pub fn send_pdu_push(&self, pdu_id: &[u8], user: &UserId, pushkey: String) -> Result<()> { + let outgoing_kind = OutgoingDestination::Push(user.to_owned(), pushkey); + let event = SendingEventType::Pdu(pdu_id.to_owned()); + let _cork = services().globals.db.cork()?; + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); + + Ok(()) } - #[tracing::instrument(skip(self, room_id, pdu_id), level = "debug")] - pub async fn send_pdu_room(&self, room_id: &RoomId, pdu_id: &RawPduId) -> Result { - let servers = self - .services + #[tracing::instrument(skip(self))] + pub fn send_pdu_appservice(&self, appservice_id: String, pdu_id: Vec) -> Result<()> { + let outgoing_kind = OutgoingDestination::Appservice(appservice_id); + let event = SendingEventType::Pdu(pdu_id); + let _cork = services().globals.db.cork()?; + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); + + Ok(()) + } 
+ + #[tracing::instrument(skip(self, room_id, pdu_id))] + pub fn send_pdu_room(&self, room_id: &RoomId, pdu_id: &[u8]) -> Result<()> { + let servers = services() + .rooms .state_cache .room_servers(room_id) - .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)); + .filter_map(Result::ok) + .filter(|server| &**server != services().globals.server_name()); - self.send_pdu_servers(servers, pdu_id).await + self.send_pdu_servers(servers, pdu_id) } - #[tracing::instrument(skip(self, servers, pdu_id), level = "debug")] - pub async fn send_pdu_servers<'a, S>(&self, servers: S, pdu_id: &RawPduId) -> Result - where - S: Stream + Send + 'a, - { + #[tracing::instrument(skip(self, servers, pdu_id))] + pub fn send_pdu_servers>(&self, servers: I, pdu_id: &[u8]) -> Result<()> { let requests = servers - .map(|server| { - (Destination::Federation(server.into()), SendingEvent::Pdu(pdu_id.to_owned())) - }) - .collect::>() - .await; - - let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); - - for ((dest, event), queue_id) in requests.into_iter().zip(keys) { - self.dispatch(Msg { dest, event, queue_id })?; + .into_iter() + .map(|server| (OutgoingDestination::Normal(server), SendingEventType::Pdu(pdu_id.to_owned()))) + .collect::>(); + let _cork = services().globals.db.cork()?; + let keys = self.db.queue_requests( + &requests + .iter() + .map(|(o, e)| (o, e.clone())) + .collect::>(), + )?; + for ((outgoing_kind, event), key) in requests.into_iter().zip(keys) { + self.sender + .send((outgoing_kind.clone(), event, key)) + .unwrap(); } Ok(()) } - #[tracing::instrument(skip(self, server, serialized), level = "debug")] - pub fn send_edu_server(&self, server: &ServerName, serialized: EduBuf) -> Result { - let dest = Destination::Federation(server.to_owned()); - let event = SendingEvent::Edu(serialized); - let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(once((&event, &dest))); - self.dispatch(Msg { 
- dest, - event, - queue_id: keys.into_iter().next().expect("request queue key"), - }) + #[tracing::instrument(skip(self, server, serialized))] + pub fn send_edu_server(&self, server: &ServerName, serialized: Vec) -> Result<()> { + let outgoing_kind = OutgoingDestination::Normal(server.to_owned()); + let event = SendingEventType::Edu(serialized); + let _cork = services().globals.db.cork()?; + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); + + Ok(()) } - #[tracing::instrument(skip(self, room_id, serialized), level = "debug")] - pub async fn send_edu_room(&self, room_id: &RoomId, serialized: EduBuf) -> Result { - let servers = self - .services + #[tracing::instrument(skip(self, room_id, serialized))] + pub fn send_edu_room(&self, room_id: &RoomId, serialized: Vec) -> Result<()> { + let servers = services() + .rooms .state_cache .room_servers(room_id) - .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)); + .filter_map(Result::ok) + .filter(|server| &**server != services().globals.server_name()); - self.send_edu_servers(servers, serialized).await + self.send_edu_servers(servers, serialized) } - #[tracing::instrument(skip(self, servers, serialized), level = "debug")] - pub async fn send_edu_servers<'a, S>(&self, servers: S, serialized: EduBuf) -> Result - where - S: Stream + Send + 'a, - { + #[tracing::instrument(skip(self, servers, serialized))] + pub fn send_edu_servers>(&self, servers: I, serialized: Vec) -> Result<()> { let requests = servers - .map(|server| { - ( - Destination::Federation(server.to_owned()), - SendingEvent::Edu(serialized.clone()), - ) - }) - .collect::>() - .await; - - let _cork = self.db.db.cork(); - let keys = self.db.queue_requests(requests.iter().map(|(o, e)| (e, o))); - - for ((dest, event), queue_id) in requests.into_iter().zip(keys) { - self.dispatch(Msg { dest, event, queue_id })?; + 
.into_iter() + .map(|server| (OutgoingDestination::Normal(server), SendingEventType::Edu(serialized.clone()))) + .collect::>(); + let _cork = services().globals.db.cork()?; + let keys = self.db.queue_requests( + &requests + .iter() + .map(|(o, e)| (o, e.clone())) + .collect::>(), + )?; + for ((outgoing_kind, event), key) in requests.into_iter().zip(keys) { + self.sender + .send((outgoing_kind.clone(), event, key)) + .unwrap(); } Ok(()) } - #[tracing::instrument(skip(self, room_id), level = "debug")] - pub async fn flush_room(&self, room_id: &RoomId) -> Result<()> { - let servers = self - .services + #[tracing::instrument(skip(self, room_id))] + pub fn flush_room(&self, room_id: &RoomId) -> Result<()> { + let servers = services() + .rooms .state_cache .room_servers(room_id) - .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)); + .filter_map(Result::ok) + .filter(|server| &**server != services().globals.server_name()); - self.flush_servers(servers).await + self.flush_servers(servers) } - #[tracing::instrument(skip(self, servers), level = "debug")] - pub async fn flush_servers<'a, S>(&self, servers: S) -> Result<()> + #[tracing::instrument(skip(self, servers))] + pub fn flush_servers>(&self, servers: I) -> Result<()> { + let requests = servers.into_iter().map(OutgoingDestination::Normal); + + for outgoing_kind in requests { + self.sender + .send((outgoing_kind, SendingEventType::Flush, Vec::::new())) + .unwrap(); + } + + Ok(()) + } + + /// Cleanup event data + /// Used for instance after we remove an appservice registration + #[tracing::instrument(skip(self))] + pub fn cleanup_events(&self, appservice_id: String) -> Result<()> { + self.db + .delete_all_requests_for(&OutgoingDestination::Appservice(appservice_id))?; + + Ok(()) + } + + #[tracing::instrument(skip(self, request), name = "request")] + pub async fn send_federation_request(&self, dest: &ServerName, request: T) -> Result where - S: Stream + Send + 'a, + T: OutgoingRequest + Debug, 
{ - servers - .map(ToOwned::to_owned) - .map(Destination::Federation) - .map(Ok) - .ready_try_for_each(|dest| { - self.dispatch(Msg { - dest, - event: SendingEvent::Flush, - queue_id: Vec::::new(), - }) - }) + let permit = self.maximum_requests.acquire().await; + let timeout = Duration::from_secs(self.timeout); + let client = &services().globals.client.federation; + let response = tokio::time::timeout(timeout, send::send_request(client, dest, request)) .await - } + .map_err(|_| { + warn!("Timeout after 300 seconds waiting for server response of {dest}"); + Error::BadServerResponse("Timeout after 300 seconds waiting for server response") + })?; + drop(permit); - /// Sends a request to a federation server - #[inline] - pub async fn send_federation_request( - &self, - dest: &ServerName, - request: T, - ) -> Result - where - T: OutgoingRequest + Debug + Send, - { - self.services.federation.execute(dest, request).await - } - - /// Like send_federation_request() but with a very large timeout - #[inline] - pub async fn send_synapse_request( - &self, - dest: &ServerName, - request: T, - ) -> Result - where - T: OutgoingRequest + Debug + Send, - { - self.services - .federation - .execute_synapse(dest, request) - .await + response } /// Sends a request to an appservice @@ -323,96 +267,790 @@ impl Service { /// Only returns None if there is no url specified in the appservice /// registration file pub async fn send_appservice_request( - &self, - registration: Registration, - request: T, + &self, registration: Registration, request: T, ) -> Result> where - T: OutgoingRequest + Debug + Send, + T: OutgoingRequest + Debug, { - let client = &self.services.client.appservice; - appservice::send_request(client, registration, request).await + let permit = self.maximum_requests.acquire().await; + let response = appservice::send_request(registration, request).await; + drop(permit); + + response } - /// Clean up queued sending event data + pub fn start_handler(self: &Arc) { + let self2 = 
Arc::clone(self); + tokio::spawn(async move { + self2 + .handler() + .await + .expect("Failed to initialize request sending handler"); + }); + } + + #[tracing::instrument(skip(self), name = "sender")] + async fn handler(&self) -> Result<()> { + let new_transactions = self.receiver.lock().await; + let (waking_sender, waking_receiver) = loole::unbounded(); + + let mut outgoing = FuturesUnordered::new(); + let mut retrying = FuturesUnordered::new(); + + let mut current_transaction_status = HashMap::::new(); + + // Retry requests we could not finish yet + if self.startup_netburst { + let mut initial_transactions = HashMap::>::new(); + for (key, outgoing_kind, event) in self.db.active_requests().filter_map(Result::ok) { + let entry = initial_transactions + .entry(outgoing_kind.clone()) + .or_default(); + + if self.startup_netburst_keep >= 0 + && entry.len() >= usize::try_from(self.startup_netburst_keep).unwrap() + { + warn!("Dropping unsent event {:?} {:?}", outgoing_kind, String::from_utf8_lossy(&key),); + self.db.delete_active_request(key)?; + continue; + } + + entry.push(event); + } + + for (outgoing_kind, events) in initial_transactions { + current_transaction_status.insert(outgoing_kind.clone(), TransactionStatus::Running); + outgoing.push(handle_events(outgoing_kind.clone(), events)); + } + } + + loop { + tokio::select! 
{ + Some(response) = outgoing.next() => { + // Outgoing transaction succeeded + match response { + Ok(outgoing_kind) => { + let _cork = services().globals.db.cork(); + self.db.delete_all_active_requests_for(&outgoing_kind)?; + + // Find events that have been added since starting the last request + let new_events = self + .db + .queued_requests(&outgoing_kind) + .filter_map(Result::ok) + .take(30).collect::>(); + + if !new_events.is_empty() { + // Insert pdus we found + self.db.mark_as_active(&new_events)?; + + // Clear retries + current_transaction_status.insert(outgoing_kind.clone(), TransactionStatus::Running); + + outgoing.push(handle_events( + outgoing_kind, + new_events.into_iter().map(|(event, _)| event).collect(), + )); + } else { + current_transaction_status.remove(&outgoing_kind); + } + } + // Outgoing transaction failed + Err((destination, err)) => { + // Set status to Failed, create timer + let timer = Self::mark_failed_and_backoff(&mut current_transaction_status, destination.clone()); + + // Add timer to loop + retrying.push(timer); + + warn!("Outgoing request to {destination} failed: {err}"); + } + }; + }, + + // Transaction retry timers firing + Some(dest) = retrying.next() => { + // Transition Failed => Retrying, return pending old transaction events + match self.select_events( + &dest, + vec![], // will be ignored because fresh == false + &mut current_transaction_status, + false, + ) { + Ok(TransactionPrepOutcome::Send(events)) => { + outgoing.push(handle_events(dest, events)); + } + Ok(_) => { + // Unreachable because fresh == false + unreachable!("select_events on a stale transaction {} did not return ::Send", dest) + } + + Err(err) => { + error!("Ignoring error in (stale) outgoing request ({}) handler: {}", dest, err); + + // transaction dropped, so drop destination as well. 
+ current_transaction_status.remove(&dest); + } + } + }, + + // Explicit wakeups, makes a backoff timer return immediately + Ok(outgoing) = waking_receiver.recv_async() => { + if let Some(TransactionStatus::Failed { waker, .. }) = current_transaction_status.get_mut(&outgoing) { + if let Some(waker) = waker.take() { + _ = waker.send(()); + } + } + }, + + // New transactions to be sent out (from server/user activity) + event = new_transactions.recv_async() => { + if let Ok((dest, event, key)) = event { + match self.select_events( + &dest, + vec![(event, key)], + &mut current_transaction_status, + true) { + Ok(TransactionPrepOutcome::Send(events)) => { + outgoing.push(handle_events(dest, events)); + }, + Ok(TransactionPrepOutcome::Wake(dest)) => { + waking_sender.send(dest).expect("nothing closes this channel but ourselves"); + }, + Ok(TransactionPrepOutcome::Nothing) => {}, + Err(err) => { + error!("Ignoring error in (fresh) outgoing request ({}) handler: {}", dest, err); + } + } + } + } + } + } + } + + /// Generates timer/oneshot, alters status to reflect Failed /// - /// Used after we remove an appservice registration or a user deletes a push - /// key - #[tracing::instrument(skip(self), level = "debug")] - pub async fn cleanup_events( + /// Returns timer/oneshot future to wake up loop for next retry + fn mark_failed_and_backoff( + status: &mut HashMap, dest: OutgoingDestination, + ) -> impl std::future::Future { + let now = Instant::now(); + + let entry = status + .get_mut(&dest) + .expect("guaranteed to be set before this function"); + + let failures = match entry { + // Running -> Failed + TransactionStatus::Running => 1, + // Retrying -> Failed + TransactionStatus::Retrying { + failures, + } => *failures + 1, + + // The transition of Failed -> Retrying is handled by handle_events + TransactionStatus::Failed { + .. + } => { + unreachable!( + "TransactionStatus in inconsistent state: Expected either Running or Retrying, got Failed, \ + bailing..." 
+ ) + }, + }; + + const ONE_DAY: Duration = Duration::from_secs(60 * 60 * 24); + + // Exponential backoff, clamp upper value to one day + let next_wakeup = now + (Duration::from_secs(30) * failures * failures).min(ONE_DAY); + + let (fut, waker) = dest.wrap_in_interruptible_sleep(next_wakeup); + + *entry = TransactionStatus::Failed { + failures, + waker: Some(waker), + }; + + fut + } + + /// This prepares a transaction, checks the transaction state, and selects + /// appropriate events. + #[tracing::instrument(skip(self, outgoing_kind, new_events, current_transaction_status))] + fn select_events( &self, - appservice_id: Option<&str>, - user_id: Option<&UserId>, - push_key: Option<&str>, - ) -> Result { - match (appservice_id, user_id, push_key) { - | (None, Some(user_id), Some(push_key)) => { - self.db - .delete_all_requests_for(&Destination::Push( - user_id.to_owned(), - push_key.to_owned(), - )) - .await; + outgoing_kind: &OutgoingDestination, + new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key + current_transaction_status: &mut HashMap, + fresh: bool, // Wether or not this transaction came from server activity. + ) -> Result { + let (allow, retry, wake_up) = + self.select_events_current(outgoing_kind.clone(), current_transaction_status, fresh)?; - Ok(()) - }, - | (Some(appservice_id), None, None) => { - self.db - .delete_all_requests_for(&Destination::Appservice(appservice_id.to_owned())) - .await; + // Nothing can be done for this remote, bail out. + if wake_up { + return Ok(TransactionPrepOutcome::Wake(outgoing_kind.clone())); + } else if !allow { + return Ok(TransactionPrepOutcome::Nothing); + } - Ok(()) + let _cork = services().globals.db.cork(); + let mut events = Vec::new(); + + // Must retry any previous transaction for this remote. 
+ if retry { + // We retry the previous transaction + for (_, e) in self + .db + .active_requests_for(outgoing_kind) + .filter_map(Result::ok) + { + events.push(e); + } + } + + // Compose the next transaction + let _cork = services().globals.db.cork(); + if !new_events.is_empty() { + self.db.mark_as_active(&new_events)?; + for (e, _) in new_events { + events.push(e); + } + } + + // Add EDU's into the transaction + if let OutgoingDestination::Normal(server_name) = outgoing_kind { + if let Ok((select_edus, last_count)) = self.select_edus(server_name) { + events.extend(select_edus.into_iter().map(SendingEventType::Edu)); + self.db.set_latest_educount(server_name, last_count)?; + } + } + + Ok(TransactionPrepOutcome::Send(events)) + } + + #[tracing::instrument(skip(self, outgoing_kind, current_transaction_status))] + fn select_events_current( + &self, outgoing_kind: OutgoingDestination, + current_transaction_status: &mut HashMap, fresh: bool, + ) -> Result<(bool, bool, bool)> { + let (mut allow, mut retry, mut wake_up) = (true, false, false); + + let entry = current_transaction_status.entry(outgoing_kind); + + if fresh { + // If its fresh, we initialise the status if we need to. + // + // We do nothing if it is already running or retrying. + // + // We return with a wake if it is in the Failed state. + entry + .and_modify(|e| match e { + TransactionStatus::Running + | TransactionStatus::Retrying { + .. + } => { + allow = false; // already running + }, + TransactionStatus::Failed { + .. + } => { + // currently sleeping + wake_up = true; + }, + }) + .or_insert(TransactionStatus::Running); + } else { + // If it's not fresh, we expect an entry. + // + // We also expect us to be the only one who are touching this destination right + // now, and its a stale transaction, so it must be in the Failed state + match entry { + Entry::Occupied(mut e) => { + let e = e.get_mut(); + match e { + TransactionStatus::Failed { + failures, + .. 
+ } => { + *e = TransactionStatus::Retrying { + failures: *failures, + }; + retry = true; + }, + + _ => unreachable!( + "Encountered bad state when preparing stale transaction: expected Failed state, got \ + Running or Retrying" + ), + } + }, + Entry::Vacant(_) => unreachable!( + "Encountered bad state when preparing stale transaction: expected Failed state, got vacant state" + ), + } + } + + Ok((allow, retry, wake_up)) + } + + #[tracing::instrument(skip(self, server_name))] + pub fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { + // u64: count of last edu + let since = self.db.get_latest_educount(server_name)?; + let mut events = Vec::new(); + let mut max_edu_count = since; + let mut device_list_changes = HashSet::new(); + + for room_id in services().rooms.state_cache.server_rooms(server_name) { + let room_id = room_id?; + // Look for device list updates in this room + device_list_changes.extend( + services() + .users + .keys_changed(room_id.as_ref(), since, None) + .filter_map(Result::ok) + .filter(|user_id| user_id.server_name() == services().globals.server_name()), + ); + + if services().globals.allow_outgoing_read_receipts() + && !select_edus_receipts(&room_id, since, &mut max_edu_count, &mut events)? 
+ { + break; + } + } + + for user_id in device_list_changes { + // Empty prev id forces synapse to resync; because synapse resyncs, + // we can just insert placeholder data + let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { + user_id, + device_id: device_id!("placeholder").to_owned(), + device_display_name: Some("Placeholder".to_owned()), + stream_id: uint!(1), + prev_id: Vec::new(), + deleted: None, + keys: None, + }); + + events.push(serde_json::to_vec(&edu).expect("json can be serialized")); + } + + if services().globals.allow_outgoing_presence() { + select_edus_presence(server_name, since, &mut max_edu_count, &mut events)?; + } + + Ok((events, max_edu_count)) + } +} + +/// Look for presence +#[tracing::instrument(skip(server_name, since, max_edu_count, events))] +pub fn select_edus_presence( + server_name: &ServerName, since: u64, max_edu_count: &mut u64, events: &mut Vec>, +) -> Result { + // Look for presence updates for this server + let mut presence_updates = Vec::new(); + for (user_id, count, presence_bytes) in services().presence.presence_since(since) { + *max_edu_count = cmp::max(count, *max_edu_count); + + if user_id.server_name() != services().globals.server_name() { + continue; + } + + if !services() + .rooms + .state_cache + .server_sees_user(server_name, &user_id)? 
+ { + continue; + } + + let presence_event = Presence::from_json_bytes_to_event(&presence_bytes, &user_id)?; + presence_updates.push(PresenceUpdate { + user_id, + presence: presence_event.content.presence, + currently_active: presence_event.content.currently_active.unwrap_or(false), + last_active_ago: presence_event + .content + .last_active_ago + .unwrap_or_else(|| uint!(0)), + status_msg: presence_event.content.status_msg, + }); + + if presence_updates.len() >= SELECT_EDU_LIMIT { + break; + } + } + + let presence_content = Edu::Presence(PresenceContent::new(presence_updates)); + events.push(serde_json::to_vec(&presence_content).expect("PresenceEvent can be serialized")); + + Ok(true) +} + +/// Look for read receipts in this room +#[tracing::instrument(skip(room_id, since, max_edu_count, events))] +pub fn select_edus_receipts( + room_id: &RoomId, since: u64, max_edu_count: &mut u64, events: &mut Vec>, +) -> Result { + for r in services() + .rooms + .read_receipt + .readreceipts_since(room_id, since) + { + let (user_id, count, read_receipt) = r?; + *max_edu_count = cmp::max(count, *max_edu_count); + + if user_id.server_name() != services().globals.server_name() { + continue; + } + + let event = serde_json::from_str(read_receipt.json().get()) + .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; + let federation_event = if let AnySyncEphemeralRoomEvent::Receipt(r) = event { + let mut read = BTreeMap::new(); + + let (event_id, mut receipt) = r + .content + .0 + .into_iter() + .next() + .expect("we only use one event per read receipt"); + let receipt = receipt + .remove(&ReceiptType::Read) + .expect("our read receipts always set this") + .remove(&user_id) + .expect("our read receipts always have the user here"); + + read.insert( + user_id, + ReceiptData { + data: receipt.clone(), + event_ids: vec![event_id.clone()], + }, + ); + + let receipt_map = ReceiptMap { + read, + }; + + let mut receipts = BTreeMap::new(); + 
receipts.insert(room_id.to_owned(), receipt_map); + + Edu::Receipt(ReceiptContent { + receipts, + }) + } else { + Error::bad_database("Invalid event type in read_receipts"); + continue; + }; + + events.push(serde_json::to_vec(&federation_event).expect("json can be serialized")); + + if events.len() >= SELECT_EDU_LIMIT { + return Ok(false); + } + } + + Ok(true) +} + +async fn handle_events( + kind: OutgoingDestination, events: Vec, +) -> Result { + match kind { + OutgoingDestination::Appservice(ref id) => handle_events_kind_appservice(&kind, id, events).await, + OutgoingDestination::Push(ref userid, ref pushkey) => { + handle_events_kind_push(&kind, userid, pushkey, events).await + }, + OutgoingDestination::Normal(ref server) => handle_events_kind_normal(&kind, server, events).await, + } +} + +#[tracing::instrument(skip(kind, events))] +async fn handle_events_kind_appservice( + kind: &OutgoingDestination, id: &String, events: Vec, +) -> Result { + let mut pdu_jsons = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + pdu_jsons.push( + services() + .rooms + .timeline + .get_pdu_from_id(pdu_id) + .map_err(|e| (kind.clone(), e))? + .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database("[Appservice] Event in servernameevent_data not found in db."), + ) + })? + .to_room_event(), + ); }, - | _ => { - debug_warn!("cleanup_events called with too many or too few arguments"); - Ok(()) + SendingEventType::Edu(_) | SendingEventType::Flush => { + // Appservices don't need EDUs (?) 
and flush only; + // no new content }, } } - fn dispatch(&self, msg: Msg) -> Result { - let shard = self.shard_id(&msg.dest); - let sender = &self - .channels - .get(shard) - .expect("missing sender worker channels") - .0; + let permit = services().sending.maximum_requests.acquire().await; - debug_assert!(!sender.is_full(), "channel full"); - debug_assert!(!sender.is_closed(), "channel closed"); - sender.send(msg).map_err(|e| err!("{e}")) + let response = match appservice::send_request( + services() + .appservice + .get_registration(id) + .await + .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database("[Appservice] Could not load registration from db."), + ) + })?, + ruma::api::appservice::event::push_events::v1::Request { + events: pdu_jsons, + txn_id: (&*general_purpose::URL_SAFE_NO_PAD.encode(calculate_hash( + &events + .iter() + .map(|e| match e { + SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, + SendingEventType::Flush => &[], + }) + .collect::>(), + ))) + .into(), + }, + ) + .await + { + Ok(_) => Ok(kind.clone()), + Err(e) => Err((kind.clone(), e)), + }; + + drop(permit); + + response +} + +#[tracing::instrument(skip(kind, events))] +async fn handle_events_kind_push( + kind: &OutgoingDestination, userid: &OwnedUserId, pushkey: &String, events: Vec, +) -> Result { + let mut pdus = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + pdus.push( + services() + .rooms + .timeline + .get_pdu_from_id(pdu_id) + .map_err(|e| (kind.clone(), e))? + .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database("[Push] Event in servernamevent_datas not found in db."), + ) + })?, + ); + }, + SendingEventType::Edu(_) | SendingEventType::Flush => { + // Push gateways don't need EDUs (?) 
and flush only; + // no new content + }, + } } - pub(super) fn shard_id(&self, dest: &Destination) -> usize { - if self.channels.len() <= 1 { - return 0; + for pdu in pdus { + // Redacted events are not notification targets (we don't send push for them) + if let Some(unsigned) = &pdu.unsigned { + if let Ok(unsigned) = serde_json::from_str::(unsigned.get()) { + if unsigned.get("redacted_because").is_some() { + continue; + } + } } - let mut hash = DefaultHasher::default(); - dest.hash(&mut hash); + let Some(pusher) = services() + .pusher + .get_pusher(userid, pushkey) + .map_err(|e| (OutgoingDestination::Push(userid.clone(), pushkey.clone()), e))? + else { + continue; + }; - let hash: u64 = hash.finish(); - let hash = usize_from_u64_truncated(hash); + let rules_for_user = services() + .account_data + .get(None, userid, GlobalAccountDataEventType::PushRules.to_string().into()) + .unwrap_or_default() + .and_then(|event| serde_json::from_str::(event.get()).ok()) + .map_or_else(|| push::Ruleset::server_default(userid), |ev: PushRulesEvent| ev.content.global); - let chans = self.channels.len().max(1); - hash.overflowing_rem(chans).0 + let unread: UInt = services() + .rooms + .user + .notification_count(userid, &pdu.room_id) + .map_err(|e| (kind.clone(), e))? 
+ .try_into() + .expect("notification count can't go that high"); + + let permit = services().sending.maximum_requests.acquire().await; + + let _response = services() + .pusher + .send_push_notice(userid, unread, &pusher, rules_for_user, &pdu) + .await + .map(|_response| kind.clone()) + .map_err(|e| (kind.clone(), e)); + + drop(permit); + } + + Ok(kind.clone()) +} + +#[tracing::instrument(skip(kind, events), name = "")] +async fn handle_events_kind_normal( + kind: &OutgoingDestination, dest: &OwnedServerName, events: Vec, +) -> Result { + let mut edu_jsons = Vec::new(); + let mut pdu_jsons = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + // TODO: check room version and remove event_id if needed + let raw = PduEvent::convert_to_outgoing_federation_event( + services() + .rooms + .timeline + .get_pdu_json_from_id(pdu_id) + .map_err(|e| (kind.clone(), e))? + .ok_or_else(|| { + error!("event not found: {dest} {pdu_id:?}"); + ( + kind.clone(), + Error::bad_database("[Normal] Event in servernamevent_datas not found in db."), + ) + })?, + ); + pdu_jsons.push(raw); + }, + SendingEventType::Edu(edu) => { + if let Ok(raw) = serde_json::from_slice(edu) { + edu_jsons.push(raw); + } + }, + SendingEventType::Flush => { + // flush only; no new content + }, + } + } + + let permit = services().sending.maximum_requests.acquire().await; + let client = &services().globals.client.sender; + let response = send::send_request( + client, + dest, + send_transaction_message::v1::Request { + origin: services().globals.server_name().to_owned(), + pdus: pdu_jsons, + edus: edu_jsons, + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + transaction_id: (&*general_purpose::URL_SAFE_NO_PAD.encode(calculate_hash( + &events + .iter() + .map(|e| match e { + SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, + SendingEventType::Flush => &[], + }) + .collect::>(), + ))) + .into(), + }, + ) + .await + .map(|response| { + for pdu in response.pdus 
{ + if pdu.1.is_err() { + warn!("error for {} from remote: {:?}", pdu.0, pdu.1); + } + } + kind.clone() + }) + .map_err(|e| (kind.clone(), e)); + + drop(permit); + + response +} + +impl OutgoingDestination { + #[tracing::instrument(skip(self))] + pub fn get_prefix(&self) -> Vec { + let mut prefix = match self { + OutgoingDestination::Appservice(server) => { + let mut p = b"+".to_vec(); + p.extend_from_slice(server.as_bytes()); + p + }, + OutgoingDestination::Push(user, pushkey) => { + let mut p = b"$".to_vec(); + p.extend_from_slice(user.as_bytes()); + p.push(0xFF); + p.extend_from_slice(pushkey.as_bytes()); + p + }, + OutgoingDestination::Normal(server) => { + let mut p = Vec::new(); + p.extend_from_slice(server.as_bytes()); + p + }, + }; + prefix.push(0xFF); + + prefix + } + + /// This wraps the OutgoingDestination key in an interruptible sleep future. + /// + /// The first return value is the future, the second is the oneshot that + /// interrupts that future, and causes it to return instantly. + fn wrap_in_interruptible_sleep( + self, at: Instant, + ) -> (impl std::future::Future, oneshot::Sender<()>) { + let (tx, rx) = oneshot::channel(); + let at = tokio::time::Instant::from_std(at); + + ( + async move { + _ = tokio::time::timeout_at(at, rx).await; + + self + }, + tx, + ) } } -fn num_senders(args: &crate::Args<'_>) -> usize { - const MIN_SENDERS: usize = 1; - // Limit the number of senders to the number of workers threads or number of - // cores, conservatively. - let max_senders = args - .server - .metrics - .num_workers() - .min(available_parallelism()); - - // If the user doesn't override the default 0, this is intended to then default - // to 1 for now as multiple senders is experimental. 
- args.server - .config - .sender_workers - .clamp(MIN_SENDERS, max_senders) +impl std::fmt::Display for OutgoingDestination { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + OutgoingDestination::Appservice(appservice_id) => { + write!(f, "Appservice (ID {:?})", appservice_id) + }, + OutgoingDestination::Push(user, push_key) => { + write!(f, "User Push Service (for {:?}, with key {:?})", user, push_key) + }, + OutgoingDestination::Normal(server) => { + write!(f, "Matrix Server ({:?})", server) + }, + } + } } diff --git a/src/service/sending/send.rs b/src/service/sending/send.rs new file mode 100644 index 00000000..5090db99 --- /dev/null +++ b/src/service/sending/send.rs @@ -0,0 +1,609 @@ +use std::{ + fmt::Debug, + mem, + net::{IpAddr, SocketAddr}, +}; + +use futures_util::TryFutureExt; +use hickory_resolver::{error::ResolveError, lookup::SrvLookup}; +use http::{header::AUTHORIZATION, HeaderValue}; +use ipaddress::IPAddress; +use ruma::{ + api::{ + client::error::Error as RumaError, EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, + SendAccessToken, + }, + OwnedServerName, ServerName, +}; +use tracing::{debug, trace, warn}; + +use crate::{services, Error, Result}; + +/// Wraps either an literal IP address plus port, or a hostname plus complement +/// (colon-plus-port if it was specified). +/// +/// Note: A `FedDest::Named` might contain an IP address in string form if there +/// was no port specified to construct a `SocketAddr` with. 
+/// +/// # Examples: +/// ```rust +/// # use conduit::api::server_server::FedDest; +/// # fn main() -> Result<(), std::net::AddrParseError> { +/// FedDest::Literal("198.51.100.3:8448".parse()?); +/// FedDest::Literal("[2001:db8::4:5]:443".parse()?); +/// FedDest::Named("matrix.example.org".to_owned(), String::new()); +/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); +/// FedDest::Named("198.51.100.5".to_owned(), String::new()); +/// # Ok(()) +/// # } +/// ``` +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum FedDest { + Literal(SocketAddr), + Named(String, String), +} + +#[tracing::instrument(skip_all, name = "send")] +pub(crate) async fn send_request( + client: &reqwest::Client, destination: &ServerName, request: T, +) -> Result +where + T: OutgoingRequest + Debug, +{ + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + if destination == services().globals.server_name() { + return Err(Error::bad_config("Won't send federation request to ourselves")); + } + + if destination.is_ip_literal() || IPAddress::is_valid(destination.host()) { + debug!( + "Destination {} is an IP literal, checking against IP range denylist.", + destination + ); + let ip = IPAddress::parse(destination.host()).map_err(|e| { + warn!("Failed to parse IP literal from string: {}", e); + Error::BadServerResponse("Invalid IP address") + })?; + + let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); + let mut cidr_ranges: Vec = Vec::new(); + + for cidr in cidr_ranges_s { + cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); + } + + debug!("List of pushed CIDR ranges: {:?}", cidr_ranges); + + for cidr in cidr_ranges { + if cidr.includes(&ip) { + return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); + } + } + + debug!("IP literal {} is allowed.", destination); + } + + trace!("Preparing to send request to {destination}"); + + let mut 
write_destination_to_cache = false; + + let cached_result = services() + .globals + .actual_destinations() + .read() + .await + .get(destination) + .cloned(); + + let (actual_destination, host) = if let Some(result) = cached_result { + result + } else { + write_destination_to_cache = true; + + let result = resolve_actual_destination(destination).await; + + (result.0, result.1.into_uri_string()) + }; + + let actual_destination_str = actual_destination.clone().into_https_string(); + + let mut http_request = request + .try_into_http_request::>( + &actual_destination_str, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_5], + ) + .map_err(|e| { + warn!("Failed to find destination {}: {}", actual_destination_str, e); + Error::BadServerResponse("Invalid destination") + })?; + + let mut request_map = serde_json::Map::new(); + + if !http_request.body().is_empty() { + request_map.insert( + "content".to_owned(), + serde_json::from_slice(http_request.body()).expect("body is valid json, we just created it"), + ); + }; + + request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); + request_map.insert( + "uri".to_owned(), + http_request + .uri() + .path_and_query() + .expect("all requests have a path") + .to_string() + .into(), + ); + request_map.insert("origin".to_owned(), services().globals.server_name().as_str().into()); + request_map.insert("destination".to_owned(), destination.as_str().into()); + + let mut request_json = serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); + + ruma::signatures::sign_json( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut request_json, + ) + .expect("our request json is what ruma expects"); + + let request_json: serde_json::Map = + serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); + + let signatures = request_json["signatures"] + .as_object() + .unwrap() + .values() + .map(|v| { + v.as_object() + .unwrap() + .iter() + 
.map(|(k, v)| (k, v.as_str().unwrap())) + }); + + for signature_server in signatures { + for s in signature_server { + http_request.headers_mut().insert( + AUTHORIZATION, + HeaderValue::from_str(&format!( + "X-Matrix origin={},key=\"{}\",sig=\"{}\"", + services().globals.server_name(), + s.0, + s.1 + )) + .unwrap(), + ); + } + } + + let reqwest_request = reqwest::Request::try_from(http_request)?; + let method = reqwest_request.method().clone(); + let url = reqwest_request.url().clone(); + + if let Some(url_host) = url.host_str() { + trace!("Checking request URL for IP"); + if let Ok(ip) = IPAddress::parse(url_host) { + let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); + let mut cidr_ranges: Vec = Vec::new(); + + for cidr in cidr_ranges_s { + cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); + } + + for cidr in cidr_ranges { + if cidr.includes(&ip) { + return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); + } + } + } + } + + debug!("Sending request {} {}", method, url); + let response = client.execute(reqwest_request).await; + trace!("Received resonse {} {}", method, url); + + match response { + Ok(mut response) => { + // reqwest::Response -> http::Response conversion + + trace!("Checking response destination's IP"); + if let Some(remote_addr) = response.remote_addr() { + if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { + let cidr_ranges_s = services().globals.ip_range_denylist().to_vec(); + let mut cidr_ranges: Vec = Vec::new(); + + for cidr in cidr_ranges_s { + cidr_ranges.push(IPAddress::parse(cidr).expect("we checked this at startup")); + } + + for cidr in cidr_ranges { + if cidr.includes(&ip) { + return Err(Error::BadServerResponse("Not allowed to send requests to this IP")); + } + } + } + } + + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + 
response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); + + trace!("Waiting for response body"); + let body = response.bytes().await.unwrap_or_else(|e| { + debug!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if !status.is_success() { + debug!( + "Got {status:?} for {method} {url}: {}", + String::from_utf8_lossy(&body) + .lines() + .collect::>() + .join(" ") + ); + } + + let http_response = http_response_builder + .body(body) + .expect("reqwest body is valid http body"); + + if status.is_success() { + debug!("Got {status:?} for {method} {url}"); + let response = T::IncomingResponse::try_from_http_response(http_response); + if response.is_ok() && write_destination_to_cache { + services() + .globals + .actual_destinations() + .write() + .await + .insert(OwnedServerName::from(destination), (actual_destination, host)); + } + + response.map_err(|e| { + debug!("Invalid 200 response for {} {}", url, e); + Error::BadServerResponse("Server returned bad 200 response.") + }) + } else { + Err(Error::FederationError( + destination.to_owned(), + RumaError::from_http_response(http_response), + )) + } + }, + Err(e) => { + // we do not need to log that servers in a room are dead, this is normal in + // public rooms and just spams the logs. 
+ if e.is_timeout() { + debug!( + "Timed out sending request to {} at {}: {}", + destination, actual_destination_str, e + ); + } else if e.is_connect() { + debug!("Failed to connect to {} at {}: {}", destination, actual_destination_str, e); + } else if e.is_redirect() { + debug!( + "Redirect loop sending request to {} at {}: {}\nFinal URL: {:?}", + destination, + actual_destination_str, + e, + e.url() + ); + } else { + debug!("Could not send request to {} at {}: {}", destination, actual_destination_str, e); + } + + Err(e.into()) + }, + } +} + +fn get_ip_with_port(destination_str: &str) -> Option { + if let Ok(destination) = destination_str.parse::() { + Some(FedDest::Literal(destination)) + } else if let Ok(ip_addr) = destination_str.parse::() { + Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) + } else { + None + } +} + +fn add_port_to_hostname(destination_str: &str) -> FedDest { + let (host, port) = match destination_str.find(':') { + None => (destination_str, ":8448"), + Some(pos) => destination_str.split_at(pos), + }; + FedDest::Named(host.to_owned(), port.to_owned()) +} + +/// Returns: `actual_destination`, host header +/// Implemented according to the specification at +/// Numbers in comments below refer to bullet points in linked section of +/// specification +#[tracing::instrument(skip_all, name = "resolve")] +async fn resolve_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { + trace!("Finding actual destination for {destination}"); + let destination_str = destination.as_str().to_owned(); + let mut hostname = destination_str.clone(); + let actual_destination = match get_ip_with_port(&destination_str) { + Some(host_port) => { + debug!("1: IP literal with provided or default port"); + host_port + }, + None => { + if let Some(pos) = destination_str.find(':') { + debug!("2: Hostname with included port"); + + let (host, port) = destination_str.split_at(pos); + query_and_cache_override(host, host, 
port.parse::().unwrap_or(8448)).await; + + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + trace!("Requesting well known for {destination}"); + if let Some(delegated_hostname) = request_well_known(destination.as_str()).await { + debug!("3: A .well-known file is available"); + hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); + match get_ip_with_port(&delegated_hostname) { + Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file + None => { + if let Some(pos) = delegated_hostname.find(':') { + debug!("3.2: Hostname with port in .well-known file"); + + let (host, port) = delegated_hostname.split_at(pos); + query_and_cache_override(host, host, port.parse::().unwrap_or(8448)).await; + + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + trace!("Delegated hostname has no port in this branch"); + if let Some(hostname_override) = query_srv_record(&delegated_hostname).await { + debug!("3.3: SRV lookup successful"); + + let force_port = hostname_override.port(); + query_and_cache_override( + &delegated_hostname, + &hostname_override.hostname(), + force_port.unwrap_or(8448), + ) + .await; + + if let Some(port) = force_port { + FedDest::Named(delegated_hostname, format!(":{port}")) + } else { + add_port_to_hostname(&delegated_hostname) + } + } else { + debug!("3.4: No SRV records, just use the hostname from .well-known"); + query_and_cache_override(&delegated_hostname, &delegated_hostname, 8448).await; + add_port_to_hostname(&delegated_hostname) + } + } + }, + } + } else { + trace!("4: No .well-known or an error occured"); + if let Some(hostname_override) = query_srv_record(&destination_str).await { + debug!("4: No .well-known; SRV record found"); + + let force_port = hostname_override.port(); + query_and_cache_override(&hostname, &hostname_override.hostname(), force_port.unwrap_or(8448)) + .await; + + if let Some(port) = force_port { + FedDest::Named(hostname.clone(), format!(":{port}")) + } else { + 
add_port_to_hostname(&hostname) + } + } else { + debug!("4: No .well-known; 5: No SRV record found"); + query_and_cache_override(&destination_str, &destination_str, 8448).await; + add_port_to_hostname(&destination_str) + } + } + } + }, + }; + + // Can't use get_ip_with_port here because we don't want to add a port + // to an IP address if it wasn't specified + let hostname = if let Ok(addr) = hostname.parse::() { + FedDest::Literal(addr) + } else if let Ok(addr) = hostname.parse::() { + FedDest::Named(addr.to_string(), ":8448".to_owned()) + } else if let Some(pos) = hostname.find(':') { + let (host, port) = hostname.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + FedDest::Named(hostname, ":8448".to_owned()) + }; + + debug!("Actual destination: {actual_destination:?} hostname: {hostname:?}"); + (actual_destination, hostname) +} + +async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) { + match services() + .globals + .dns_resolver() + .lookup_ip(hostname.to_owned()) + .await + { + Ok(override_ip) => { + trace!("Caching result of {:?} overriding {:?}", hostname, overname); + + services() + .globals + .resolver + .overrides + .write() + .unwrap() + .insert(overname.to_owned(), (override_ip.iter().collect(), port)); + }, + Err(e) => { + debug!("Got {:?} for {:?} to override {:?}", e.kind(), hostname, overname); + }, + } +} + +async fn query_srv_record(hostname: &'_ str) -> Option { + fn handle_successful_srv(srv: &SrvLookup) -> Option { + srv.iter().next().map(|result| { + FedDest::Named( + result.target().to_string().trim_end_matches('.').to_owned(), + format!(":{}", result.port()), + ) + }) + } + + async fn lookup_srv(hostname: &str) -> Result { + debug!("querying SRV for {:?}", hostname); + let hostname = hostname.trim_end_matches('.'); + services() + .globals + .dns_resolver() + .srv_lookup(hostname.to_owned()) + .await + } + + let first_hostname = format!("_matrix-fed._tcp.{hostname}."); + let 
second_hostname = format!("_matrix._tcp.{hostname}."); + + lookup_srv(&first_hostname) + .or_else(|_| { + trace!("Querying deprecated _matrix SRV record for host {:?}", hostname); + lookup_srv(&second_hostname) + }) + .and_then(|srv_lookup| async move { Ok(handle_successful_srv(&srv_lookup)) }) + .await + .ok() + .flatten() +} + +async fn request_well_known(destination: &str) -> Option { + if !services() + .globals + .resolver + .overrides + .read() + .unwrap() + .contains_key(destination) + { + query_and_cache_override(destination, destination, 8448).await; + } + + let response = services() + .globals + .client + .well_known + .get(&format!("https://{destination}/.well-known/matrix/server")) + .send() + .await; + + trace!("Well known response: {:?}", response); + if let Err(e) = &response { + debug!("Well known error: {e:?}"); + return None; + } + + let text = response.ok()?.text().await; + trace!("Well known response text: {:?}", text); + + if text.as_ref().ok()?.len() > 10000 { + debug!( + "Well known response for destination '{destination}' exceeded past 10000 characters, assuming no \ + well-known." 
+ ); + return None; + } + + let body: serde_json::Value = serde_json::from_str(&text.ok()?).ok()?; + trace!("serde_json body of well known text: {}", body); + + Some(body.get("m.server")?.as_str()?.to_owned()) +} + +impl FedDest { + fn into_https_string(self) -> String { + match self { + Self::Literal(addr) => format!("https://{addr}"), + Self::Named(host, port) => format!("https://{host}{port}"), + } + } + + fn into_uri_string(self) -> String { + match self { + Self::Literal(addr) => addr.to_string(), + Self::Named(host, port) => host + &port, + } + } + + fn hostname(&self) -> String { + match &self { + Self::Literal(addr) => addr.ip().to_string(), + Self::Named(host, _) => host.clone(), + } + } + + fn port(&self) -> Option { + match &self { + Self::Literal(addr) => Some(addr.port()), + Self::Named(_, port) => port[1..].parse().ok(), + } + } +} + +#[cfg(test)] +mod tests { + use super::{add_port_to_hostname, get_ip_with_port, FedDest}; + + #[test] + fn ips_get_default_ports() { + assert_eq!( + get_ip_with_port("1.1.1.1"), + Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap())) + ); + assert_eq!( + get_ip_with_port("dead:beef::"), + Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap())) + ); + } + + #[test] + fn ips_keep_custom_ports() { + assert_eq!( + get_ip_with_port("1.1.1.1:1234"), + Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap())) + ); + assert_eq!( + get_ip_with_port("[dead::beef]:8933"), + Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap())) + ); + } + + #[test] + fn hostnames_get_default_ports() { + assert_eq!( + add_port_to_hostname("example.com"), + FedDest::Named(String::from("example.com"), String::from(":8448")) + ); + } + + #[test] + fn hostnames_keep_custom_ports() { + assert_eq!( + add_port_to_hostname("example.com:1337"), + FedDest::Named(String::from("example.com"), String::from(":1337")) + ); + } +} diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs deleted file mode 100644 index fab02f6b..00000000 
--- a/src/service/sending/sender.rs +++ /dev/null @@ -1,921 +0,0 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - fmt::Debug, - sync::{ - Arc, - atomic::{AtomicU64, AtomicUsize, Ordering}, - }, - time::{Duration, Instant}, -}; - -use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD}; -use conduwuit::{ - Error, Result, debug, err, error, - result::LogErr, - trace, - utils::{ - ReadyExt, calculate_hash, continue_exponential_backoff_secs, - future::TryExtExt, - stream::{BroadbandExt, IterStream, WidebandExt}, - }, - warn, -}; -use futures::{ - FutureExt, StreamExt, - future::{BoxFuture, OptionFuture}, - join, pin_mut, - stream::FuturesUnordered, -}; -use ruma::{ - CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId, - RoomId, RoomVersionId, ServerName, UInt, - api::{ - appservice::event::push_events::v1::EphemeralData, - federation::transactions::{ - edu::{ - DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, - ReceiptData, ReceiptMap, - }, - send_transaction_message, - }, - }, - device_id, - events::{ - AnySyncEphemeralRoomEvent, GlobalAccountDataEventType, push_rules::PushRulesEvent, - receipt::ReceiptType, - }, - push, - serde::Raw, - uint, -}; -use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; - -use super::{ - Destination, EduBuf, EduVec, Msg, SendingEvent, Service, appservice, data::QueueItem, -}; - -#[derive(Debug)] -enum TransactionStatus { - Running, - Failed(u32, Instant), // number of times failed, time of last failure - Retrying(u32), // number of times failed -} - -type SendingError = (Destination, Error); -type SendingResult = Result; -type SendingFuture<'a> = BoxFuture<'a, SendingResult>; -type SendingFutures<'a> = FuturesUnordered>; -type CurTransactionStatus = HashMap; - -const SELECT_PRESENCE_LIMIT: usize = 256; -const SELECT_RECEIPT_LIMIT: usize = 256; -const SELECT_EDU_LIMIT: usize = EDU_LIMIT - 2; -const DEQUEUE_LIMIT: usize = 48; - -pub 
const PDU_LIMIT: usize = 50; -pub const EDU_LIMIT: usize = 100; - -impl Service { - #[tracing::instrument(skip(self), level = "debug")] - pub(super) async fn sender(self: Arc, id: usize) -> Result { - let mut statuses: CurTransactionStatus = CurTransactionStatus::new(); - let mut futures: SendingFutures<'_> = FuturesUnordered::new(); - - self.startup_netburst(id, &mut futures, &mut statuses) - .boxed() - .await; - - self.work_loop(id, &mut futures, &mut statuses).await; - - if !futures.is_empty() { - self.finish_responses(&mut futures).boxed().await; - } - - Ok(()) - } - - #[tracing::instrument( - name = "work", - level = "trace" - skip_all, - fields( - futures = %futures.len(), - statuses = %statuses.len(), - ), - )] - async fn work_loop<'a>( - &'a self, - id: usize, - futures: &mut SendingFutures<'a>, - statuses: &mut CurTransactionStatus, - ) { - let receiver = self - .channels - .get(id) - .map(|(_, receiver)| receiver.clone()) - .expect("Missing channel for sender worker"); - - while !receiver.is_closed() { - tokio::select! 
{ - Some(response) = futures.next() => { - self.handle_response(response, futures, statuses).await; - }, - request = receiver.recv_async() => match request { - Ok(request) => self.handle_request(request, futures, statuses).await, - Err(_) => return, - }, - } - } - } - - #[tracing::instrument(name = "response", level = "debug", skip_all)] - async fn handle_response<'a>( - &'a self, - response: SendingResult, - futures: &mut SendingFutures<'a>, - statuses: &mut CurTransactionStatus, - ) { - match response { - | Ok(dest) => self.handle_response_ok(&dest, futures, statuses).await, - | Err((dest, e)) => Self::handle_response_err(dest, statuses, &e), - } - } - - fn handle_response_err(dest: Destination, statuses: &mut CurTransactionStatus, e: &Error) { - debug!(dest = ?dest, "{e:?}"); - statuses.entry(dest).and_modify(|e| { - *e = match e { - | TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - | &mut TransactionStatus::Retrying(ref n) => - TransactionStatus::Failed(n.saturating_add(1), Instant::now()), - | TransactionStatus::Failed(..) 
=> { - panic!("Request that was not even running failed?!") - }, - } - }); - } - - #[allow(clippy::needless_pass_by_ref_mut)] - async fn handle_response_ok<'a>( - &'a self, - dest: &Destination, - futures: &mut SendingFutures<'a>, - statuses: &mut CurTransactionStatus, - ) { - let _cork = self.db.db.cork(); - self.db.delete_all_active_requests_for(dest).await; - - // Find events that have been added since starting the last request - let new_events = self - .db - .queued_requests(dest) - .take(DEQUEUE_LIMIT) - .collect::>() - .await; - - // Insert any pdus we found - if !new_events.is_empty() { - self.db.mark_as_active(new_events.iter()); - - let new_events_vec = new_events.into_iter().map(|(_, event)| event).collect(); - futures.push(self.send_events(dest.clone(), new_events_vec)); - } else { - statuses.remove(dest); - } - } - - #[allow(clippy::needless_pass_by_ref_mut)] - #[tracing::instrument(name = "request", level = "debug", skip_all)] - async fn handle_request<'a>( - &'a self, - msg: Msg, - futures: &mut SendingFutures<'a>, - statuses: &mut CurTransactionStatus, - ) { - let iv = vec![(msg.queue_id, msg.event)]; - if let Ok(Some(events)) = self.select_events(&msg.dest, iv, statuses).await { - if !events.is_empty() { - futures.push(self.send_events(msg.dest, events)); - } else { - statuses.remove(&msg.dest); - } - } - } - - #[tracing::instrument( - name = "finish", - level = "info", - skip_all, - fields(futures = %futures.len()), - )] - async fn finish_responses<'a>(&'a self, futures: &mut SendingFutures<'a>) { - use tokio::{ - select, - time::{Instant, sleep_until}, - }; - - let timeout = self.server.config.sender_shutdown_timeout; - let timeout = Duration::from_secs(timeout); - let now = Instant::now(); - let deadline = now.checked_add(timeout).unwrap_or(now); - loop { - trace!("Waiting for {} requests to complete...", futures.len()); - select! 
{ - () = sleep_until(deadline) => return, - response = futures.next() => match response { - Some(Ok(dest)) => self.db.delete_all_active_requests_for(&dest).await, - Some(_) => continue, - None => return, - }, - } - } - } - - #[tracing::instrument( - name = "netburst", - level = "debug", - skip_all, - fields(futures = %futures.len()), - )] - #[allow(clippy::needless_pass_by_ref_mut)] - async fn startup_netburst<'a>( - &'a self, - id: usize, - futures: &mut SendingFutures<'a>, - statuses: &mut CurTransactionStatus, - ) { - let keep = - usize::try_from(self.server.config.startup_netburst_keep).unwrap_or(usize::MAX); - let mut txns = HashMap::>::new(); - let mut active = self.db.active_requests().boxed(); - - while let Some((key, event, dest)) = active.next().await { - if self.shard_id(&dest) != id { - continue; - } - - let entry = txns.entry(dest.clone()).or_default(); - if self.server.config.startup_netburst_keep >= 0 && entry.len() >= keep { - warn!("Dropping unsent event {dest:?} {:?}", String::from_utf8_lossy(&key)); - self.db.delete_active_request(&key); - } else { - entry.push(event); - } - } - - for (dest, events) in txns { - if self.server.config.startup_netburst && !events.is_empty() { - statuses.insert(dest.clone(), TransactionStatus::Running); - futures.push(self.send_events(dest.clone(), events)); - } - } - } - - #[tracing::instrument( - name = "select",, - level = "debug", - skip_all, - fields( - ?dest, - new_events = %new_events.len(), - ) - )] - async fn select_events( - &self, - dest: &Destination, - new_events: Vec, // Events we want to send: event and full key - statuses: &mut CurTransactionStatus, - ) -> Result>> { - let (allow, retry) = self.select_events_current(dest, statuses)?; - - // Nothing can be done for this remote, bail out. - if !allow { - return Ok(None); - } - - let _cork = self.db.db.cork(); - let mut events = Vec::new(); - - // Must retry any previous transaction for this remote. 
- if retry { - self.db - .active_requests_for(dest) - .ready_for_each(|(_, e)| events.push(e)) - .await; - - return Ok(Some(events)); - } - - // Compose the next transaction - let _cork = self.db.db.cork(); - if !new_events.is_empty() { - self.db.mark_as_active(new_events.iter()); - for (_, e) in new_events { - events.push(e); - } - } - - // Add EDU's into the transaction - if let Destination::Federation(server_name) = dest { - if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { - debug_assert!(select_edus.len() <= EDU_LIMIT, "exceeded edus limit"); - let select_edus = select_edus.into_iter().map(SendingEvent::Edu); - - events.extend(select_edus); - self.db.set_latest_educount(server_name, last_count); - } - } - - Ok(Some(events)) - } - - fn select_events_current( - &self, - dest: &Destination, - statuses: &mut CurTransactionStatus, - ) -> Result<(bool, bool)> { - let (mut allow, mut retry) = (true, false); - statuses - .entry(dest.clone()) // TODO: can we avoid cloning? 
- .and_modify(|e| match e { - TransactionStatus::Failed(tries, time) => { - // Fail if a request has failed recently (exponential backoff) - let min = self.server.config.sender_timeout; - let max = self.server.config.sender_retry_backoff_limit; - if continue_exponential_backoff_secs(min, max, time.elapsed(), *tries) - && !matches!(dest, Destination::Appservice(_)) - { - allow = false; - } else { - retry = true; - *e = TransactionStatus::Retrying(*tries); - } - }, - TransactionStatus::Running | TransactionStatus::Retrying(_) => { - allow = false; // already running - }, - }) - .or_insert(TransactionStatus::Running); - - Ok((allow, retry)) - } - - #[tracing::instrument( - name = "edus",, - level = "debug", - skip_all, - )] - async fn select_edus(&self, server_name: &ServerName) -> Result<(EduVec, u64)> { - // selection window - let since = self.db.get_latest_educount(server_name).await; - let since_upper = self.services.globals.current_count()?; - let batch = (since, since_upper); - debug_assert!(batch.0 <= batch.1, "since range must not be negative"); - - let events_len = AtomicUsize::default(); - let max_edu_count = AtomicU64::new(since); - - let device_changes = - self.select_edus_device_changes(server_name, batch, &max_edu_count, &events_len); - - let receipts: OptionFuture<_> = self - .server - .config - .allow_outgoing_read_receipts - .then(|| self.select_edus_receipts(server_name, batch, &max_edu_count)) - .into(); - - let presence: OptionFuture<_> = self - .server - .config - .allow_outgoing_presence - .then(|| self.select_edus_presence(server_name, batch, &max_edu_count)) - .into(); - - let (device_changes, receipts, presence) = join!(device_changes, receipts, presence); - - let mut events = device_changes; - events.extend(presence.into_iter().flatten()); - events.extend(receipts.into_iter().flatten()); - - Ok((events, max_edu_count.load(Ordering::Acquire))) - } - - /// Look for device changes - #[tracing::instrument( - name = "device_changes", - level = 
"trace", - skip(self, server_name, max_edu_count) - )] - async fn select_edus_device_changes( - &self, - server_name: &ServerName, - since: (u64, u64), - max_edu_count: &AtomicU64, - events_len: &AtomicUsize, - ) -> EduVec { - let mut events = EduVec::new(); - let server_rooms = self.services.state_cache.server_rooms(server_name); - - pin_mut!(server_rooms); - let mut device_list_changes = HashSet::::new(); - while let Some(room_id) = server_rooms.next().await { - let keys_changed = self - .services - .users - .room_keys_changed(room_id, since.0, None) - .ready_filter(|(user_id, _)| self.services.globals.user_is_local(user_id)); - - pin_mut!(keys_changed); - while let Some((user_id, count)) = keys_changed.next().await { - if count > since.1 { - break; - } - - max_edu_count.fetch_max(count, Ordering::Relaxed); - if !device_list_changes.insert(user_id.into()) { - continue; - } - - // Empty prev id forces synapse to resync; because synapse resyncs, - // we can just insert placeholder data - let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { - user_id: user_id.into(), - device_id: device_id!("placeholder").to_owned(), - device_display_name: Some("Placeholder".to_owned()), - stream_id: uint!(1), - prev_id: Vec::new(), - deleted: None, - keys: None, - }); - - let mut buf = EduBuf::new(); - serde_json::to_writer(&mut buf, &edu) - .expect("failed to serialize device list update to JSON"); - - events.push(buf); - if events_len.fetch_add(1, Ordering::Relaxed) >= SELECT_EDU_LIMIT - 1 { - return events; - } - } - } - - events - } - - /// Look for read receipts in this room - #[tracing::instrument( - name = "receipts", - level = "trace", - skip(self, server_name, max_edu_count) - )] - async fn select_edus_receipts( - &self, - server_name: &ServerName, - since: (u64, u64), - max_edu_count: &AtomicU64, - ) -> Option { - let mut num = 0; - let receipts: BTreeMap = self - .services - .state_cache - .server_rooms(server_name) - .map(ToOwned::to_owned) - 
.broad_filter_map(|room_id| async move { - let receipt_map = self - .select_edus_receipts_room(&room_id, since, max_edu_count, &mut num) - .await; - - receipt_map - .read - .is_empty() - .eq(&false) - .then_some((room_id, receipt_map)) - }) - .collect() - .await; - - if receipts.is_empty() { - return None; - } - - let receipt_content = Edu::Receipt(ReceiptContent { receipts }); - - let mut buf = EduBuf::new(); - serde_json::to_writer(&mut buf, &receipt_content) - .expect("Failed to serialize Receipt EDU to JSON vec"); - - Some(buf) - } - - /// Look for read receipts in this room - #[tracing::instrument(name = "receipts", level = "trace", skip(self, since, max_edu_count))] - async fn select_edus_receipts_room( - &self, - room_id: &RoomId, - since: (u64, u64), - max_edu_count: &AtomicU64, - num: &mut usize, - ) -> ReceiptMap { - let receipts = self - .services - .read_receipt - .readreceipts_since(room_id, since.0); - - pin_mut!(receipts); - let mut read = BTreeMap::::new(); - while let Some((user_id, count, read_receipt)) = receipts.next().await { - if count > since.1 { - break; - } - - max_edu_count.fetch_max(count, Ordering::Relaxed); - if !self.services.globals.user_is_local(user_id) { - continue; - } - - let Ok(event) = serde_json::from_str(read_receipt.json().get()) else { - error!(?user_id, ?count, ?read_receipt, "Invalid edu event in read_receipts."); - continue; - }; - - let AnySyncEphemeralRoomEvent::Receipt(r) = event else { - error!(?user_id, ?count, ?event, "Invalid event type in read_receipts"); - continue; - }; - - let (event_id, mut receipt) = r - .content - .0 - .into_iter() - .next() - .expect("we only use one event per read receipt"); - - let receipt = receipt - .remove(&ReceiptType::Read) - .expect("our read receipts always set this") - .remove(user_id) - .expect("our read receipts always have the user here"); - - let receipt_data = ReceiptData { - data: receipt, - event_ids: vec![event_id.clone()], - }; - - if read.insert(user_id.to_owned(), 
receipt_data).is_none() { - *num = num.saturating_add(1); - if *num >= SELECT_RECEIPT_LIMIT { - break; - } - } - } - - ReceiptMap { read } - } - - /// Look for presence - #[tracing::instrument( - name = "presence", - level = "trace", - skip(self, server_name, max_edu_count) - )] - async fn select_edus_presence( - &self, - server_name: &ServerName, - since: (u64, u64), - max_edu_count: &AtomicU64, - ) -> Option { - let presence_since = self.services.presence.presence_since(since.0); - - pin_mut!(presence_since); - let mut presence_updates = HashMap::::new(); - while let Some((user_id, count, presence_bytes)) = presence_since.next().await { - if count > since.1 { - break; - } - - max_edu_count.fetch_max(count, Ordering::Relaxed); - if !self.services.globals.user_is_local(user_id) { - continue; - } - - if !self - .services - .state_cache - .server_sees_user(server_name, user_id) - .await - { - continue; - } - - let Ok(presence_event) = self - .services - .presence - .from_json_bytes_to_event(presence_bytes, user_id) - .await - .log_err() - else { - continue; - }; - - let update = PresenceUpdate { - user_id: user_id.into(), - presence: presence_event.content.presence, - currently_active: presence_event.content.currently_active.unwrap_or(false), - status_msg: presence_event.content.status_msg, - last_active_ago: presence_event - .content - .last_active_ago - .unwrap_or_else(|| uint!(0)), - }; - - presence_updates.insert(user_id.into(), update); - if presence_updates.len() >= SELECT_PRESENCE_LIMIT { - break; - } - } - - if presence_updates.is_empty() { - return None; - } - - let presence_content = Edu::Presence(PresenceContent { - push: presence_updates.into_values().collect(), - }); - - let mut buf = EduBuf::new(); - serde_json::to_writer(&mut buf, &presence_content) - .expect("failed to serialize Presence EDU to JSON"); - - Some(buf) - } - - fn send_events(&self, dest: Destination, events: Vec) -> SendingFuture<'_> { - debug_assert!(!events.is_empty(), "sending empty 
transaction"); - match dest { - | Destination::Federation(server) => - self.send_events_dest_federation(server, events).boxed(), - | Destination::Appservice(id) => self.send_events_dest_appservice(id, events).boxed(), - | Destination::Push(user_id, pushkey) => - self.send_events_dest_push(user_id, pushkey, events).boxed(), - } - } - - #[tracing::instrument( - name = "appservice", - level = "debug", - skip(self, events), - fields( - events = %events.len(), - ), - )] - async fn send_events_dest_appservice( - &self, - id: String, - events: Vec, - ) -> SendingResult { - let Some(appservice) = self.services.appservice.get_registration(&id).await else { - return Err(( - Destination::Appservice(id.clone()), - err!(Database(warn!(?id, "Missing appservice registration"))), - )); - }; - - let mut pdu_jsons = Vec::with_capacity( - events - .iter() - .filter(|event| matches!(event, SendingEvent::Pdu(_))) - .count(), - ); - let mut edu_jsons: Vec = Vec::with_capacity( - events - .iter() - .filter(|event| matches!(event, SendingEvent::Edu(_))) - .count(), - ); - for event in &events { - match event { - | SendingEvent::Pdu(pdu_id) => { - if let Ok(pdu) = self.services.timeline.get_pdu_from_id(pdu_id).await { - pdu_jsons.push(pdu.into_room_event()); - } - }, - | SendingEvent::Edu(edu) => - if appservice.receive_ephemeral { - if let Ok(edu) = serde_json::from_slice(edu) { - edu_jsons.push(edu); - } - }, - | SendingEvent::Flush => {}, // flush only; no new content - } - } - - let txn_hash = calculate_hash(events.iter().filter_map(|e| match e { - | SendingEvent::Edu(b) => Some(&**b), - | SendingEvent::Pdu(b) => Some(b.as_ref()), - | SendingEvent::Flush => None, - })); - - let txn_id = &*URL_SAFE_NO_PAD.encode(txn_hash); - - //debug_assert!(pdu_jsons.len() + edu_jsons.len() > 0, "sending empty - // transaction"); - let client = &self.services.client.appservice; - match appservice::send_request( - client, - appservice, - ruma::api::appservice::event::push_events::v1::Request { - 
events: pdu_jsons, - txn_id: txn_id.into(), - ephemeral: edu_jsons, - to_device: Vec::new(), // TODO - }, - ) - .await - { - | Ok(_) => Ok(Destination::Appservice(id)), - | Err(e) => Err((Destination::Appservice(id), e)), - } - } - - #[tracing::instrument( - name = "push", - level = "info", - skip(self, events), - fields( - events = %events.len(), - ), - )] - async fn send_events_dest_push( - &self, - user_id: OwnedUserId, - pushkey: String, - events: Vec, - ) -> SendingResult { - let Ok(pusher) = self.services.pusher.get_pusher(&user_id, &pushkey).await else { - return Err(( - Destination::Push(user_id.clone(), pushkey.clone()), - err!(Database(error!(?user_id, ?pushkey, "Missing pusher"))), - )); - }; - - let mut pdus = Vec::with_capacity( - events - .iter() - .filter(|event| matches!(event, SendingEvent::Pdu(_))) - .count(), - ); - for event in &events { - match event { - | SendingEvent::Pdu(pdu_id) => { - if let Ok(pdu) = self.services.timeline.get_pdu_from_id(pdu_id).await { - pdus.push(pdu); - } - }, - | SendingEvent::Edu(_) | SendingEvent::Flush => { - // Push gateways don't need EDUs (?) 
and flush only; - // no new content - }, - } - } - - for pdu in pdus { - // Redacted events are not notification targets (we don't send push for them) - if pdu.contains_unsigned_property("redacted_because", serde_json::Value::is_string) { - continue; - } - - let rules_for_user = self - .services - .account_data - .get_global(&user_id, GlobalAccountDataEventType::PushRules) - .await - .map_or_else( - |_| push::Ruleset::server_default(&user_id), - |ev: PushRulesEvent| ev.content.global, - ); - - let unread: UInt = self - .services - .user - .notification_count(&user_id, &pdu.room_id) - .await - .try_into() - .expect("notification count can't go that high"); - - let _response = self - .services - .pusher - .send_push_notice(&user_id, unread, &pusher, rules_for_user, &pdu) - .await - .map_err(|e| (Destination::Push(user_id.clone(), pushkey.clone()), e)); - } - - Ok(Destination::Push(user_id, pushkey)) - } - - async fn send_events_dest_federation( - &self, - server: OwnedServerName, - events: Vec, - ) -> SendingResult { - let pdus: Vec<_> = events - .iter() - .filter_map(|pdu| match pdu { - | SendingEvent::Pdu(pdu) => Some(pdu), - | _ => None, - }) - .stream() - .wide_filter_map(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id).ok()) - .wide_then(|pdu| self.convert_to_outgoing_federation_event(pdu)) - .collect() - .await; - - let edus: Vec> = events - .iter() - .filter_map(|edu| match edu { - | SendingEvent::Edu(edu) => Some(edu.as_ref()), - | _ => None, - }) - .map(serde_json::from_slice) - .filter_map(Result::ok) - .collect(); - - if pdus.is_empty() && edus.is_empty() { - return Ok(Destination::Federation(server)); - } - - let preimage = pdus - .iter() - .map(|raw| raw.get().as_bytes()) - .chain(edus.iter().map(|raw| raw.json().get().as_bytes())); - - let txn_hash = calculate_hash(preimage); - let txn_id = &*URL_SAFE_NO_PAD.encode(txn_hash); - let request = send_transaction_message::v1::Request { - transaction_id: txn_id.into(), - origin: 
self.server.name.clone(), - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdus, - edus, - }; - - let result = self - .services - .federation - .execute_on(&self.services.client.sender, &server, request) - .await; - - for (event_id, result) in result.iter().flat_map(|resp| resp.pdus.iter()) { - if let Err(e) = result { - warn!( - %txn_id, %server, - "error sending PDU {event_id} to remote server: {e:?}" - ); - } - } - - match result { - | Err(error) => Err((Destination::Federation(server), error)), - | Ok(_) => Ok(Destination::Federation(server)), - } - } - - /// This does not return a full `Pdu` it is only to satisfy ruma's types. - pub async fn convert_to_outgoing_federation_event( - &self, - mut pdu_json: CanonicalJsonObject, - ) -> Box { - if let Some(unsigned) = pdu_json - .get_mut("unsigned") - .and_then(|val| val.as_object_mut()) - { - unsigned.remove("transaction_id"); - } - - // room v3 and above removed the "event_id" field from remote PDU format - if let Some(room_id) = pdu_json - .get("room_id") - .and_then(|val| RoomId::parse(val.as_str()?).ok()) - { - match self.services.state.get_room_version(room_id).await { - | Ok(room_version_id) => match room_version_id { - | RoomVersionId::V1 | RoomVersionId::V2 => {}, - | _ => _ = pdu_json.remove("event_id"), - }, - | Err(_) => _ = pdu_json.remove("event_id"), - } - } else { - pdu_json.remove("event_id"); - } - - // TODO: another option would be to convert it to a canonical string to validate - // size and return a Result> - // serde_json::from_str::>( - // ruma::serde::to_canonical_json_string(pdu_json).expect("CanonicalJson is - // valid serde_json::Value"), ) - // .expect("Raw::from_value always works") - - to_raw_value(&pdu_json).expect("CanonicalJson is valid serde_json::Value") - } -} diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs deleted file mode 100644 index 64b936b6..00000000 --- a/src/service/server_keys/acquire.rs +++ /dev/null @@ -1,240 +0,0 @@ -use 
std::{ - borrow::Borrow, - collections::{BTreeMap, BTreeSet}, - time::Duration, -}; - -use conduwuit::{ - debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn, -}; -use futures::{StreamExt, stream::FuturesUnordered}; -use ruma::{ - CanonicalJsonObject, OwnedServerName, OwnedServerSigningKeyId, ServerName, - ServerSigningKeyId, api::federation::discovery::ServerSigningKeys, serde::Raw, -}; -use serde_json::value::RawValue as RawJsonValue; -use tokio::time::{Instant, timeout_at}; - -use super::key_exists; - -type Batch = BTreeMap>; - -#[implement(super::Service)] -pub async fn acquire_events_pubkeys<'a, I>(&self, events: I) -where - I: Iterator> + Send, -{ - type Batch = BTreeMap>; - type Signatures = BTreeMap>; - - let mut batch = Batch::new(); - events - .cloned() - .map(Raw::::from_json) - .map(|event| event.get_field::("signatures")) - .filter_map(FlatOk::flat_ok) - .flat_map(IntoIterator::into_iter) - .for_each(|(server, sigs)| { - batch.entry(server).or_default().extend(sigs.into_keys()); - }); - - let batch = batch - .iter() - .map(|(server, keys)| (server.borrow(), keys.iter().map(Borrow::borrow))); - - self.acquire_pubkeys(batch).await; -} - -#[implement(super::Service)] -pub async fn acquire_pubkeys<'a, S, K>(&self, batch: S) -where - S: Iterator + Send + Clone, - K: Iterator + Send + Clone, -{ - let notary_only = self.services.server.config.only_query_trusted_key_servers; - let notary_first_always = self.services.server.config.query_trusted_key_servers_first; - let notary_first_on_join = self - .services - .server - .config - .query_trusted_key_servers_first_on_join; - - let requested_servers = batch.clone().count(); - let requested_keys = batch.clone().flat_map(|(_, key_ids)| key_ids).count(); - - debug!("acquire {requested_keys} keys from {requested_servers}"); - - let mut missing = self.acquire_locals(batch).await; - let mut missing_keys = keys_count(&missing); - let mut missing_servers = missing.len(); - if 
missing_servers == 0 { - return; - } - - info!("{missing_keys} keys for {missing_servers} servers will be acquired"); - - if notary_first_always || notary_first_on_join { - missing = self.acquire_notary(missing.into_iter()).await; - missing_keys = keys_count(&missing); - missing_servers = missing.len(); - if missing_keys == 0 { - return; - } - - warn!( - "missing {missing_keys} keys for {missing_servers} servers from all notaries first" - ); - } - - if !notary_only { - missing = self.acquire_origins(missing.into_iter()).await; - missing_keys = keys_count(&missing); - missing_servers = missing.len(); - if missing_keys == 0 { - return; - } - - debug_warn!("missing {missing_keys} keys for {missing_servers} servers unreachable"); - } - - if !notary_first_always && !notary_first_on_join { - missing = self.acquire_notary(missing.into_iter()).await; - missing_keys = keys_count(&missing); - missing_servers = missing.len(); - if missing_keys == 0 { - return; - } - - debug_warn!( - "still missing {missing_keys} keys for {missing_servers} servers from all notaries." - ); - } - - if missing_keys > 0 { - warn!( - "did not obtain {missing_keys} keys for {missing_servers} servers out of \ - {requested_keys} total keys for {requested_servers} total servers." 
- ); - } - - for (server, key_ids) in missing { - debug_warn!(?server, ?key_ids, "missing"); - } -} - -#[implement(super::Service)] -async fn acquire_locals<'a, S, K>(&self, batch: S) -> Batch -where - S: Iterator + Send, - K: Iterator + Send, -{ - let mut missing = Batch::new(); - for (server, key_ids) in batch { - for key_id in key_ids { - if !self.verify_key_exists(server, key_id).await { - missing - .entry(server.into()) - .or_default() - .push(key_id.into()); - } - } - } - - missing -} - -#[implement(super::Service)] -async fn acquire_origins(&self, batch: I) -> Batch -where - I: Iterator)> + Send, -{ - let timeout = Instant::now() - .checked_add(Duration::from_secs(45)) - .expect("timeout overflows"); - - let mut requests: FuturesUnordered<_> = batch - .map(|(origin, key_ids)| self.acquire_origin(origin, key_ids, timeout)) - .collect(); - - let mut missing = Batch::new(); - while let Some((origin, key_ids)) = requests.next().await { - if !key_ids.is_empty() { - missing.insert(origin, key_ids); - } - } - - missing -} - -#[implement(super::Service)] -async fn acquire_origin( - &self, - origin: OwnedServerName, - mut key_ids: Vec, - timeout: Instant, -) -> (OwnedServerName, Vec) { - match timeout_at(timeout, self.server_request(&origin)).await { - | Err(e) => debug_warn!(?origin, "timed out: {e}"), - | Ok(Err(e)) => debug_error!(?origin, "{e}"), - | Ok(Ok(server_keys)) => { - trace!( - %origin, - ?key_ids, - ?server_keys, - "received server_keys" - ); - - self.add_signing_keys(server_keys.clone()).await; - key_ids.retain(|key_id| !key_exists(&server_keys, key_id)); - }, - } - - (origin, key_ids) -} - -#[implement(super::Service)] -async fn acquire_notary(&self, batch: I) -> Batch -where - I: Iterator)> + Send, -{ - let mut missing: Batch = batch.collect(); - for notary in self.services.globals.trusted_servers() { - let missing_keys = keys_count(&missing); - let missing_servers = missing.len(); - debug!( - "Asking notary {notary} for {missing_keys} missing keys 
from {missing_servers} \ - servers" - ); - - let batch = missing - .iter() - .map(|(server, keys)| (server.borrow(), keys.iter().map(Borrow::borrow))); - - match self.batch_notary_request(notary, batch).await { - | Err(e) => error!("Failed to contact notary {notary:?}: {e}"), - | Ok(results) => - for server_keys in results { - self.acquire_notary_result(&mut missing, server_keys).await; - }, - } - } - - missing -} - -#[implement(super::Service)] -async fn acquire_notary_result(&self, missing: &mut Batch, server_keys: ServerSigningKeys) { - let server = &server_keys.server_name; - self.add_signing_keys(server_keys.clone()).await; - - if let Some(key_ids) = missing.get_mut(server) { - key_ids.retain(|key_id| key_exists(&server_keys, key_id)); - if key_ids.is_empty() { - missing.remove(server); - } - } -} - -fn keys_count(batch: &Batch) -> usize { - batch.iter().flat_map(|(_, key_ids)| key_ids.iter()).count() -} diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs deleted file mode 100644 index f9c5bdaf..00000000 --- a/src/service/server_keys/get.rs +++ /dev/null @@ -1,138 +0,0 @@ -use std::borrow::Borrow; - -use conduwuit::{Err, Result, implement}; -use ruma::{ - CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId, - api::federation::discovery::VerifyKey, -}; - -use super::{PubKeyMap, PubKeys, extract_key}; - -#[implement(super::Service)] -pub async fn get_event_keys( - &self, - object: &CanonicalJsonObject, - version: &RoomVersionId, -) -> Result { - use ruma::signatures::required_keys; - - let required = match required_keys(object, version) { - | Ok(required) => required, - | Err(e) => { - return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")); - }, - }; - - let batch = required - .iter() - .map(|(s, ids)| (s.borrow(), ids.iter().map(Borrow::borrow))); - - Ok(self.get_pubkeys(batch).await) -} - -#[implement(super::Service)] -pub async fn get_pubkeys<'a, S, K>(&self, batch: S) -> PubKeyMap -where - 
S: Iterator + Send, - K: Iterator + Send, -{ - let mut keys = PubKeyMap::new(); - for (server, key_ids) in batch { - let pubkeys = self.get_pubkeys_for(server, key_ids).await; - keys.insert(server.into(), pubkeys); - } - - keys -} - -#[implement(super::Service)] -pub async fn get_pubkeys_for<'a, I>(&self, origin: &ServerName, key_ids: I) -> PubKeys -where - I: Iterator + Send, -{ - let mut keys = PubKeys::new(); - for key_id in key_ids { - if let Ok(verify_key) = self.get_verify_key(origin, key_id).await { - keys.insert(key_id.into(), verify_key.key); - } - } - - keys -} - -#[implement(super::Service)] -pub async fn get_verify_key( - &self, - origin: &ServerName, - key_id: &ServerSigningKeyId, -) -> Result { - let notary_first = self.services.server.config.query_trusted_key_servers_first; - let notary_only = self.services.server.config.only_query_trusted_key_servers; - - if let Some(result) = self.verify_keys_for(origin).await.remove(key_id) { - return Ok(result); - } - - if notary_first { - if let Ok(result) = self.get_verify_key_from_notaries(origin, key_id).await { - return Ok(result); - } - } - - if !notary_only { - if let Ok(result) = self.get_verify_key_from_origin(origin, key_id).await { - return Ok(result); - } - } - - if !notary_first { - if let Ok(result) = self.get_verify_key_from_notaries(origin, key_id).await { - return Ok(result); - } - } - - Err!(BadServerResponse(debug_error!( - ?key_id, - ?origin, - "Failed to fetch federation signing-key" - ))) -} - -#[implement(super::Service)] -async fn get_verify_key_from_notaries( - &self, - origin: &ServerName, - key_id: &ServerSigningKeyId, -) -> Result { - for notary in self.services.globals.trusted_servers() { - if let Ok(server_keys) = self.notary_request(notary, origin).await { - for server_key in server_keys.clone() { - self.add_signing_keys(server_key).await; - } - - for server_key in server_keys { - if let Some(result) = extract_key(server_key, key_id) { - return Ok(result); - } - } - } - } - - 
Err!(Request(NotFound("Failed to fetch signing-key from notaries"))) -} - -#[implement(super::Service)] -async fn get_verify_key_from_origin( - &self, - origin: &ServerName, - key_id: &ServerSigningKeyId, -) -> Result { - if let Ok(server_key) = self.server_request(origin).await { - self.add_signing_keys(server_key.clone()).await; - if let Some(result) = extract_key(server_key, key_id) { - return Ok(result); - } - } - - Err!(Request(NotFound("Failed to fetch signing-key from origin"))) -} diff --git a/src/service/server_keys/keypair.rs b/src/service/server_keys/keypair.rs deleted file mode 100644 index 259c37fb..00000000 --- a/src/service/server_keys/keypair.rs +++ /dev/null @@ -1,65 +0,0 @@ -use std::sync::Arc; - -use conduwuit::{Result, debug, debug_info, err, error, utils, utils::string_from_bytes}; -use database::Database; -use ruma::{api::federation::discovery::VerifyKey, serde::Base64, signatures::Ed25519KeyPair}; - -use super::VerifyKeys; - -pub(super) fn init(db: &Arc) -> Result<(Box, VerifyKeys)> { - let keypair = load(db).inspect_err(|_e| { - error!("Keypair invalid. 
Deleting..."); - remove(db); - })?; - - let verify_key = VerifyKey { - key: Base64::new(keypair.public_key().to_vec()), - }; - - let id = format!("ed25519:{}", keypair.version()); - let verify_keys: VerifyKeys = [(id.try_into()?, verify_key)].into(); - - Ok((keypair, verify_keys)) -} - -fn load(db: &Arc) -> Result> { - let (version, key) = db["global"] - .get_blocking(b"keypair") - .map(|ref val| { - // database deserializer is having trouble with this so it's manual for now - let mut elems = val.split(|&b| b == b'\xFF'); - let vlen = elems.next().expect("invalid keypair entry").len(); - let ver = string_from_bytes(&val[..vlen]).expect("invalid keypair version"); - let der = val[vlen.saturating_add(1)..].to_vec(); - debug!("Found existing Ed25519 keypair: {ver:?}"); - (ver, der) - }) - .or_else(|e| { - assert!(e.is_not_found(), "unexpected error fetching keypair"); - create(db) - })?; - - let key = Ed25519KeyPair::from_der(&key, version) - .map_err(|e| err!("Failed to load ed25519 keypair from der: {e:?}"))?; - - Ok(Box::new(key)) -} - -fn create(db: &Arc) -> Result<(String, Vec)> { - let keypair = Ed25519KeyPair::generate() - .map_err(|e| err!("Failed to generate new ed25519 keypair: {e:?}"))?; - - let id = utils::rand::string(8); - debug_info!("Generated new Ed25519 keypair: {id:?}"); - - let value: (String, Vec) = (id, keypair.to_vec()); - db["global"].raw_put(b"keypair", &value); - - Ok(value) -} - -#[inline] -fn remove(db: &Arc) { - let global = &db["global"]; - global.remove(b"keypair"); -} diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs deleted file mode 100644 index bf6799ba..00000000 --- a/src/service/server_keys/mod.rs +++ /dev/null @@ -1,211 +0,0 @@ -mod acquire; -mod get; -mod keypair; -mod request; -mod sign; -mod verify; - -use std::{collections::BTreeMap, sync::Arc, time::Duration}; - -use conduwuit::{ - Result, Server, implement, - utils::{IterStream, timepoint_from_now}, -}; -use database::{Deserialized, Json, Map}; 
-use futures::StreamExt; -use ruma::{ - CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, - ServerName, ServerSigningKeyId, - api::federation::discovery::{ServerSigningKeys, VerifyKey}, - serde::Raw, - signatures::{Ed25519KeyPair, PublicKeyMap, PublicKeySet}, -}; -use serde_json::value::RawValue as RawJsonValue; - -use crate::{Dep, globals, sending}; - -pub struct Service { - keypair: Box, - verify_keys: VerifyKeys, - minimum_valid: Duration, - services: Services, - db: Data, -} - -struct Services { - globals: Dep, - sending: Dep, - server: Arc, -} - -struct Data { - server_signingkeys: Arc, -} - -pub type VerifyKeys = BTreeMap; -pub type PubKeyMap = PublicKeyMap; -pub type PubKeys = PublicKeySet; - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - let minimum_valid = Duration::from_secs(3600); - - let (keypair, verify_keys) = keypair::init(args.db)?; - debug_assert!(verify_keys.len() == 1, "only one active verify_key supported"); - - Ok(Arc::new(Self { - keypair, - verify_keys, - minimum_valid, - services: Services { - globals: args.depend::("globals"), - sending: args.depend::("sending"), - server: args.server.clone(), - }, - db: Data { - server_signingkeys: args.db["server_signingkeys"].clone(), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -#[implement(Service)] -#[inline] -pub fn keypair(&self) -> &Ed25519KeyPair { &self.keypair } - -#[implement(Service)] -#[inline] -pub fn active_key_id(&self) -> &ServerSigningKeyId { self.active_verify_key().0 } - -#[implement(Service)] -#[inline] -pub fn active_verify_key(&self) -> (&ServerSigningKeyId, &VerifyKey) { - debug_assert!(self.verify_keys.len() <= 1, "more than one active verify_key"); - self.verify_keys - .iter() - .next() - .map(|(id, key)| (id.as_ref(), key)) - .expect("missing active verify_key") -} - -#[implement(Service)] -async fn add_signing_keys(&self, new_keys: ServerSigningKeys) 
{ - let origin = &new_keys.server_name; - - // (timo) Not atomic, but this is not critical - let mut keys: ServerSigningKeys = self - .db - .server_signingkeys - .get(origin) - .await - .deserialized() - .unwrap_or_else(|_| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); - - keys.verify_keys.extend(new_keys.verify_keys); - keys.old_verify_keys.extend(new_keys.old_verify_keys); - self.db.server_signingkeys.raw_put(origin, Json(&keys)); -} - -#[implement(Service)] -pub async fn required_keys_exist( - &self, - object: &CanonicalJsonObject, - version: &RoomVersionId, -) -> bool { - use ruma::signatures::required_keys; - - let Ok(required_keys) = required_keys(object, version) else { - return false; - }; - - required_keys - .iter() - .flat_map(|(server, key_ids)| key_ids.iter().map(move |key_id| (server, key_id))) - .stream() - .all(|(server, key_id)| self.verify_key_exists(server, key_id)) - .await -} - -#[implement(Service)] -pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool { - type KeysMap<'a> = BTreeMap<&'a ServerSigningKeyId, &'a RawJsonValue>; - - let Ok(keys) = self - .db - .server_signingkeys - .get(origin) - .await - .deserialized::>() - else { - return false; - }; - - if let Ok(Some(verify_keys)) = keys.get_field::>("verify_keys") { - if verify_keys.contains_key(key_id) { - return true; - } - } - - if let Ok(Some(old_verify_keys)) = keys.get_field::>("old_verify_keys") { - if old_verify_keys.contains_key(key_id) { - return true; - } - } - - false -} - -#[implement(Service)] -pub async fn verify_keys_for(&self, origin: &ServerName) -> VerifyKeys { - let mut keys = self - .signing_keys_for(origin) - .await - .map(|keys| merge_old_keys(keys).verify_keys) - .unwrap_or(BTreeMap::new()); - - if self.services.globals.server_is_ours(origin) { - keys.extend(self.verify_keys.clone().into_iter()); - } - - keys -} - -#[implement(Service)] -pub 
async fn signing_keys_for(&self, origin: &ServerName) -> Result { - self.db.server_signingkeys.get(origin).await.deserialized() -} - -#[implement(Service)] -fn minimum_valid_ts(&self) -> MilliSecondsSinceUnixEpoch { - let timepoint = - timepoint_from_now(self.minimum_valid).expect("SystemTime should not overflow"); - MilliSecondsSinceUnixEpoch::from_system_time(timepoint).expect("UInt should not overflow") -} - -fn merge_old_keys(mut keys: ServerSigningKeys) -> ServerSigningKeys { - keys.verify_keys.extend( - keys.old_verify_keys - .clone() - .into_iter() - .map(|(key_id, old)| (key_id, VerifyKey::new(old.key))), - ); - - keys -} - -fn extract_key(mut keys: ServerSigningKeys, key_id: &ServerSigningKeyId) -> Option { - keys.verify_keys.remove(key_id).or_else(|| { - keys.old_verify_keys - .remove(key_id) - .map(|old| VerifyKey::new(old.key)) - }) -} - -fn key_exists(keys: &ServerSigningKeys, key_id: &ServerSigningKeyId) -> bool { - keys.verify_keys.contains_key(key_id) || keys.old_verify_keys.contains_key(key_id) -} diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs deleted file mode 100644 index d9907616..00000000 --- a/src/service/server_keys/request.rs +++ /dev/null @@ -1,124 +0,0 @@ -use std::{collections::BTreeMap, fmt::Debug}; - -use conduwuit::{Err, Result, debug, implement}; -use ruma::{ - OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, - api::federation::discovery::{ - ServerSigningKeys, get_remote_server_keys, - get_remote_server_keys_batch::{self, v2::QueryCriteria}, - get_server_keys, - }, -}; - -#[implement(super::Service)] -pub(super) async fn batch_notary_request<'a, S, K>( - &self, - notary: &ServerName, - batch: S, -) -> Result> -where - S: Iterator + Send, - K: Iterator + Send, -{ - use get_remote_server_keys_batch::v2::Request; - type RumaBatch = BTreeMap>; - - let criteria = QueryCriteria { - minimum_valid_until_ts: Some(self.minimum_valid_ts()), - }; - - let mut server_keys = 
batch.fold(RumaBatch::new(), |mut batch, (server, key_ids)| { - batch - .entry(server.into()) - .or_default() - .extend(key_ids.map(|key_id| (key_id.into(), criteria.clone()))); - - batch - }); - - debug_assert!(!server_keys.is_empty(), "empty batch request to notary"); - - let mut results = Vec::new(); - while let Some(batch) = server_keys - .keys() - .rev() - .take(self.services.server.config.trusted_server_batch_size) - .next_back() - .cloned() - { - let request = Request { - server_keys: server_keys.split_off(&batch), - }; - - debug!( - ?notary, - ?batch, - remaining = %server_keys.len(), - requesting = ?request.server_keys.keys(), - "notary request" - ); - - let response = self - .services - .sending - .send_synapse_request(notary, request) - .await? - .server_keys - .into_iter() - .map(|key| key.deserialize()) - .filter_map(Result::ok); - - results.extend(response); - } - - Ok(results) -} - -#[implement(super::Service)] -pub async fn notary_request( - &self, - notary: &ServerName, - target: &ServerName, -) -> Result + Clone + Debug + Send + use<>> { - use get_remote_server_keys::v2::Request; - - let request = Request { - server_name: target.into(), - minimum_valid_until_ts: self.minimum_valid_ts(), - }; - - let response = self - .services - .sending - .send_federation_request(notary, request) - .await? 
- .server_keys - .into_iter() - .map(|key| key.deserialize()) - .filter_map(Result::ok); - - Ok(response) -} - -#[implement(super::Service)] -pub async fn server_request(&self, target: &ServerName) -> Result { - use get_server_keys::v2::Request; - - let server_signing_key = self - .services - .sending - .send_federation_request(target, Request::new()) - .await - .map(|response| response.server_key) - .and_then(|key| key.deserialize().map_err(Into::into))?; - - if server_signing_key.server_name != target { - return Err!(BadServerResponse(debug_warn!( - requested = ?target, - response = ?server_signing_key.server_name, - "Server responded with bogus server_name" - ))); - } - - Ok(server_signing_key) -} diff --git a/src/service/server_keys/sign.rs b/src/service/server_keys/sign.rs deleted file mode 100644 index e8cc485d..00000000 --- a/src/service/server_keys/sign.rs +++ /dev/null @@ -1,22 +0,0 @@ -use conduwuit::{Result, implement}; -use ruma::{CanonicalJsonObject, RoomVersionId}; - -#[implement(super::Service)] -pub fn sign_json(&self, object: &mut CanonicalJsonObject) -> Result { - use ruma::signatures::sign_json; - - let server_name = self.services.globals.server_name().as_str(); - sign_json(server_name, self.keypair(), object).map_err(Into::into) -} - -#[implement(super::Service)] -pub fn hash_and_sign_event( - &self, - object: &mut CanonicalJsonObject, - room_version: &RoomVersionId, -) -> Result { - use ruma::signatures::hash_and_sign_event; - - let server_name = self.services.globals.server_name().as_str(); - hash_and_sign_event(server_name, self.keypair(), object, room_version).map_err(Into::into) -} diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs deleted file mode 100644 index 84433628..00000000 --- a/src/service/server_keys/verify.rs +++ /dev/null @@ -1,69 +0,0 @@ -use conduwuit::{Err, Result, implement, pdu::gen_event_id_canonical_json}; -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, 
signatures::Verified, -}; -use serde_json::value::RawValue as RawJsonValue; - -#[implement(super::Service)] -pub async fn validate_and_add_event_id( - &self, - pdu: &RawJsonValue, - room_version: &RoomVersionId, -) -> Result<(OwnedEventId, CanonicalJsonObject)> { - let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?; - if let Err(e) = self.verify_event(&value, Some(room_version)).await { - return Err!(BadServerResponse(debug_error!( - "Event {event_id} failed verification: {e:?}" - ))); - } - - value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into())); - - Ok((event_id, value)) -} - -#[implement(super::Service)] -pub async fn validate_and_add_event_id_no_fetch( - &self, - pdu: &RawJsonValue, - room_version: &RoomVersionId, -) -> Result<(OwnedEventId, CanonicalJsonObject)> { - let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?; - if !self.required_keys_exist(&value, room_version).await { - return Err!(BadServerResponse(debug_warn!( - "Event {event_id} cannot be verified: missing keys." 
- ))); - } - - if let Err(e) = self.verify_event(&value, Some(room_version)).await { - return Err!(BadServerResponse(debug_error!( - "Event {event_id} failed verification: {e:?}" - ))); - } - - value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into())); - - Ok((event_id, value)) -} - -#[implement(super::Service)] -pub async fn verify_event( - &self, - event: &CanonicalJsonObject, - room_version: Option<&RoomVersionId>, -) -> Result { - let room_version = room_version.unwrap_or(&RoomVersionId::V11); - let keys = self.get_event_keys(event, room_version).await?; - ruma::signatures::verify_event(&keys, event, room_version).map_err(Into::into) -} - -#[implement(super::Service)] -pub async fn verify_json( - &self, - event: &CanonicalJsonObject, - room_version: Option<&RoomVersionId>, -) -> Result { - let room_version = room_version.unwrap_or(&RoomVersionId::V11); - let keys = self.get_event_keys(event, room_version).await?; - ruma::signatures::verify_json(&keys, event.clone()).map_err(Into::into) -} diff --git a/src/service/service.rs b/src/service/service.rs deleted file mode 100644 index 574efd8f..00000000 --- a/src/service/service.rs +++ /dev/null @@ -1,182 +0,0 @@ -use std::{ - any::Any, - collections::BTreeMap, - fmt::Write, - ops::Deref, - sync::{Arc, OnceLock, RwLock, Weak}, -}; - -use async_trait::async_trait; -use conduwuit::{Err, Result, Server, err, error::inspect_log, utils::string::SplitInfallible}; -use database::Database; - -/// Abstract interface for a Service -#[async_trait] -pub(crate) trait Service: Any + Send + Sync { - /// Implement the construction of the service instance. Services are - /// generally singletons so expect this to only be called once for a - /// service type. Note that it may be called again after a server reload, - /// but the prior instance will have been dropped first. Failure will - /// shutdown the server with an error. 
- fn build(args: Args<'_>) -> Result> - where - Self: Sized; - - /// Implement the service's worker loop. The service manager spawns a - /// task and calls this function after all services have been built. - async fn worker(self: Arc) -> Result<()> { Ok(()) } - - /// Interrupt the service. This is sent to initiate a graceful shutdown. - /// The service worker should return from its work loop. - fn interrupt(&self) {} - - /// Clear any caches or similar runtime state. - async fn clear_cache(&self) {} - - /// Memory usage report in a markdown string. - async fn memory_usage(&self, _out: &mut (dyn Write + Send)) -> Result { Ok(()) } - - /// Return the name of the service. - /// i.e. `crate::service::make_name(std::module_path!())` - fn name(&self) -> &str; - - /// Return true if the service worker opts out of the tokio cooperative - /// budgeting. This can reduce tail latency at the risk of event loop - /// starvation. - fn unconstrained(&self) -> bool { false } -} - -/// Args are passed to `Service::build` when a service is constructed. This -/// allows for arguments to change with limited impact to the many services. -pub(crate) struct Args<'a> { - pub(crate) server: &'a Arc, - pub(crate) db: &'a Arc, - pub(crate) service: &'a Arc, -} - -/// Dep is a reference to a service used within another service. -/// Circular-dependencies between services require this indirection. -pub(crate) struct Dep { - dep: OnceLock>, - service: Weak, - name: &'static str, -} - -pub(crate) type Map = RwLock; -pub(crate) type MapType = BTreeMap; -pub(crate) type MapVal = (Weak, Weak); -pub(crate) type MapKey = String; - -/// SAFETY: Workaround for a compiler limitation (or bug) where it is Hard to -/// prove the Sync'ness of Dep because services contain circular references -/// to other services through Dep's. The Sync'ness of Dep can still be -/// proved without unsafety by declaring the crate-attribute #![recursion_limit -/// = "192"] but this may take a while. 
Re-evaluate this when a new trait-solver -/// (such as Chalk) becomes available. -unsafe impl Sync for Dep {} - -/// SAFETY: Ancillary to unsafe impl Sync; while this is not needed to prevent -/// violating the recursion_limit, the trait-solver still spends an inordinate -/// amount of time to prove this. -unsafe impl Send for Dep {} - -impl Deref for Dep { - type Target = Arc; - - /// Dereference a dependency. The dependency must be ready or panics. - #[inline] - fn deref(&self) -> &Self::Target { - self.dep.get_or_init( - #[inline(never)] - || self.init(), - ) - } -} - -impl Dep { - #[inline] - fn init(&self) -> Arc { - let service = self - .service - .upgrade() - .expect("services map exists for dependency initialization."); - - require::(&service, self.name) - } -} - -impl<'a> Args<'a> { - /// Create a lazy-reference to a service when constructing another Service. - #[inline] - pub(crate) fn depend(&'a self, name: &'static str) -> Dep { - Dep:: { - dep: OnceLock::new(), - service: Arc::downgrade(self.service), - name, - } - } - - /// Create a reference immediately to a service when constructing another - /// Service. The other service must be constructed. - #[inline] - pub(crate) fn require(&'a self, name: &str) -> Arc { - require::(self.service, name) - } -} - -/// Reference a Service by name. Panics if the Service does not exist or was -/// incorrectly cast. -#[inline] -fn require(map: &Map, name: &str) -> Arc { - try_get::(map, name) - .inspect_err(inspect_log) - .expect("Failure to reference service required by another service.") -} - -/// Reference a Service by name. Returns None if the Service does not exist, but -/// panics if incorrectly cast. -/// -/// # Panics -/// Incorrect type is not a silent failure (None) as the type never has a reason -/// to be incorrect. 
-pub(crate) fn get(map: &Map, name: &str) -> Option> -where - T: Any + Send + Sync + Sized, -{ - map.read() - .expect("locked for reading") - .get(name) - .map(|(_, s)| { - s.upgrade().map(|s| { - s.downcast::() - .expect("Service must be correctly downcast.") - }) - })? -} - -/// Reference a Service by name. Returns Err if the Service does not exist or -/// was incorrectly cast. -pub(crate) fn try_get(map: &Map, name: &str) -> Result> -where - T: Any + Send + Sync + Sized, -{ - map.read() - .expect("locked for reading") - .get(name) - .map_or_else( - || Err!("Service {name:?} does not exist or has not been built yet."), - |(_, s)| { - s.upgrade().map_or_else( - || Err!("Service {name:?} no longer exists."), - |s| { - s.downcast::() - .map_err(|_| err!("Service {name:?} must be correctly downcast.")) - }, - ) - }, - ) -} - -/// Utility for service implementations; see Service::name() in the trait. -#[inline] -pub(crate) fn make_name(module_path: &str) -> &str { module_path.split_once_infallible("::").1 } diff --git a/src/service/services.rs b/src/service/services.rs deleted file mode 100644 index daece245..00000000 --- a/src/service/services.rs +++ /dev/null @@ -1,242 +0,0 @@ -use std::{ - any::Any, - collections::BTreeMap, - sync::{Arc, RwLock}, -}; - -use conduwuit::{Result, Server, debug, debug_info, info, trace, utils::stream::IterStream}; -use database::Database; -use futures::{Stream, StreamExt, TryStreamExt}; -use tokio::sync::Mutex; - -use crate::{ - account_data, admin, announcements, appservice, client, config, emergency, federation, - globals, key_backups, - manager::Manager, - media, moderation, presence, pusher, resolver, rooms, sending, server_keys, service, - service::{Args, Map, Service}, - sync, transaction_ids, uiaa, users, -}; - -pub struct Services { - pub account_data: Arc, - pub admin: Arc, - pub appservice: Arc, - pub config: Arc, - pub client: Arc, - pub emergency: Arc, - pub globals: Arc, - pub key_backups: Arc, - pub media: Arc, - pub 
presence: Arc, - pub pusher: Arc, - pub resolver: Arc, - pub rooms: rooms::Service, - pub federation: Arc, - pub sending: Arc, - pub server_keys: Arc, - pub sync: Arc, - pub transaction_ids: Arc, - pub uiaa: Arc, - pub users: Arc, - pub moderation: Arc, - pub announcements: Arc, - - manager: Mutex>>, - pub(crate) service: Arc, - pub server: Arc, - pub db: Arc, -} - -impl Services { - #[allow(clippy::cognitive_complexity)] - pub async fn build(server: Arc) -> Result> { - let db = Database::open(&server).await?; - let service: Arc = Arc::new(RwLock::new(BTreeMap::new())); - macro_rules! build { - ($tyname:ty) => {{ - let built = <$tyname>::build(Args { - db: &db, - server: &server, - service: &service, - })?; - add_service(&service, built.clone(), built.clone()); - built - }}; - } - - Ok(Arc::new(Self { - account_data: build!(account_data::Service), - admin: build!(admin::Service), - appservice: build!(appservice::Service), - resolver: build!(resolver::Service), - client: build!(client::Service), - config: build!(config::Service), - emergency: build!(emergency::Service), - globals: build!(globals::Service), - key_backups: build!(key_backups::Service), - media: build!(media::Service), - presence: build!(presence::Service), - pusher: build!(pusher::Service), - rooms: rooms::Service { - alias: build!(rooms::alias::Service), - auth_chain: build!(rooms::auth_chain::Service), - directory: build!(rooms::directory::Service), - event_handler: build!(rooms::event_handler::Service), - lazy_loading: build!(rooms::lazy_loading::Service), - metadata: build!(rooms::metadata::Service), - outlier: build!(rooms::outlier::Service), - pdu_metadata: build!(rooms::pdu_metadata::Service), - read_receipt: build!(rooms::read_receipt::Service), - search: build!(rooms::search::Service), - short: build!(rooms::short::Service), - spaces: build!(rooms::spaces::Service), - state: build!(rooms::state::Service), - state_accessor: build!(rooms::state_accessor::Service), - state_cache: 
build!(rooms::state_cache::Service), - state_compressor: build!(rooms::state_compressor::Service), - threads: build!(rooms::threads::Service), - timeline: build!(rooms::timeline::Service), - typing: build!(rooms::typing::Service), - user: build!(rooms::user::Service), - }, - federation: build!(federation::Service), - sending: build!(sending::Service), - server_keys: build!(server_keys::Service), - sync: build!(sync::Service), - transaction_ids: build!(transaction_ids::Service), - uiaa: build!(uiaa::Service), - users: build!(users::Service), - moderation: build!(moderation::Service), - announcements: build!(announcements::Service), - - manager: Mutex::new(None), - service, - server, - db, - })) - } - - pub async fn start(self: &Arc) -> Result> { - debug_info!("Starting services..."); - - self.admin.set_services(Some(Arc::clone(self)).as_ref()); - super::migrations::migrations(self).await?; - self.manager - .lock() - .await - .insert(Manager::new(self)) - .clone() - .start() - .await?; - - // reset dormant online/away statuses to offline, and set the server user as - // online - if self.server.config.allow_local_presence && !self.db.is_read_only() { - self.presence.unset_all_presence().await; - _ = self - .presence - .ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Online) - .await; - } - - debug_info!("Services startup complete."); - Ok(Arc::clone(self)) - } - - pub async fn stop(&self) { - info!("Shutting down services..."); - - // set the server user as offline - if self.server.config.allow_local_presence && !self.db.is_read_only() { - _ = self - .presence - .ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Offline) - .await; - } - - self.interrupt(); - if let Some(manager) = self.manager.lock().await.as_ref() { - manager.stop().await; - } - - self.admin.set_services(None); - - debug_info!("Services shutdown complete."); - } - - pub async fn poll(&self) -> Result<()> { - if let Some(manager) = 
self.manager.lock().await.as_ref() { - return manager.poll().await; - } - - Ok(()) - } - - pub async fn clear_cache(&self) { - self.services() - .for_each(|service| async move { - service.clear_cache().await; - }) - .await; - } - - pub async fn memory_usage(&self) -> Result { - self.services() - .map(Ok) - .try_fold(String::new(), |mut out, service| async move { - service.memory_usage(&mut out).await?; - Ok(out) - }) - .await - } - - fn interrupt(&self) { - debug!("Interrupting services..."); - for (name, (service, ..)) in self.service.read().expect("locked for reading").iter() { - if let Some(service) = service.upgrade() { - trace!("Interrupting {name}"); - service.interrupt(); - } - } - } - - /// Iterate from snapshot of the services map - fn services(&self) -> impl Stream> + Send { - self.service - .read() - .expect("locked for reading") - .values() - .filter_map(|val| val.0.upgrade()) - .collect::>() - .into_iter() - .stream() - } - - #[inline] - pub fn try_get(&self, name: &str) -> Result> - where - T: Any + Send + Sync + Sized, - { - service::try_get::(&self.service, name) - } - - #[inline] - pub fn get(&self, name: &str) -> Option> - where - T: Any + Send + Sync + Sized, - { - service::get::(&self.service, name) - } -} - -#[allow(clippy::needless_pass_by_value)] -fn add_service(map: &Arc, s: Arc, a: Arc) { - let name = s.name(); - let len = map.read().expect("locked for reading").len(); - - trace!("built service #{len}: {name:?}"); - map.write() - .expect("locked for writing") - .insert(name.to_owned(), (Arc::downgrade(&s), Arc::downgrade(&a))); -} diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs deleted file mode 100644 index b095d2c1..00000000 --- a/src/service/sync/mod.rs +++ /dev/null @@ -1,464 +0,0 @@ -mod watch; - -use std::{ - collections::{BTreeMap, BTreeSet}, - sync::{Arc, Mutex, Mutex as StdMutex}, -}; - -use conduwuit::{Result, Server}; -use database::Map; -use ruma::{ - OwnedDeviceId, OwnedRoomId, OwnedUserId, - 
api::client::sync::sync_events::{ - self, - v4::{ExtensionsConfig, SyncRequestList}, - v5, - }, -}; - -use crate::{Dep, rooms}; - -pub struct Service { - db: Data, - services: Services, - connections: DbConnections, - snake_connections: DbConnections, -} - -pub struct Data { - todeviceid_events: Arc, - userroomid_joined: Arc, - userroomid_invitestate: Arc, - userroomid_leftstate: Arc, - userroomid_notificationcount: Arc, - userroomid_highlightcount: Arc, - pduid_pdu: Arc, - keychangeid_userid: Arc, - roomusertype_roomuserdataid: Arc, - readreceiptid_readreceipt: Arc, - userid_lastonetimekeyupdate: Arc, -} - -struct Services { - server: Arc, - short: Dep, - state_cache: Dep, - typing: Dep, -} - -struct SlidingSyncCache { - lists: BTreeMap, - subscriptions: BTreeMap, - // For every room, the roomsince number - known_rooms: BTreeMap>, - extensions: ExtensionsConfig, -} - -#[derive(Default)] -struct SnakeSyncCache { - lists: BTreeMap, - subscriptions: BTreeMap, - known_rooms: BTreeMap>, - extensions: v5::request::Extensions, -} - -type DbConnections = Mutex>; -type DbConnectionsKey = (OwnedUserId, OwnedDeviceId, String); -type DbConnectionsVal = Arc>; -type SnakeConnectionsKey = (OwnedUserId, OwnedDeviceId, Option); -type SnakeConnectionsVal = Arc>; - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - todeviceid_events: args.db["todeviceid_events"].clone(), - userroomid_joined: args.db["userroomid_joined"].clone(), - userroomid_invitestate: args.db["userroomid_invitestate"].clone(), - userroomid_leftstate: args.db["userroomid_leftstate"].clone(), - userroomid_notificationcount: args.db["userroomid_notificationcount"].clone(), - userroomid_highlightcount: args.db["userroomid_highlightcount"].clone(), - pduid_pdu: args.db["pduid_pdu"].clone(), - keychangeid_userid: args.db["keychangeid_userid"].clone(), - roomusertype_roomuserdataid: args.db["roomusertype_roomuserdataid"].clone(), - 
readreceiptid_readreceipt: args.db["readreceiptid_readreceipt"].clone(), - userid_lastonetimekeyupdate: args.db["userid_lastonetimekeyupdate"].clone(), - }, - services: Services { - server: args.server.clone(), - short: args.depend::("rooms::short"), - state_cache: args.depend::("rooms::state_cache"), - typing: args.depend::("rooms::typing"), - }, - connections: StdMutex::new(BTreeMap::new()), - snake_connections: StdMutex::new(BTreeMap::new()), - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -impl Service { - pub fn snake_connection_cached(&self, key: &SnakeConnectionsKey) -> bool { - self.snake_connections - .lock() - .expect("locked") - .contains_key(key) - } - - pub fn forget_snake_sync_connection(&self, key: &SnakeConnectionsKey) { - self.snake_connections.lock().expect("locked").remove(key); - } - - pub fn remembered(&self, key: &DbConnectionsKey) -> bool { - self.connections.lock().expect("locked").contains_key(key) - } - - pub fn forget_sync_request_connection(&self, key: &DbConnectionsKey) { - self.connections.lock().expect("locked").remove(key); - } - - pub fn update_snake_sync_request_with_cache( - &self, - snake_key: &SnakeConnectionsKey, - request: &mut v5::Request, - ) -> BTreeMap> { - let mut cache = self.snake_connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry(snake_key.clone()) - .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), - ); - let cached = &mut cached.lock().expect("locked"); - drop(cache); - - //v5::Request::try_from_http_request(req, path_args); - for (list_id, list) in &mut request.lists { - if let Some(cached_list) = cached.lists.get(list_id) { - list_or_sticky( - &mut list.room_details.required_state, - &cached_list.room_details.required_state, - ); - some_or_sticky(&mut list.include_heroes, cached_list.include_heroes); - - match (&mut list.filters, cached_list.filters.clone()) { - | (Some(filters), Some(cached_filters)) => { - 
some_or_sticky(&mut filters.is_invite, cached_filters.is_invite); - // TODO (morguldir): Find out how a client can unset this, probably need - // to change into an option inside ruma - list_or_sticky( - &mut filters.not_room_types, - &cached_filters.not_room_types, - ); - }, - | (_, Some(cached_filters)) => list.filters = Some(cached_filters), - | (Some(list_filters), _) => list.filters = Some(list_filters.clone()), - | (..) => {}, - } - } - cached.lists.insert(list_id.clone(), list.clone()); - } - - cached - .subscriptions - .extend(request.room_subscriptions.clone()); - request - .room_subscriptions - .extend(cached.subscriptions.clone()); - - request.extensions.e2ee.enabled = request - .extensions - .e2ee - .enabled - .or(cached.extensions.e2ee.enabled); - - request.extensions.to_device.enabled = request - .extensions - .to_device - .enabled - .or(cached.extensions.to_device.enabled); - - request.extensions.account_data.enabled = request - .extensions - .account_data - .enabled - .or(cached.extensions.account_data.enabled); - request.extensions.account_data.lists = request - .extensions - .account_data - .lists - .clone() - .or_else(|| cached.extensions.account_data.lists.clone()); - request.extensions.account_data.rooms = request - .extensions - .account_data - .rooms - .clone() - .or_else(|| cached.extensions.account_data.rooms.clone()); - - some_or_sticky(&mut request.extensions.typing.enabled, cached.extensions.typing.enabled); - some_or_sticky( - &mut request.extensions.typing.rooms, - cached.extensions.typing.rooms.clone(), - ); - some_or_sticky( - &mut request.extensions.typing.lists, - cached.extensions.typing.lists.clone(), - ); - some_or_sticky( - &mut request.extensions.receipts.enabled, - cached.extensions.receipts.enabled, - ); - some_or_sticky( - &mut request.extensions.receipts.rooms, - cached.extensions.receipts.rooms.clone(), - ); - some_or_sticky( - &mut request.extensions.receipts.lists, - cached.extensions.receipts.lists.clone(), - ); - - 
cached.extensions = request.extensions.clone(); - cached.known_rooms.clone() - } - - pub fn update_sync_request_with_cache( - &self, - key: &SnakeConnectionsKey, - request: &mut sync_events::v4::Request, - ) -> BTreeMap> { - let Some(conn_id) = request.conn_id.clone() else { - return BTreeMap::new(); - }; - - let key = into_db_key(key.0.clone(), key.1.clone(), conn_id); - let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry(key).or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - })); - let cached = &mut cached.lock().expect("locked"); - drop(cache); - - for (list_id, list) in &mut request.lists { - if let Some(cached_list) = cached.lists.get(list_id) { - list_or_sticky(&mut list.sort, &cached_list.sort); - list_or_sticky( - &mut list.room_details.required_state, - &cached_list.room_details.required_state, - ); - some_or_sticky( - &mut list.room_details.timeline_limit, - cached_list.room_details.timeline_limit, - ); - some_or_sticky( - &mut list.include_old_rooms, - cached_list.include_old_rooms.clone(), - ); - match (&mut list.filters, cached_list.filters.clone()) { - | (Some(filter), Some(cached_filter)) => { - some_or_sticky(&mut filter.is_dm, cached_filter.is_dm); - list_or_sticky(&mut filter.spaces, &cached_filter.spaces); - some_or_sticky(&mut filter.is_encrypted, cached_filter.is_encrypted); - some_or_sticky(&mut filter.is_invite, cached_filter.is_invite); - list_or_sticky(&mut filter.room_types, &cached_filter.room_types); - // Should be made possible to change - list_or_sticky(&mut filter.not_room_types, &cached_filter.not_room_types); - some_or_sticky(&mut filter.room_name_like, cached_filter.room_name_like); - list_or_sticky(&mut filter.tags, &cached_filter.tags); - list_or_sticky(&mut filter.not_tags, &cached_filter.not_tags); - }, - | (_, Some(cached_filters)) 
=> list.filters = Some(cached_filters), - | (Some(list_filters), _) => list.filters = Some(list_filters.clone()), - | (..) => {}, - } - list_or_sticky(&mut list.bump_event_types, &cached_list.bump_event_types); - } - cached.lists.insert(list_id.clone(), list.clone()); - } - - cached - .subscriptions - .extend(request.room_subscriptions.clone()); - request - .room_subscriptions - .extend(cached.subscriptions.clone()); - - request.extensions.e2ee.enabled = request - .extensions - .e2ee - .enabled - .or(cached.extensions.e2ee.enabled); - - request.extensions.to_device.enabled = request - .extensions - .to_device - .enabled - .or(cached.extensions.to_device.enabled); - - request.extensions.account_data.enabled = request - .extensions - .account_data - .enabled - .or(cached.extensions.account_data.enabled); - request.extensions.account_data.lists = request - .extensions - .account_data - .lists - .clone() - .or_else(|| cached.extensions.account_data.lists.clone()); - request.extensions.account_data.rooms = request - .extensions - .account_data - .rooms - .clone() - .or_else(|| cached.extensions.account_data.rooms.clone()); - - cached.extensions = request.extensions.clone(); - - cached.known_rooms.clone() - } - - pub fn update_sync_subscriptions( - &self, - key: &DbConnectionsKey, - subscriptions: BTreeMap, - ) { - let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - })); - let cached = &mut cached.lock().expect("locked"); - drop(cache); - - cached.subscriptions = subscriptions; - } - - pub fn update_sync_known_rooms( - &self, - key: &DbConnectionsKey, - list_id: String, - new_cached_rooms: BTreeSet, - globalsince: u64, - ) { - let mut cache = self.connections.lock().expect("locked"); - let cached = 
Arc::clone(cache.entry(key.clone()).or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - })); - let cached = &mut cached.lock().expect("locked"); - drop(cache); - - for (room_id, lastsince) in cached - .known_rooms - .entry(list_id.clone()) - .or_default() - .iter_mut() - { - if !new_cached_rooms.contains(room_id) { - *lastsince = 0; - } - } - let list = cached.known_rooms.entry(list_id).or_default(); - for room_id in new_cached_rooms { - list.insert(room_id, globalsince); - } - } - - pub fn update_snake_sync_known_rooms( - &self, - key: &SnakeConnectionsKey, - list_id: String, - new_cached_rooms: BTreeSet, - globalsince: u64, - ) { - assert!(key.2.is_some(), "Some(conn_id) required for this call"); - let mut cache = self.snake_connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry(key.clone()) - .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), - ); - let cached = &mut cached.lock().expect("locked"); - drop(cache); - - for (room_id, lastsince) in cached - .known_rooms - .entry(list_id.clone()) - .or_default() - .iter_mut() - { - if !new_cached_rooms.contains(room_id) { - *lastsince = 0; - } - } - let list = cached.known_rooms.entry(list_id).or_default(); - for room_id in new_cached_rooms { - list.insert(room_id, globalsince); - } - } - - pub fn update_snake_sync_subscriptions( - &self, - key: &SnakeConnectionsKey, - subscriptions: BTreeMap, - ) { - let mut cache = self.snake_connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry(key.clone()) - .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), - ); - let cached = &mut cached.lock().expect("locked"); - drop(cache); - - cached.subscriptions = subscriptions; - } -} - -#[inline] -pub fn into_snake_key(user_id: U, device_id: D, conn_id: C) -> SnakeConnectionsKey -where - U: Into, - 
D: Into, - C: Into>, -{ - (user_id.into(), device_id.into(), conn_id.into()) -} - -#[inline] -pub fn into_db_key(user_id: U, device_id: D, conn_id: C) -> DbConnectionsKey -where - U: Into, - D: Into, - C: Into, -{ - (user_id.into(), device_id.into(), conn_id.into()) -} - -/// load params from cache if body doesn't contain it, as long as it's allowed -/// in some cases we may need to allow an empty list as an actual value -fn list_or_sticky(target: &mut Vec, cached: &Vec) { - if target.is_empty() { - target.clone_from(cached); - } -} - -fn some_or_sticky(target: &mut Option, cached: Option) { - if target.is_none() { - *target = cached; - } -} diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs deleted file mode 100644 index 96981472..00000000 --- a/src/service/sync/watch.rs +++ /dev/null @@ -1,112 +0,0 @@ -use conduwuit::{Result, implement, trace}; -use futures::{FutureExt, StreamExt, pin_mut, stream::FuturesUnordered}; -use ruma::{DeviceId, UserId}; - -#[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result { - let userid_bytes = user_id.as_bytes().to_vec(); - let mut userid_prefix = userid_bytes.clone(); - userid_prefix.push(0xFF); - - let mut userdeviceid_prefix = userid_prefix.clone(); - userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); - userdeviceid_prefix.push(0xFF); - - let mut futures = FuturesUnordered::new(); - - // Return when *any* user changed their key - // TODO: only send for user they share a room with - futures.push(self.db.todeviceid_events.watch_prefix(&userdeviceid_prefix)); - - futures.push(self.db.userroomid_joined.watch_prefix(&userid_prefix)); - futures.push(self.db.userroomid_invitestate.watch_prefix(&userid_prefix)); - futures.push(self.db.userroomid_leftstate.watch_prefix(&userid_prefix)); - futures.push( - self.db - .userroomid_notificationcount - .watch_prefix(&userid_prefix), - ); - futures.push( - self.db - 
.userroomid_highlightcount - .watch_prefix(&userid_prefix), - ); - - // Events for rooms we are in - let rooms_joined = self.services.state_cache.rooms_joined(user_id); - - pin_mut!(rooms_joined); - while let Some(room_id) = rooms_joined.next().await { - let Ok(short_roomid) = self.services.short.get_shortroomid(room_id).await else { - continue; - }; - - let roomid_bytes = room_id.as_bytes().to_vec(); - let mut roomid_prefix = roomid_bytes.clone(); - roomid_prefix.push(0xFF); - - // Key changes - futures.push(self.db.keychangeid_userid.watch_prefix(&roomid_prefix)); - - // Room account data - let mut roomuser_prefix = roomid_prefix.clone(); - roomuser_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.db - .roomusertype_roomuserdataid - .watch_prefix(&roomuser_prefix), - ); - - // PDUs - let short_roomid = short_roomid.to_be_bytes().to_vec(); - futures.push(self.db.pduid_pdu.watch_prefix(&short_roomid)); - - // EDUs - let typing_room_id = room_id.to_owned(); - let typing_wait_for_update = async move { - self.services.typing.wait_for_update(&typing_room_id).await; - }; - - futures.push(typing_wait_for_update.boxed()); - futures.push( - self.db - .readreceiptid_readreceipt - .watch_prefix(&roomid_prefix), - ); - } - - let mut globaluserdata_prefix = vec![0xFF]; - globaluserdata_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.db - .roomusertype_roomuserdataid - .watch_prefix(&globaluserdata_prefix), - ); - - // More key changes (used when user is not joined to any rooms) - futures.push(self.db.keychangeid_userid.watch_prefix(&userid_prefix)); - - // One time keys - futures.push( - self.db - .userid_lastonetimekeyupdate - .watch_prefix(&userid_bytes), - ); - - // Server shutdown - futures.push(self.services.server.until_shutdown().boxed()); - - if !self.services.server.running() { - return Ok(()); - } - - // Wait until one of them finds something - trace!(futures = futures.len(), "watch started"); - futures.next().await; - 
trace!(futures = futures.len(), "watch finished"); - - Ok(()) -} diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs new file mode 100644 index 00000000..2aed1981 --- /dev/null +++ b/src/service/transaction_ids/data.rs @@ -0,0 +1,13 @@ +use ruma::{DeviceId, TransactionId, UserId}; + +use crate::Result; + +pub trait Data: Send + Sync { + fn add_txnid( + &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, data: &[u8], + ) -> Result<()>; + + fn existing_txnid( + &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, + ) -> Result>>; +} diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 9c284b70..bc55861a 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,54 +1,24 @@ -use std::sync::Arc; +mod data; -use conduwuit::{Result, implement}; -use database::{Handle, Map}; +pub use data::Data; use ruma::{DeviceId, TransactionId, UserId}; +use crate::Result; + pub struct Service { - db: Data, + pub db: &'static dyn Data, } -struct Data { - userdevicetxnid_response: Arc, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - db: Data { - userdevicetxnid_response: args.db["userdevicetxnid_response"].clone(), - }, - })) +impl Service { + pub fn add_txnid( + &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, data: &[u8], + ) -> Result<()> { + self.db.add_txnid(user_id, device_id, txn_id, data) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -#[implement(Service)] -pub fn add_txnid( - &self, - user_id: &UserId, - device_id: Option<&DeviceId>, - txn_id: &TransactionId, - data: &[u8], -) { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default()); - key.push(0xFF); - key.extend_from_slice(txn_id.as_bytes()); - - 
self.db.userdevicetxnid_response.insert(&key, data); -} - -// If there's no entry, this is a new transaction -#[implement(Service)] -pub async fn existing_txnid( - &self, - user_id: &UserId, - device_id: Option<&DeviceId>, - txn_id: &TransactionId, -) -> Result> { - let key = (user_id, device_id, txn_id); - self.db.userdevicetxnid_response.qry(&key).await + pub fn existing_txnid( + &self, user_id: &UserId, device_id: Option<&DeviceId>, txn_id: &TransactionId, + ) -> Result>> { + self.db.existing_txnid(user_id, device_id, txn_id) + } } diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs new file mode 100644 index 00000000..3a157068 --- /dev/null +++ b/src/service/uiaa/data.rs @@ -0,0 +1,17 @@ +use ruma::{api::client::uiaa::UiaaInfo, CanonicalJsonValue, DeviceId, UserId}; + +use crate::Result; + +pub trait Data: Send + Sync { + fn set_uiaa_request( + &self, user_id: &UserId, device_id: &DeviceId, session: &str, request: &CanonicalJsonValue, + ) -> Result<()>; + + fn get_uiaa_request(&self, user_id: &UserId, device_id: &DeviceId, session: &str) -> Option; + + fn update_uiaa_session( + &self, user_id: &UserId, device_id: &DeviceId, session: &str, uiaainfo: Option<&UiaaInfo>, + ) -> Result<()>; + + fn get_uiaa_session(&self, user_id: &UserId, device_id: &DeviceId, session: &str) -> Result; +} diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 7803c736..695aa963 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,300 +1,143 @@ -use std::{ - collections::{BTreeMap, HashSet}, - sync::{Arc, RwLock}, -}; +mod data; -use conduwuit::{ - Err, Error, Result, err, error, implement, utils, - utils::{hash, string::EMPTY}, -}; -use database::{Deserialized, Json, Map}; +use argon2::{PasswordHash, PasswordVerifier}; +pub use data::Data; use ruma::{ - CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, api::client::{ error::ErrorKind, uiaa::{AuthData, AuthType, Password, UiaaInfo, UserIdentifier}, }, + 
CanonicalJsonValue, DeviceId, UserId, }; +use tracing::error; -use crate::{Dep, config, globals, users}; +use crate::{api::client_server::SESSION_ID_LENGTH, services, utils, Error, Result}; pub struct Service { - userdevicesessionid_uiaarequest: RwLock, - db: Data, - services: Services, + pub db: &'static dyn Data, } -struct Services { - globals: Dep, - users: Dep, - config: Dep, -} - -struct Data { - userdevicesessionid_uiaainfo: Arc, -} - -type RequestMap = BTreeMap; -type RequestKey = (OwnedUserId, OwnedDeviceId, String); - -pub const SESSION_ID_LENGTH: usize = 32; - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - userdevicesessionid_uiaarequest: RwLock::new(RequestMap::new()), - db: Data { - userdevicesessionid_uiaainfo: args.db["userdevicesessionid_uiaainfo"].clone(), - }, - services: Services { - globals: args.depend::("globals"), - users: args.depend::("users"), - config: args.depend::("config"), - }, - })) +impl Service { + /// Creates a new Uiaa session. Make sure the session token is unique. + pub fn create( + &self, user_id: &UserId, device_id: &DeviceId, uiaainfo: &UiaaInfo, json_body: &CanonicalJsonValue, + ) -> Result<()> { + self.db.set_uiaa_request( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session should be set"), /* TODO: better session error handling (why + * is it optional in ruma?) 
*/ + json_body, + )?; + self.db.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session should be set"), + Some(uiaainfo), + ) } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} + pub fn try_auth( + &self, user_id: &UserId, device_id: &DeviceId, auth: &AuthData, uiaainfo: &UiaaInfo, + ) -> Result<(bool, UiaaInfo)> { + let mut uiaainfo = auth.session().map_or_else( + || Ok(uiaainfo.clone()), + |session| self.db.get_uiaa_session(user_id, device_id, session), + )?; -#[implement(Service)] -pub async fn read_tokens(&self) -> Result> { - let mut tokens = HashSet::new(); - if let Some(file) = &self.services.config.registration_token_file.as_ref() { - match std::fs::read_to_string(file) { - | Ok(text) => { - text.split_ascii_whitespace().for_each(|token| { - tokens.insert(token.to_owned()); - }); - }, - | Err(e) => error!("Failed to read the registration token file: {e}"), + if uiaainfo.session.is_none() { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); } - } - if let Some(token) = &self.services.config.registration_token { - tokens.insert(token.to_owned()); - } - Ok(tokens) -} + match auth { + // Find out what the user completed + AuthData::Password(Password { + identifier, + password, + .. + }) => { + let UserIdentifier::UserIdOrLocalpart(username) = identifier else { + return Err(Error::BadRequest(ErrorKind::Unrecognized, "Identifier type not recognized.")); + }; -/// Creates a new Uiaa session. Make sure the session token is unique. -#[implement(Service)] -pub fn create( - &self, - user_id: &UserId, - device_id: &DeviceId, - uiaainfo: &UiaaInfo, - json_body: &CanonicalJsonValue, -) { - // TODO: better session error handling (why is uiaainfo.session optional in - // ruma?) 
- self.set_uiaa_request( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), - json_body, - ); + let user_id = UserId::parse_with_server_name(username.clone(), services().globals.server_name()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), - Some(uiaainfo), - ); -} + // Check if password is correct + if let Some(hash) = services().users.password_hash(&user_id)? { + let hash_matches = services() + .globals + .argon + .verify_password( + password.as_bytes(), + &PasswordHash::new(&hash).expect("valid hash in database"), + ) + .is_ok(); -#[implement(Service)] -pub async fn try_auth( - &self, - user_id: &UserId, - device_id: &DeviceId, - auth: &AuthData, - uiaainfo: &UiaaInfo, -) -> Result<(bool, UiaaInfo)> { - let mut uiaainfo = if let Some(session) = auth.session() { - self.get_uiaa_session(user_id, device_id, session).await? - } else { - uiaainfo.clone() - }; + if !hash_matches { + uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { + kind: ErrorKind::forbidden(), + message: "Invalid username or password.".to_owned(), + }); + return Ok((false, uiaainfo)); + } + } - if uiaainfo.session.is_none() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - } - - match auth { - // Find out what the user completed - | AuthData::Password(Password { - identifier, - password, - #[cfg(feature = "element_hacks")] - user, - .. 
- }) => { - #[cfg(feature = "element_hacks")] - let username = if let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier { - username - } else if let Some(username) = user { - username - } else { - return Err(Error::BadRequest( - ErrorKind::Unrecognized, - "Identifier type not recognized.", - )); - }; - - #[cfg(not(feature = "element_hacks"))] - let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier else { - return Err(Error::BadRequest( - ErrorKind::Unrecognized, - "Identifier type not recognized.", - )); - }; - - let user_id_from_username = UserId::parse_with_server_name( - username.clone(), - self.services.globals.server_name(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; - - // Check if the access token being used matches the credentials used for UIAA - if user_id.localpart() != user_id_from_username.localpart() { - return Err!(Request(Forbidden("User ID and access token mismatch."))); - } - let user_id = user_id_from_username; - - // Check if password is correct - if let Ok(hash) = self.services.users.password_hash(&user_id).await { - let hash_matches = hash::verify_password(password, &hash).is_ok(); - if !hash_matches { + // Password was correct! Let's add it to `completed` + uiaainfo.completed.push(AuthType::Password); + }, + AuthData::RegistrationToken(t) => { + if Some(t.token.trim()) == services().globals.config.registration_token.as_deref() { + uiaainfo.completed.push(AuthType::RegistrationToken); + } else { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { kind: ErrorKind::forbidden(), - message: "Invalid username or password.".to_owned(), + message: "Invalid registration token.".to_owned(), }); return Ok((false, uiaainfo)); } - } - - // Password was correct! 
Let's add it to `completed` - uiaainfo.completed.push(AuthType::Password); - }, - | AuthData::RegistrationToken(t) => { - let tokens = self.read_tokens().await?; - if tokens.contains(t.token.trim()) { - uiaainfo.completed.push(AuthType::RegistrationToken); - } else { - uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { - kind: ErrorKind::forbidden(), - message: "Invalid registration token.".to_owned(), - }); - return Ok((false, uiaainfo)); - } - }, - | AuthData::Dummy(_) => { - uiaainfo.completed.push(AuthType::Dummy); - }, - | k => error!("type not supported: {:?}", k), - } - - // Check if a flow now succeeds - let mut completed = false; - 'flows: for flow in &mut uiaainfo.flows { - for stage in &flow.stages { - if !uiaainfo.completed.contains(stage) { - continue 'flows; - } + }, + AuthData::Dummy(_) => { + uiaainfo.completed.push(AuthType::Dummy); + }, + k => error!("type not supported: {:?}", k), } - // We didn't break, so this flow succeeded! - completed = true; - } - if !completed { - self.update_uiaa_session( + // Check if a flow now succeeds + let mut completed = false; + 'flows: for flow in &mut uiaainfo.flows { + for stage in &flow.stages { + if !uiaainfo.completed.contains(stage) { + continue 'flows; + } + } + // We didn't break, so this flow succeeded! + completed = true; + } + + if !completed { + self.db.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session is always set"), + Some(&uiaainfo), + )?; + return Ok((false, uiaainfo)); + } + + // UIAA was successful! Remove this session and return true + self.db.update_uiaa_session( user_id, device_id, uiaainfo.session.as_ref().expect("session is always set"), - Some(&uiaainfo), - ); - - return Ok((false, uiaainfo)); + None, + )?; + Ok((true, uiaainfo)) } - // UIAA was successful! 
Remove this session and return true - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - None, - ); - - Ok((true, uiaainfo)) -} - -#[implement(Service)] -fn set_uiaa_request( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - request: &CanonicalJsonValue, -) { - let key = (user_id.to_owned(), device_id.to_owned(), session.to_owned()); - self.userdevicesessionid_uiaarequest - .write() - .expect("locked for writing") - .insert(key, request.to_owned()); -} - -#[implement(Service)] -pub fn get_uiaa_request( - &self, - user_id: &UserId, - device_id: Option<&DeviceId>, - session: &str, -) -> Option { - let key = ( - user_id.to_owned(), - device_id.unwrap_or_else(|| EMPTY.into()).to_owned(), - session.to_owned(), - ); - - self.userdevicesessionid_uiaarequest - .read() - .expect("locked for reading") - .get(&key) - .cloned() -} - -#[implement(Service)] -fn update_uiaa_session( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - uiaainfo: Option<&UiaaInfo>, -) { - let key = (user_id, device_id, session); - - if let Some(uiaainfo) = uiaainfo { - self.db - .userdevicesessionid_uiaainfo - .put(key, Json(uiaainfo)); - } else { - self.db.userdevicesessionid_uiaainfo.del(key); + pub fn get_uiaa_request( + &self, user_id: &UserId, device_id: &DeviceId, session: &str, + ) -> Option { + self.db.get_uiaa_request(user_id, device_id, session) } } - -#[implement(Service)] -async fn get_uiaa_session( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, -) -> Result { - let key = (user_id, device_id, session); - self.db - .userdevicesessionid_uiaainfo - .qry(&key) - .await - .deserialized() - .map_err(|_| err!(Request(Forbidden("UIAA session does not exist.")))) -} diff --git a/src/service/users/data.rs b/src/service/users/data.rs new file mode 100644 index 00000000..04074e85 --- /dev/null +++ b/src/service/users/data.rs @@ -0,0 +1,146 @@ +use std::collections::BTreeMap; 
+ +use ruma::{ + api::client::{device::Device, filter::FilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::AnyToDeviceEvent, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, OwnedUserId, UInt, UserId, +}; + +use crate::Result; + +pub trait Data: Send + Sync { + /// Check if a user has an account on this homeserver. + fn exists(&self, user_id: &UserId) -> Result; + + /// Check if account is deactivated + fn is_deactivated(&self, user_id: &UserId) -> Result; + + /// Returns the number of users registered on this server. + fn count(&self) -> Result; + + /// Find out which user an access token belongs to. + fn find_from_token(&self, token: &str) -> Result>; + + /// Returns an iterator over all users on this homeserver. + fn iter<'a>(&'a self) -> Box> + 'a>; + + /// Returns a list of local users as list of usernames. + /// + /// A user account is considered `local` if the length of it's password is + /// greater then zero. + fn list_local_users(&self) -> Result>; + + /// Returns the password hash for the given user. + fn password_hash(&self, user_id: &UserId) -> Result>; + + /// Hash and set the user's password to the Argon2 hash + fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; + + /// Returns the displayname of a user on this homeserver. + fn displayname(&self, user_id: &UserId) -> Result>; + + /// Sets a new displayname or removes it if displayname is None. You still + /// need to nofify all rooms of this change. + fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; + + /// Get the avatar_url of a user. + fn avatar_url(&self, user_id: &UserId) -> Result>; + + /// Sets a new avatar_url or removes it if avatar_url is None. + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()>; + + /// Get the blurhash of a user. 
+ fn blurhash(&self, user_id: &UserId) -> Result>; + + /// Sets a new avatar_url or removes it if avatar_url is None. + fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()>; + + /// Adds a new device to a user. + fn create_device( + &self, user_id: &UserId, device_id: &DeviceId, token: &str, initial_device_display_name: Option, + ) -> Result<()>; + + /// Removes a device from a user. + fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + + /// Returns an iterator over all device ids of this user. + fn all_device_ids<'a>(&'a self, user_id: &UserId) -> Box> + 'a>; + + /// Replaces the access token of one device. + fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; + + fn add_one_time_key( + &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, + one_time_key_value: &Raw, + ) -> Result<()>; + + fn last_one_time_keys_update(&self, user_id: &UserId) -> Result; + + fn take_one_time_key( + &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, + ) -> Result)>>; + + fn count_one_time_keys(&self, user_id: &UserId, device_id: &DeviceId) + -> Result>; + + fn add_device_keys(&self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw) -> Result<()>; + + fn add_cross_signing_keys( + &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, + user_signing_key: &Option>, notify: bool, + ) -> Result<()>; + + fn sign_key(&self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId) + -> Result<()>; + + fn keys_changed<'a>( + &'a self, user_or_room_id: &str, from: u64, to: Option, + ) -> Box> + 'a>; + + fn mark_device_key_update(&self, user_id: &UserId) -> Result<()>; + + fn get_device_keys(&self, user_id: &UserId, device_id: &DeviceId) -> Result>>; + + fn parse_master_key( + &self, user_id: &UserId, master_key: &Raw, + ) -> Result<(Vec, CrossSigningKey)>; + + fn get_key( + &self, key: &[u8], 
sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>>; + + fn get_master_key( + &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>>; + + fn get_self_signing_key( + &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>>; + + fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; + + fn add_to_device_event( + &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, + content: serde_json::Value, + ) -> Result<()>; + + fn get_to_device_events(&self, user_id: &UserId, device_id: &DeviceId) -> Result>>; + + fn remove_to_device_events(&self, user_id: &UserId, device_id: &DeviceId, until: u64) -> Result<()>; + + fn update_device_metadata(&self, user_id: &UserId, device_id: &DeviceId, device: &Device) -> Result<()>; + + /// Get device metadata. + fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) -> Result>; + + fn get_devicelist_version(&self, user_id: &UserId) -> Result>; + + fn all_devices_metadata<'a>(&'a self, user_id: &UserId) -> Box> + 'a>; + + /// Creates a new sync filter. Returns the filter id. 
+ fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result; + + fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result>; +} diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 701561a8..b2fdeaf0 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,1103 +1,481 @@ -use std::{collections::BTreeMap, mem, sync::Arc}; - -use conduwuit::{ - Err, Error, Result, Server, at, debug_warn, err, trace, - utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted}, +mod data; +use std::{ + collections::{BTreeMap, BTreeSet}, + mem, + sync::{Arc, Mutex}, }; -use database::{Deserialized, Ignore, Interfix, Json, Map}; -use futures::{Stream, StreamExt, TryFutureExt}; + +pub use data::Data; use ruma::{ - DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, - OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, - api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, - encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{ - AnyToDeviceEvent, GlobalAccountDataEventType, ignored_user_list::IgnoredUserListEvent, + api::client::{ + device::Device, + error::ErrorKind, + filter::FilterDefinition, + sync::sync_events::{ + self, + v4::{ExtensionsConfig, SyncRequestList}, + }, }, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::AnyToDeviceEvent, serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, OwnedRoomId, OwnedUserId, + RoomAliasId, UInt, UserId, }; -use serde_json::json; -use crate::{Dep, account_data, admin, globals, rooms}; +use crate::{services, Error, Result}; + +pub struct SlidingSyncCache { + lists: BTreeMap, + subscriptions: BTreeMap, + known_rooms: BTreeMap>, // For every room, the roomsince number + extensions: ExtensionsConfig, +} + +type DbConnections = Mutex>>>; pub struct Service { - services: Services, - db: Data, -} - -struct Services { - 
server: Arc, - account_data: Dep, - admin: Dep, - globals: Dep, - state_accessor: Dep, - state_cache: Dep, -} - -struct Data { - keychangeid_userid: Arc, - keyid_key: Arc, - onetimekeyid_onetimekeys: Arc, - openidtoken_expiresatuserid: Arc, - logintoken_expiresatuserid: Arc, - todeviceid_events: Arc, - token_userdeviceid: Arc, - userdeviceid_metadata: Arc, - userdeviceid_token: Arc, - userfilterid_filter: Arc, - userid_avatarurl: Arc, - userid_blurhash: Arc, - userid_devicelistversion: Arc, - userid_displayname: Arc, - userid_lastonetimekeyupdate: Arc, - userid_masterkeyid: Arc, - userid_password: Arc, - userid_selfsigningkeyid: Arc, - userid_usersigningkeyid: Arc, - useridprofilekey_value: Arc, -} - -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - services: Services { - server: args.server.clone(), - account_data: args.depend::("account_data"), - admin: args.depend::("admin"), - globals: args.depend::("globals"), - state_accessor: args - .depend::("rooms::state_accessor"), - state_cache: args.depend::("rooms::state_cache"), - }, - db: Data { - keychangeid_userid: args.db["keychangeid_userid"].clone(), - keyid_key: args.db["keyid_key"].clone(), - onetimekeyid_onetimekeys: args.db["onetimekeyid_onetimekeys"].clone(), - openidtoken_expiresatuserid: args.db["openidtoken_expiresatuserid"].clone(), - logintoken_expiresatuserid: args.db["logintoken_expiresatuserid"].clone(), - todeviceid_events: args.db["todeviceid_events"].clone(), - token_userdeviceid: args.db["token_userdeviceid"].clone(), - userdeviceid_metadata: args.db["userdeviceid_metadata"].clone(), - userdeviceid_token: args.db["userdeviceid_token"].clone(), - userfilterid_filter: args.db["userfilterid_filter"].clone(), - userid_avatarurl: args.db["userid_avatarurl"].clone(), - userid_blurhash: args.db["userid_blurhash"].clone(), - userid_devicelistversion: args.db["userid_devicelistversion"].clone(), - userid_displayname: 
args.db["userid_displayname"].clone(), - userid_lastonetimekeyupdate: args.db["userid_lastonetimekeyupdate"].clone(), - userid_masterkeyid: args.db["userid_masterkeyid"].clone(), - userid_password: args.db["userid_password"].clone(), - userid_selfsigningkeyid: args.db["userid_selfsigningkeyid"].clone(), - userid_usersigningkeyid: args.db["userid_usersigningkeyid"].clone(), - useridprofilekey_value: args.db["useridprofilekey_value"].clone(), - }, - })) - } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + pub db: &'static dyn Data, + pub connections: DbConnections, } impl Service { - /// Returns true/false based on whether the recipient/receiving user has - /// blocked the sender - pub async fn user_is_ignored(&self, sender_user: &UserId, recipient_user: &UserId) -> bool { - self.services - .account_data - .get_global(recipient_user, GlobalAccountDataEventType::IgnoredUserList) - .await - .is_ok_and(|ignored: IgnoredUserListEvent| { - ignored - .content - .ignored_users - .keys() - .any(|blocked_user| blocked_user == sender_user) - }) - } - - /// Check if a user is an admin - #[inline] - pub async fn is_admin(&self, user_id: &UserId) -> bool { - self.services.admin.user_is_admin(user_id).await - } - - /// Create a new user account on this homeserver. - #[inline] - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.set_password(user_id, password) - } - - /// Deactivate account - pub async fn deactivate_account(&self, user_id: &UserId) -> Result<()> { - // Remove all associated devices - self.all_device_ids(user_id) - .for_each(|device_id| self.remove_device(user_id, device_id)) - .await; - - // Set the password to "" to indicate a deactivated account. Hashes will never - // result in an empty string, so the user will not be able to log in again. - // Systems like changing the password without logging in should check if the - // account is deactivated. 
- self.set_password(user_id, None)?; - - // TODO: Unhook 3PID - Ok(()) - } - /// Check if a user has an account on this homeserver. - #[inline] - pub async fn exists(&self, user_id: &UserId) -> bool { - self.db.userid_password.get(user_id).await.is_ok() + pub fn exists(&self, user_id: &UserId) -> Result { self.db.exists(user_id) } + + pub fn forget_sync_request_connection(&self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String) { + self.connections + .lock() + .unwrap() + .remove(&(user_id, device_id, conn_id)); + } + + pub fn update_sync_request_with_cache( + &self, user_id: OwnedUserId, device_id: OwnedDeviceId, request: &mut sync_events::v4::Request, + ) -> BTreeMap> { + let Some(conn_id) = request.conn_id.clone() else { + return BTreeMap::new(); + }; + + let mut cache = self.connections.lock().unwrap(); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().unwrap(); + drop(cache); + + for (list_id, list) in &mut request.lists { + if let Some(cached_list) = cached.lists.get(list_id) { + if list.sort.is_empty() { + list.sort.clone_from(&cached_list.sort); + }; + if list.room_details.required_state.is_empty() { + list.room_details + .required_state + .clone_from(&cached_list.room_details.required_state); + }; + list.room_details.timeline_limit = list + .room_details + .timeline_limit + .or(cached_list.room_details.timeline_limit); + list.include_old_rooms = list + .include_old_rooms + .clone() + .or_else(|| cached_list.include_old_rooms.clone()); + match (&mut list.filters, cached_list.filters.clone()) { + (Some(list_filters), Some(cached_filters)) => { + list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); + if list_filters.spaces.is_empty() { + list_filters.spaces = 
cached_filters.spaces; + } + list_filters.is_encrypted = list_filters.is_encrypted.or(cached_filters.is_encrypted); + list_filters.is_invite = list_filters.is_invite.or(cached_filters.is_invite); + if list_filters.room_types.is_empty() { + list_filters.room_types = cached_filters.room_types; + } + if list_filters.not_room_types.is_empty() { + list_filters.not_room_types = cached_filters.not_room_types; + } + list_filters.room_name_like = list_filters + .room_name_like + .clone() + .or(cached_filters.room_name_like); + if list_filters.tags.is_empty() { + list_filters.tags = cached_filters.tags; + } + if list_filters.not_tags.is_empty() { + list_filters.not_tags = cached_filters.not_tags; + } + }, + (_, Some(cached_filters)) => list.filters = Some(cached_filters), + (Some(list_filters), _) => list.filters = Some(list_filters.clone()), + (..) => {}, + } + if list.bump_event_types.is_empty() { + list.bump_event_types + .clone_from(&cached_list.bump_event_types); + }; + } + cached.lists.insert(list_id.clone(), list.clone()); + } + + cached + .subscriptions + .extend(request.room_subscriptions.clone()); + request + .room_subscriptions + .extend(cached.subscriptions.clone()); + + request.extensions.e2ee.enabled = request + .extensions + .e2ee + .enabled + .or(cached.extensions.e2ee.enabled); + + request.extensions.to_device.enabled = request + .extensions + .to_device + .enabled + .or(cached.extensions.to_device.enabled); + + request.extensions.account_data.enabled = request + .extensions + .account_data + .enabled + .or(cached.extensions.account_data.enabled); + request.extensions.account_data.lists = request + .extensions + .account_data + .lists + .clone() + .or_else(|| cached.extensions.account_data.lists.clone()); + request.extensions.account_data.rooms = request + .extensions + .account_data + .rooms + .clone() + .or_else(|| cached.extensions.account_data.rooms.clone()); + + cached.extensions = request.extensions.clone(); + + cached.known_rooms.clone() + } + + pub 
fn update_sync_subscriptions( + &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, + subscriptions: BTreeMap, + ) { + let mut cache = self.connections.lock().unwrap(); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().unwrap(); + drop(cache); + + cached.subscriptions = subscriptions; + } + + pub fn update_sync_known_rooms( + &self, user_id: OwnedUserId, device_id: OwnedDeviceId, conn_id: String, list_id: String, + new_cached_rooms: BTreeSet, globalsince: u64, + ) { + let mut cache = self.connections.lock().unwrap(); + let cached = Arc::clone( + cache + .entry((user_id, device_id, conn_id)) + .or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().unwrap(); + drop(cache); + + for (roomid, lastsince) in cached + .known_rooms + .entry(list_id.clone()) + .or_default() + .iter_mut() + { + if !new_cached_rooms.contains(roomid) { + *lastsince = 0; + } + } + let list = cached.known_rooms.entry(list_id).or_default(); + for roomid in new_cached_rooms { + list.insert(roomid, globalsince); + } } /// Check if account is deactivated - pub async fn is_deactivated(&self, user_id: &UserId) -> Result { - self.db - .userid_password - .get(user_id) - .map_ok(|val| val.is_empty()) - .map_err(|_| err!(Request(NotFound("User does not exist.")))) - .await + pub fn is_deactivated(&self, user_id: &UserId) -> Result { self.db.is_deactivated(user_id) } + + /// Check if a user is an admin + pub fn is_admin(&self, user_id: &UserId) -> Result { + let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", 
services().globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let admin_room_id = services() + .rooms + .alias + .resolve_local_alias(&admin_room_alias_id)? + .unwrap(); + + services() + .rooms + .state_cache + .is_joined(user_id, &admin_room_id) } - /// Check if account is active, infallible - pub async fn is_active(&self, user_id: &UserId) -> bool { - !self.is_deactivated(user_id).await.unwrap_or(true) - } - - /// Check if account is active, infallible - pub async fn is_active_local(&self, user_id: &UserId) -> bool { - self.services.globals.user_is_local(user_id) && self.is_active(user_id).await + /// Create a new user account on this homeserver. + pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + self.db.set_password(user_id, password)?; + Ok(()) } /// Returns the number of users registered on this server. - #[inline] - pub async fn count(&self) -> usize { self.db.userid_password.count().await } + pub fn count(&self) -> Result { self.db.count() } /// Find out which user an access token belongs to. - pub async fn find_from_token(&self, token: &str) -> Result<(OwnedUserId, OwnedDeviceId)> { - self.db.token_userdeviceid.get(token).await.deserialized() - } - - /// Returns an iterator over all users on this homeserver (offered for - /// compatibility) - #[allow(clippy::iter_without_into_iter, clippy::iter_not_returning_iterator)] - pub fn iter(&self) -> impl Stream + Send + '_ { - self.stream().map(ToOwned::to_owned) + pub fn find_from_token(&self, token: &str) -> Result> { + self.db.find_from_token(token) } /// Returns an iterator over all users on this homeserver. - pub fn stream(&self) -> impl Stream + Send { - self.db.userid_password.keys().ignore_err() - } + pub fn iter(&self) -> impl Iterator> + '_ { self.db.iter() } /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is /// greater then zero. 
- pub fn list_local_users(&self) -> impl Stream + Send + '_ { - self.db - .userid_password - .stream() - .ignore_err() - .ready_filter_map(|(u, p): (&UserId, &[u8])| (!p.is_empty()).then_some(u)) - } + pub fn list_local_users(&self) -> Result> { self.db.list_local_users() } /// Returns the password hash for the given user. - pub async fn password_hash(&self, user_id: &UserId) -> Result { - self.db.userid_password.get(user_id).await.deserialized() - } + pub fn password_hash(&self, user_id: &UserId) -> Result> { self.db.password_hash(user_id) } /// Hash and set the user's password to the Argon2 hash pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - password - .map(utils::hash::password) - .transpose() - .map_err(|e| { - err!(Request(InvalidParam("Password does not meet the requirements: {e}"))) - })? - .map_or_else( - || self.db.userid_password.insert(user_id, b""), - |hash| self.db.userid_password.insert(user_id, hash), - ); - - Ok(()) + self.db.set_password(user_id, password) } /// Returns the displayname of a user on this homeserver. - pub async fn displayname(&self, user_id: &UserId) -> Result { - self.db.userid_displayname.get(user_id).await.deserialized() - } + pub fn displayname(&self, user_id: &UserId) -> Result> { self.db.displayname(user_id) } /// Sets a new displayname or removes it if displayname is None. You still /// need to nofify all rooms of this change. - pub fn set_displayname(&self, user_id: &UserId, displayname: Option) { - if let Some(displayname) = displayname { - self.db.userid_displayname.insert(user_id, displayname); - } else { - self.db.userid_displayname.remove(user_id); - } + pub async fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { + self.db.set_displayname(user_id, displayname) } - /// Get the `avatar_url` of a user. 
- pub async fn avatar_url(&self, user_id: &UserId) -> Result { - self.db.userid_avatarurl.get(user_id).await.deserialized() - } + /// Get the avatar_url of a user. + pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.db.avatar_url(user_id) } /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) { - match avatar_url { - | Some(avatar_url) => { - self.db.userid_avatarurl.insert(user_id, &avatar_url); - }, - | _ => { - self.db.userid_avatarurl.remove(user_id); - }, - } + pub async fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + self.db.set_avatar_url(user_id, avatar_url) } /// Get the blurhash of a user. - pub async fn blurhash(&self, user_id: &UserId) -> Result { - self.db.userid_blurhash.get(user_id).await.deserialized() - } + pub fn blurhash(&self, user_id: &UserId) -> Result> { self.db.blurhash(user_id) } /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) { - if let Some(blurhash) = blurhash { - self.db.userid_blurhash.insert(user_id, blurhash); - } else { - self.db.userid_blurhash.remove(user_id); - } + pub async fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { + self.db.set_blurhash(user_id, blurhash) } /// Adds a new device to a user. 
- pub async fn create_device( - &self, - user_id: &UserId, - device_id: &DeviceId, - token: &str, - initial_device_display_name: Option, - client_ip: Option, + pub fn create_device( + &self, user_id: &UserId, device_id: &DeviceId, token: &str, initial_device_display_name: Option, ) -> Result<()> { - if !self.exists(user_id).await { - return Err!(Request(InvalidParam(error!( - "Called create_device for non-existent user {user_id}" - )))); - } - - let key = (user_id, device_id); - let val = Device { - device_id: device_id.into(), - display_name: initial_device_display_name, - last_seen_ip: client_ip, - last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), - }; - - increment(&self.db.userid_devicelistversion, user_id.as_bytes()); - self.db.userdeviceid_metadata.put(key, Json(val)); - self.set_token(user_id, device_id, token).await + self.db + .create_device(user_id, device_id, token, initial_device_display_name) } /// Removes a device from a user. - pub async fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) { - let userdeviceid = (user_id, device_id); - - // Remove tokens - if let Ok(old_token) = self.db.userdeviceid_token.qry(&userdeviceid).await { - self.db.userdeviceid_token.del(userdeviceid); - self.db.token_userdeviceid.remove(&old_token); - } - - // Remove todevice events - let prefix = (user_id, device_id, Interfix); - self.db - .todeviceid_events - .keys_prefix_raw(&prefix) - .ignore_err() - .ready_for_each(|key| self.db.todeviceid_events.remove(key)) - .await; - - // TODO: Remove onetimekeys - - increment(&self.db.userid_devicelistversion, user_id.as_bytes()); - - self.db.userdeviceid_metadata.del(userdeviceid); - self.mark_device_key_update(user_id).await; + pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + self.db.remove_device(user_id, device_id) } /// Returns an iterator over all device ids of this user. 
- pub fn all_device_ids<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - let prefix = (user_id, Interfix); - self.db - .userdeviceid_metadata - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, device_id): (Ignore, &DeviceId)| device_id) - } - - pub async fn get_token(&self, user_id: &UserId, device_id: &DeviceId) -> Result { - let key = (user_id, device_id); - self.db.userdeviceid_token.qry(&key).await.deserialized() + pub fn all_device_ids<'a>(&'a self, user_id: &UserId) -> impl Iterator> + 'a { + self.db.all_device_ids(user_id) } /// Replaces the access token of one device. - pub async fn set_token( - &self, - user_id: &UserId, - device_id: &DeviceId, - token: &str, - ) -> Result<()> { - let key = (user_id, device_id); - if self.db.userdeviceid_metadata.qry(&key).await.is_err() { - return Err!(Database(error!( - ?user_id, - ?device_id, - "User does not exist or device has no metadata." - ))); - } - - // Remove old token - if let Ok(old_token) = self.db.userdeviceid_token.qry(&key).await { - self.db.token_userdeviceid.remove(&old_token); - // It will be removed from userdeviceid_token by the insert later - } - - // Assign token to user device combination - self.db.userdeviceid_token.put_raw(key, token); - self.db.token_userdeviceid.raw_put(token, key); - - Ok(()) + pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + self.db.set_token(user_id, device_id, token) } - pub async fn add_one_time_key( - &self, - user_id: &UserId, - device_id: &DeviceId, - one_time_key_key: &KeyId, + pub fn add_one_time_key( + &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, - ) -> Result { - // All devices have metadata - // Only existing devices should be able to call this, but we shouldn't assert - // either... 
- let key = (user_id, device_id); - if self.db.userdeviceid_metadata.qry(&key).await.is_err() { - return Err!(Database(error!( - ?user_id, - ?device_id, - "User does not exist or device has no metadata." - ))); - } - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xFF); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xFF); - // TODO: Use DeviceKeyId::to_string when it's available (and update everything, - // because there are no wrapping quotation marks anymore) - key.extend_from_slice( - serde_json::to_string(one_time_key_key) - .expect("DeviceKeyId::to_string always works") - .as_bytes(), - ); - - self.db - .onetimekeyid_onetimekeys - .raw_put(key, Json(one_time_key_value)); - - let count = self.services.globals.next_count().unwrap(); - self.db.userid_lastonetimekeyupdate.raw_put(user_id, count); - - Ok(()) - } - - pub async fn last_one_time_keys_update(&self, user_id: &UserId) -> u64 { - self.db - .userid_lastonetimekeyupdate - .get(user_id) - .await - .deserialized() - .unwrap_or(0) - } - - pub async fn take_one_time_key( - &self, - user_id: &UserId, - device_id: &DeviceId, - key_algorithm: &OneTimeKeyAlgorithm, - ) -> Result<(OwnedKeyId, Raw)> { - let count = self.services.globals.next_count()?.to_be_bytes(); - self.db.userid_lastonetimekeyupdate.insert(user_id, count); - - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); - prefix.push(b'"'); // Annoying quotation mark - prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); - prefix.push(b':'); - - let one_time_key = self - .db - .onetimekeyid_onetimekeys - .raw_stream_prefix(&prefix) - .ignore_err() - .map(|(key, val)| { - self.db.onetimekeyid_onetimekeys.remove(key); - - let key = key - .rsplit(|&b| b == 0xFF) - .next() - .ok_or_else(|| err!(Database("OneTimeKeyId in db is invalid."))) - .unwrap(); - - let key = serde_json::from_slice(key) - .map_err(|e| err!(Database("OneTimeKeyId in db is 
invalid. {e}"))) - .unwrap(); - - let val = serde_json::from_slice(val) - .map_err(|e| err!(Database("OneTimeKeys in db are invalid. {e}"))) - .unwrap(); - - (key, val) - }) - .next() - .await; - - one_time_key.ok_or_else(|| err!(Request(NotFound("No one-time-key found")))) - } - - pub async fn count_one_time_keys( - &self, - user_id: &UserId, - device_id: &DeviceId, - ) -> BTreeMap { - type KeyVal<'a> = ((Ignore, Ignore, &'a Unquoted), Ignore); - - let mut algorithm_counts = BTreeMap::::new(); - let query = (user_id, device_id); - self.db - .onetimekeyid_onetimekeys - .stream_prefix(&query) - .ignore_err() - .ready_for_each(|((Ignore, Ignore, device_key_id), Ignore): KeyVal<'_>| { - let one_time_key_id: &OneTimeKeyId = device_key_id - .as_str() - .try_into() - .expect("Invalid DeviceKeyID in database"); - - let count: &mut UInt = algorithm_counts - .entry(one_time_key_id.algorithm()) - .or_default(); - - *count = count.saturating_add(1_u32.into()); - }) - .await; - - algorithm_counts - } - - pub async fn add_device_keys( - &self, - user_id: &UserId, - device_id: &DeviceId, - device_keys: &Raw, - ) { - let key = (user_id, device_id); - - self.db.keyid_key.put(key, Json(device_keys)); - self.mark_device_key_update(user_id).await; - } - - pub async fn add_cross_signing_keys( - &self, - user_id: &UserId, - master_key: &Option>, - self_signing_key: &Option>, - user_signing_key: &Option>, - notify: bool, ) -> Result<()> { - // TODO: Check signatures - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - - if let Some(master_key) = master_key { - let (master_key_key, _) = parse_master_key(user_id, master_key)?; - - self.db - .keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes()); - - self.db - .userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key); - } - - // Self-signing key - if let Some(self_signing_key) = self_signing_key { - let mut self_signing_key_ids = self_signing_key - .deserialize() - .map_err(|e| 
err!(Request(InvalidParam("Invalid self signing key: {e:?}"))))? - .keys - .into_values(); - - let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained no key.", - ))?; - - if self_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained more than one key.", - )); - } - - let mut self_signing_key_key = prefix.clone(); - self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); - - self.db - .keyid_key - .insert(&self_signing_key_key, self_signing_key.json().get().as_bytes()); - - self.db - .userid_selfsigningkeyid - .insert(user_id.as_bytes(), &self_signing_key_key); - } - - // User-signing key - if let Some(user_signing_key) = user_signing_key { - let user_signing_key_id = parse_user_signing_key(user_signing_key)?; - - let user_signing_key_key = (user_id, &user_signing_key_id); - self.db - .keyid_key - .put_raw(user_signing_key_key, user_signing_key.json().get().as_bytes()); - - self.db - .userid_usersigningkeyid - .raw_put(user_id, user_signing_key_key); - } - - if notify { - self.mark_device_key_update(user_id).await; - } - - Ok(()) + self.db + .add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) } - pub async fn sign_key( - &self, - target_id: &UserId, - key_id: &str, - signature: (String, String), - sender_id: &UserId, - ) -> Result { - let key = (target_id, key_id); - - let mut cross_signing_key: serde_json::Value = self - .db - .keyid_key - .qry(&key) - .await - .map_err(|_| err!(Request(InvalidParam("Tried to sign nonexistent key"))))? - .deserialized() - .map_err(|e| err!(Database(debug_warn!("key in keyid_key is invalid: {e:?}"))))?; - - let signatures = cross_signing_key - .get_mut("signatures") - .ok_or_else(|| { - err!(Database(debug_warn!("key in keyid_key has no signatures field"))) - })? 
- .as_object_mut() - .ok_or_else(|| { - err!(Database(debug_warn!("key in keyid_key has invalid signatures field."))) - })? - .entry(sender_id.to_string()) - .or_insert_with(|| serde_json::Map::new().into()); - - signatures - .as_object_mut() - .ok_or_else(|| { - err!(Database(debug_warn!("signatures in keyid_key for a user is invalid."))) - })? - .insert(signature.0, signature.1.into()); - - let key = (target_id, key_id); - self.db.keyid_key.put(key, Json(cross_signing_key)); - - self.mark_device_key_update(target_id).await; - - Ok(()) + pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { + self.db.last_one_time_keys_update(user_id) + } + + pub fn take_one_time_key( + &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, + ) -> Result)>> { + self.db.take_one_time_key(user_id, device_id, key_algorithm) + } + + pub fn count_one_time_keys( + &self, user_id: &UserId, device_id: &DeviceId, + ) -> Result> { + self.db.count_one_time_keys(user_id, device_id) + } + + pub fn add_device_keys(&self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw) -> Result<()> { + self.db.add_device_keys(user_id, device_id, device_keys) + } + + pub fn add_cross_signing_keys( + &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, + user_signing_key: &Option>, notify: bool, + ) -> Result<()> { + self.db + .add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key, notify) + } + + pub fn sign_key( + &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, + ) -> Result<()> { + self.db.sign_key(target_id, key_id, signature, sender_id) } - #[inline] pub fn keys_changed<'a>( - &'a self, - user_id: &'a UserId, - from: u64, - to: Option, - ) -> impl Stream + Send + 'a { - self.keys_changed_user_or_room(user_id.as_str(), from, to) - .map(|(user_id, ..)| user_id) + &'a self, user_or_room_id: &str, from: u64, to: Option, + ) -> impl Iterator> + 'a { + 
self.db.keys_changed(user_or_room_id, from, to) } - #[inline] - pub fn room_keys_changed<'a>( - &'a self, - room_id: &'a RoomId, - from: u64, - to: Option, - ) -> impl Stream + Send + 'a { - self.keys_changed_user_or_room(room_id.as_str(), from, to) + pub fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { self.db.mark_device_key_update(user_id) } + + pub fn get_device_keys(&self, user_id: &UserId, device_id: &DeviceId) -> Result>> { + self.db.get_device_keys(user_id, device_id) } - fn keys_changed_user_or_room<'a>( - &'a self, - user_or_room_id: &'a str, - from: u64, - to: Option, - ) -> impl Stream + Send + 'a { - type KeyVal<'a> = ((&'a str, u64), &'a UserId); + pub fn parse_master_key( + &self, user_id: &UserId, master_key: &Raw, + ) -> Result<(Vec, CrossSigningKey)> { + self.db.parse_master_key(user_id, master_key) + } - let to = to.unwrap_or(u64::MAX); - let start = (user_or_room_id, from.saturating_add(1)); + pub fn get_key( + &self, key: &[u8], sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { self.db - .keychangeid_userid - .stream_from(&start) - .ignore_err() - .ready_take_while(move |((prefix, count), _): &KeyVal<'_>| { - *prefix == user_or_room_id && *count <= to - }) - .map(|((_, count), user_id): KeyVal<'_>| (user_id, count)) + .get_key(key, sender_user, user_id, allowed_signatures) } - pub async fn mark_device_key_update(&self, user_id: &UserId) { - let count = self.services.globals.next_count().unwrap(); - - self.services - .state_cache - .rooms_joined(user_id) - // Don't send key updates to unencrypted rooms - .filter(|room_id| self.services.state_accessor.is_encrypted_room(room_id)) - .ready_for_each(|room_id| { - let key = (room_id, count); - self.db.keychangeid_userid.put_raw(key, user_id); - }) - .await; - - let key = (user_id, count); - self.db.keychangeid_userid.put_raw(key, user_id); - } - - pub async fn get_device_keys<'a>( - &'a self, - user_id: &'a UserId, - 
device_id: &DeviceId, - ) -> Result> { - let key_id = (user_id, device_id); - self.db.keyid_key.qry(&key_id).await.deserialized() - } - - pub async fn get_key( - &self, - key_id: &[u8], - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &F, - ) -> Result> - where - F: Fn(&UserId) -> bool + Send + Sync, - { - let key: serde_json::Value = self.db.keyid_key.get(key_id).await.deserialized()?; - - let cleaned = clean_signatures(key, sender_user, user_id, allowed_signatures)?; - let raw_value = serde_json::value::to_raw_value(&cleaned)?; - Ok(Raw::from_json(raw_value)) - } - - pub async fn get_master_key( - &self, - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &F, - ) -> Result> - where - F: Fn(&UserId) -> bool + Send + Sync, - { - let key_id = self.db.userid_masterkeyid.get(user_id).await?; - - self.get_key(&key_id, sender_user, user_id, allowed_signatures) - .await - } - - pub async fn get_self_signing_key( - &self, - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &F, - ) -> Result> - where - F: Fn(&UserId) -> bool + Send + Sync, - { - let key_id = self.db.userid_selfsigningkeyid.get(user_id).await?; - - self.get_key(&key_id, sender_user, user_id, allowed_signatures) - .await - } - - pub async fn get_user_signing_key(&self, user_id: &UserId) -> Result> { + pub fn get_master_key( + &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { self.db - .userid_usersigningkeyid - .get(user_id) - .and_then(|key_id| self.db.keyid_key.get(&*key_id)) - .await - .deserialized() + .get_master_key(sender_user, user_id, allowed_signatures) } - pub async fn add_to_device_event( - &self, - sender: &UserId, - target_user_id: &UserId, - target_device_id: &DeviceId, - event_type: &str, + pub fn get_self_signing_key( + &self, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.db + 
.get_self_signing_key(sender_user, user_id, allowed_signatures) + } + + pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { + self.db.get_user_signing_key(user_id) + } + + pub fn add_to_device_event( + &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, content: serde_json::Value, - ) { - let count = self.services.globals.next_count().unwrap(); - - let key = (target_user_id, target_device_id, count); - self.db.todeviceid_events.put( - key, - Json(json!({ - "type": event_type, - "sender": sender, - "content": content, - })), - ); - } - - pub fn get_to_device_events<'a>( - &'a self, - user_id: &'a UserId, - device_id: &'a DeviceId, - since: Option, - to: Option, - ) -> impl Stream> + Send + 'a { - type Key<'a> = (&'a UserId, &'a DeviceId, u64); - - let from = (user_id, device_id, since.map_or(0, |since| since.saturating_add(1))); - - self.db - .todeviceid_events - .stream_from(&from) - .ignore_err() - .ready_take_while(move |((user_id_, device_id_, count), _): &(Key<'_>, _)| { - user_id == *user_id_ - && device_id == *device_id_ - && to.is_none_or(|to| *count <= to) - }) - .map(at!(1)) - } - - pub async fn remove_to_device_events( - &self, - user_id: &UserId, - device_id: &DeviceId, - until: Until, - ) where - Until: Into> + Send, - { - type Key<'a> = (&'a UserId, &'a DeviceId, u64); - - let until = until.into().unwrap_or(u64::MAX); - let from = (user_id, device_id, until); - self.db - .todeviceid_events - .rev_keys_from(&from) - .ignore_err() - .ready_take_while(move |(user_id_, device_id_, _): &Key<'_>| { - user_id == *user_id_ && device_id == *device_id_ - }) - .ready_for_each(|key: Key<'_>| { - self.db.todeviceid_events.del(key); - }) - .await; - } - - pub async fn update_device_metadata( - &self, - user_id: &UserId, - device_id: &DeviceId, - device: &Device, ) -> Result<()> { - increment(&self.db.userid_devicelistversion, user_id.as_bytes()); + self.db + .add_to_device_event(sender, target_user_id, 
target_device_id, event_type, content) + } - let key = (user_id, device_id); - self.db.userdeviceid_metadata.put(key, Json(device)); + pub fn get_to_device_events(&self, user_id: &UserId, device_id: &DeviceId) -> Result>> { + self.db.get_to_device_events(user_id, device_id) + } - Ok(()) + pub fn remove_to_device_events(&self, user_id: &UserId, device_id: &DeviceId, until: u64) -> Result<()> { + self.db.remove_to_device_events(user_id, device_id, until) + } + + pub fn update_device_metadata(&self, user_id: &UserId, device_id: &DeviceId, device: &Device) -> Result<()> { + self.db.update_device_metadata(user_id, device_id, device) } /// Get device metadata. - pub async fn get_device_metadata( - &self, - user_id: &UserId, - device_id: &DeviceId, - ) -> Result { - self.db - .userdeviceid_metadata - .qry(&(user_id, device_id)) - .await - .deserialized() + pub fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) -> Result> { + self.db.get_device_metadata(user_id, device_id) } - pub async fn get_devicelist_version(&self, user_id: &UserId) -> Result { - self.db - .userid_devicelistversion - .get(user_id) - .await - .deserialized() + pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { + self.db.get_devicelist_version(user_id) } - pub fn all_devices_metadata<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - let key = (user_id, Interfix); - self.db - .userdeviceid_metadata - .stream_prefix(&key) - .ignore_err() - .map(|(_, val): (Ignore, Device)| val) + pub fn all_devices_metadata<'a>(&'a self, user_id: &UserId) -> impl Iterator> + 'a { + self.db.all_devices_metadata(user_id) + } + + /// Deactivate account + pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { + // Remove all associated devices + for device_id in self.all_device_ids(user_id) { + self.remove_device(user_id, &device_id?)?; + } + + // Set the password to "" to indicate a deactivated account. 
Hashes will never + // result in an empty string, so the user will not be able to log in again. + // Systems like changing the password without logging in should check if the + // account is deactivated. + self.db.set_password(user_id, None)?; + + // TODO: Unhook 3PID + Ok(()) } /// Creates a new sync filter. Returns the filter id. - pub fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> String { - let filter_id = utils::random_string(4); - - let key = (user_id, &filter_id); - self.db.userfilterid_filter.put(key, Json(filter)); - - filter_id + pub fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { + self.db.create_filter(user_id, filter) } - pub async fn get_filter( - &self, - user_id: &UserId, - filter_id: &str, - ) -> Result { - let key = (user_id, filter_id); - self.db.userfilterid_filter.qry(&key).await.deserialized() + pub fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result> { + self.db.get_filter(user_id, filter_id) } - - /// Creates an OpenID token, which can be used to prove that a user has - /// access to an account (primarily for integrations) - pub fn create_openid_token(&self, user_id: &UserId, token: &str) -> Result { - use std::num::Saturating as Sat; - - let expires_in = self.services.server.config.openid_token_ttl; - let expires_at = Sat(utils::millis_since_unix_epoch()) + Sat(expires_in) * Sat(1000); - - let mut value = expires_at.0.to_be_bytes().to_vec(); - value.extend_from_slice(user_id.as_bytes()); - - self.db - .openidtoken_expiresatuserid - .insert(token.as_bytes(), value.as_slice()); - - Ok(expires_in) - } - - /// Find out which user an OpenID access token belongs to. 
- pub async fn find_from_openid_token(&self, token: &str) -> Result { - let Ok(value) = self.db.openidtoken_expiresatuserid.get(token).await else { - return Err!(Request(Unauthorized("OpenID token is unrecognised"))); - }; - - let (expires_at_bytes, user_bytes) = value.split_at(0_u64.to_be_bytes().len()); - let expires_at = - u64::from_be_bytes(expires_at_bytes.try_into().map_err(|e| { - err!(Database("expires_at in openid_userid is invalid u64. {e}")) - })?); - - if expires_at < utils::millis_since_unix_epoch() { - debug_warn!("OpenID token is expired, removing"); - self.db.openidtoken_expiresatuserid.remove(token.as_bytes()); - - return Err!(Request(Unauthorized("OpenID token is expired"))); - } - - let user_string = utils::string_from_bytes(user_bytes) - .map_err(|e| err!(Database("User ID in openid_userid is invalid unicode. {e}")))?; - - OwnedUserId::try_from(user_string) - .map_err(|e| err!(Database("User ID in openid_userid is invalid. {e}"))) - } - - /// Creates a short-lived login token, which can be used to log in using the - /// `m.login.token` mechanism. - pub fn create_login_token(&self, user_id: &UserId, token: &str) -> u64 { - use std::num::Saturating as Sat; - - let expires_in = self.services.server.config.login_token_ttl; - let expires_at = Sat(utils::millis_since_unix_epoch()) + Sat(expires_in); - - let value = (expires_at.0, user_id); - self.db.logintoken_expiresatuserid.raw_put(token, value); - - expires_in - } - - /// Find out which user a login token belongs to. - /// Removes the token to prevent double-use attacks. 
- pub async fn find_from_login_token(&self, token: &str) -> Result { - let Ok(value) = self.db.logintoken_expiresatuserid.get(token).await else { - return Err!(Request(Forbidden("Login token is unrecognised"))); - }; - let (expires_at, user_id): (u64, OwnedUserId) = value.deserialized()?; - - if expires_at < utils::millis_since_unix_epoch() { - trace!(?user_id, ?token, "Removing expired login token"); - - self.db.logintoken_expiresatuserid.remove(token); - - return Err!(Request(Forbidden("Login token is expired"))); - } - - self.db.logintoken_expiresatuserid.remove(token); - - Ok(user_id) - } - - /// Gets a specific user profile key - pub async fn profile_key( - &self, - user_id: &UserId, - profile_key: &str, - ) -> Result { - let key = (user_id, profile_key); - self.db - .useridprofilekey_value - .qry(&key) - .await - .deserialized() - } - - /// Gets all the user's profile keys and values in an iterator - pub fn all_profile_keys<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + 'a + Send { - type KeyVal = ((Ignore, String), serde_json::Value); - - let prefix = (user_id, Interfix); - self.db - .useridprofilekey_value - .stream_prefix(&prefix) - .ignore_err() - .map(|((_, key), val): KeyVal| (key, val)) - } - - /// Sets a new profile key value, removes the key if value is None - pub fn set_profile_key( - &self, - user_id: &UserId, - profile_key: &str, - profile_key_value: Option, - ) { - // TODO: insert to the stable MSC4175 key when it's stable - let key = (user_id, profile_key); - - if let Some(value) = profile_key_value { - self.db.useridprofilekey_value.put(key, Json(value)); - } else { - self.db.useridprofilekey_value.del(key); - } - } - - /// Get the timezone of a user. - pub async fn timezone(&self, user_id: &UserId) -> Result { - // TODO: transparently migrate unstable key usage to the stable key once MSC4133 - // and MSC4175 are stable, likely a remove/insert in this block. 
- - // first check the unstable prefix then check the stable prefix - let unstable_key = (user_id, "us.cloke.msc4175.tz"); - let stable_key = (user_id, "m.tz"); - self.db - .useridprofilekey_value - .qry(&unstable_key) - .or_else(|_| self.db.useridprofilekey_value.qry(&stable_key)) - .await - .deserialized() - } - - /// Sets a new timezone or removes it if timezone is None. - pub fn set_timezone(&self, user_id: &UserId, timezone: Option) { - // TODO: insert to the stable MSC4175 key when it's stable - let key = (user_id, "us.cloke.msc4175.tz"); - - if let Some(timezone) = timezone { - self.db.useridprofilekey_value.put_raw(key, &timezone); - } else { - self.db.useridprofilekey_value.del(key); - } - } -} - -pub fn parse_master_key( - user_id: &UserId, - master_key: &Raw, -) -> Result<(Vec, CrossSigningKey)> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - - let master_key = master_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))?; - let mut master_key_ids = master_key.keys.values(); - let master_key_id = master_key_ids - .next() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Master key contained no key."))?; - if master_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained more than one key.", - )); - } - let mut master_key_key = prefix.clone(); - master_key_key.extend_from_slice(master_key_id.as_bytes()); - Ok((master_key_key, master_key)) -} - -pub fn parse_user_signing_key(user_signing_key: &Raw) -> Result { - let mut user_signing_key_ids = user_signing_key - .deserialize() - .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? 
- .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids - .next() - .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; - - if user_signing_key_ids.next().is_some() { - return Err!(Request(InvalidParam("User signing key contained more than one key."))); - } - - Ok(user_signing_key_id) } /// Ensure that a user only sees signatures from themselves and the target user -fn clean_signatures( - mut cross_signing_key: serde_json::Value, - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &F, -) -> Result -where - F: Fn(&UserId) -> bool + Send + Sync, -{ +pub(crate) fn clean_signatures bool>( + cross_signing_key: &mut serde_json::Value, sender_user: Option<&UserId>, user_id: &UserId, allowed_signatures: F, +) -> Result<(), Error> { if let Some(signatures) = cross_signing_key .get_mut("signatures") .and_then(|v| v.as_object_mut()) @@ -1105,23 +483,14 @@ where // Don't allocate for the full size of the current signatures, but require // at most one resize if nothing is dropped let new_capacity = signatures.len() / 2; - for (user, signature) in - mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) - { - let sid = <&UserId>::try_from(user.as_str()) - .map_err(|_| Error::bad_database("Invalid user ID in database."))?; + for (user, signature) in mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) { + let sid = + <&UserId>::try_from(user.as_str()).map_err(|_| Error::bad_database("Invalid user ID in database."))?; if sender_user == Some(user_id) || sid == user_id || allowed_signatures(sid) { signatures.insert(user, signature); } } } - Ok(cross_signing_key) -} - -//TODO: this is an ABA -fn increment(db: &Arc, key: &[u8]) { - let old = db.get_blocking(key); - let new = utils::increment(old.ok().as_deref()); - db.insert(key, new); + Ok(()) } diff --git a/src/utils/error.rs b/src/utils/error.rs new file mode 100644 index 00000000..fb48b792 --- /dev/null +++ b/src/utils/error.rs @@ 
-0,0 +1,203 @@ +use std::convert::Infallible; + +use http::StatusCode; +use ruma::{ + api::client::{ + error::{Error as RumaError, ErrorBody, ErrorKind}, + uiaa::{UiaaInfo, UiaaResponse}, + }, + OwnedServerName, +}; +use thiserror::Error; +use tracing::{debug, error}; +use ErrorKind::{ + Forbidden, GuestAccessForbidden, LimitExceeded, MissingToken, NotFound, ThreepidAuthFailed, ThreepidDenied, + TooLarge, Unauthorized, Unknown, UnknownToken, Unrecognized, UserDeactivated, WrongRoomKeysVersion, +}; + +use crate::RumaResponse; + +pub type Result = std::result::Result; + +#[derive(Error)] +pub enum Error { + #[cfg(feature = "sqlite")] + #[error("There was a problem with the connection to the sqlite database: {source}")] + SqliteError { + #[from] + source: rusqlite::Error, + }, + #[cfg(feature = "rocksdb")] + #[error("There was a problem with the connection to the rocksdb database: {source}")] + RocksDbError { + #[from] + source: rust_rocksdb::Error, + }, + #[error("Could not generate an image.")] + ImageError { + #[from] + source: image::error::ImageError, + }, + #[error("Could not connect to server: {source}")] + ReqwestError { + #[from] + source: reqwest::Error, + }, + #[error("Could build regular expression: {source}")] + RegexError { + #[from] + source: regex::Error, + }, + #[error("{0}")] + FederationError(OwnedServerName, RumaError), + #[error("Could not do this io: {source}")] + IoError { + #[from] + source: std::io::Error, + }, + #[error("There was a problem with your configuration file: {0}")] + BadConfig(String), + #[error("{0}")] + BadServerResponse(&'static str), + #[error("{0}")] + /// Don't create this directly. Use Error::bad_database instead. 
+ BadDatabase(&'static str), + #[error("uiaa")] + Uiaa(UiaaInfo), + #[error("{0}: {1}")] + BadRequest(ErrorKind, &'static str), + #[error("{0}")] + Conflict(&'static str), // This is only needed for when a room alias already exists + #[error("{0}")] + ExtensionError(#[from] axum::extract::rejection::ExtensionRejection), + #[error("{0}")] + PathError(#[from] axum::extract::rejection::PathRejection), + #[error("from {0}: {1}")] + RedactionError(OwnedServerName, ruma::canonical_json::RedactionError), + #[error("{0} in {1}")] + InconsistentRoomState(&'static str, ruma::OwnedRoomId), + #[error("{0}")] + AdminCommand(&'static str), + #[error("{0}")] + Error(String), +} + +impl Error { + pub fn bad_database(message: &'static str) -> Self { + error!("BadDatabase: {}", message); + Self::BadDatabase(message) + } + + pub fn bad_config(message: &str) -> Self { + error!("BadConfig: {}", message); + Self::BadConfig(message.to_owned()) + } +} + +impl Error { + pub fn to_response(&self) -> RumaResponse { + if let Self::Uiaa(uiaainfo) = self { + return RumaResponse(UiaaResponse::AuthResponse(uiaainfo.clone())); + } + + if let Self::FederationError(origin, error) = self { + let mut error = error.clone(); + error.body = ErrorBody::Standard { + kind: error.error_kind().unwrap_or(&Unknown).clone(), + message: format!("Answer from {origin}: {error}"), + }; + return RumaResponse(UiaaResponse::MatrixError(error)); + } + + let message = format!("{self}"); + let (kind, status_code) = match self { + Self::BadRequest(kind, _) => ( + kind.clone(), + match kind { + WrongRoomKeysVersion { + .. + } + | Forbidden { + .. + } + | GuestAccessForbidden + | ThreepidAuthFailed + | UserDeactivated + | ThreepidDenied => StatusCode::FORBIDDEN, + Unauthorized + | UnknownToken { + .. + } + | MissingToken => StatusCode::UNAUTHORIZED, + NotFound | Unrecognized => StatusCode::NOT_FOUND, + LimitExceeded { + .. 
+ } => StatusCode::TOO_MANY_REQUESTS, + TooLarge => StatusCode::PAYLOAD_TOO_LARGE, + _ => StatusCode::BAD_REQUEST, + }, + ), + Self::Conflict(_) => (Unknown, StatusCode::CONFLICT), + _ => (Unknown, StatusCode::INTERNAL_SERVER_ERROR), + }; + + debug!("Returning an error: {status_code}: {message}"); + RumaResponse(UiaaResponse::MatrixError(RumaError { + body: ErrorBody::Standard { + kind, + message, + }, + status_code, + })) + } + + /// Returns the Matrix error code / error kind + pub fn error_code(&self) -> ErrorKind { + if let Self::FederationError(_, error) = self { + return error.error_kind().unwrap_or(&Unknown).clone(); + } + + match self { + Self::BadRequest(kind, _) => kind.clone(), + _ => Unknown, + } + } + + /// Sanitizes public-facing errors that can leak sensitive information. + pub fn sanitized_error(&self) -> String { + let db_error = String::from("Database or I/O error occurred."); + + match self { + #[cfg(feature = "sqlite")] + Self::SqliteError { + .. + } => db_error, + #[cfg(feature = "rocksdb")] + Self::RocksDbError { + .. + } => db_error, + Self::IoError { + .. + } => db_error, + Self::BadConfig { + .. + } => db_error, + Self::BadDatabase { + .. 
+ } => db_error, + _ => self.to_string(), + } + } +} + +impl From for Error { + fn from(i: Infallible) -> Self { match i {} } +} + +impl axum::response::IntoResponse for Error { + fn into_response(self) -> axum::response::Response { self.to_response().into_response() } +} + +impl std::fmt::Debug for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self) } +} diff --git a/src/utils/mod.rs b/src/utils/mod.rs new file mode 100644 index 00000000..225c2200 --- /dev/null +++ b/src/utils/mod.rs @@ -0,0 +1,185 @@ +pub(crate) mod error; + +use std::{ + cmp, + cmp::Ordering, + fmt, + str::FromStr, + time::{SystemTime, UNIX_EPOCH}, +}; + +use argon2::{password_hash::SaltString, PasswordHasher}; +use rand::prelude::*; +use ring::digest; +use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject, OwnedUserId}; + +use crate::{services, Error, Result}; + +pub(crate) fn clamp(val: T, min: T, max: T) -> T { cmp::min(cmp::max(val, min), max) } + +pub(crate) fn millis_since_unix_epoch() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64 +} + +pub(crate) fn increment(old: Option<&[u8]>) -> Vec { + let number = match old.map(TryInto::try_into) { + Some(Ok(bytes)) => { + let number = u64::from_be_bytes(bytes); + number + 1 + }, + _ => 1, // Start at one. since 0 should return the first event in the db + }; + + number.to_be_bytes().to_vec() +} + +pub fn generate_keypair() -> Vec { + let mut value = random_string(8).as_bytes().to_vec(); + value.push(0xFF); + value.extend_from_slice( + &ruma::signatures::Ed25519KeyPair::generate().expect("Ed25519KeyPair generation always works (?)"), + ); + value +} + +/// Parses the bytes into an u64. +pub fn u64_from_bytes(bytes: &[u8]) -> Result { + let array: [u8; 8] = bytes.try_into()?; + Ok(u64::from_be_bytes(array)) +} + +/// Parses the bytes into a string. 
+pub fn string_from_bytes(bytes: &[u8]) -> Result { + String::from_utf8(bytes.to_vec()) +} + +/// Parses a `OwnedUserId` from bytes. +pub fn user_id_from_bytes(bytes: &[u8]) -> Result { + OwnedUserId::try_from( + string_from_bytes(bytes).map_err(|_| Error::bad_database("Failed to parse string from bytes"))?, + ) + .map_err(|_| Error::bad_database("Failed to parse user id from bytes")) +} + +pub fn random_string(length: usize) -> String { + thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(length) + .map(char::from) + .collect() +} + +/// Calculate a new hash for the given password +pub fn calculate_password_hash(password: &str) -> Result { + let salt = SaltString::generate(thread_rng()); + services() + .globals + .argon + .hash_password(password.as_bytes(), &salt) + .map(|it| it.to_string()) +} + +#[tracing::instrument(skip(keys))] +pub fn calculate_hash(keys: &[&[u8]]) -> Vec { + // We only hash the pdu's event ids, not the whole pdu + let bytes = keys.join(&0xFF); + let hash = digest::digest(&digest::SHA256, &bytes); + hash.as_ref().to_owned() +} + +pub(crate) fn common_elements( + mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, +) -> Option>> { + let first_iterator = iterators.next()?; + let mut other_iterators = iterators.map(Iterator::peekable).collect::>(); + + Some(first_iterator.filter(move |target| { + other_iterators.iter_mut().all(|it| { + while let Some(element) = it.peek() { + match check_order(element, target) { + Ordering::Greater => return false, // We went too far + Ordering::Equal => return true, // Element is in both iters + Ordering::Less => { + // Keep searching + it.next(); + }, + } + } + false + }) + })) +} + +/// Fallible conversion from any value that implements `Serialize` to a +/// `CanonicalJsonObject`. +/// +/// `value` must serialize to an `serde_json::Value::Object`. 
+pub(crate) fn to_canonical_object(value: T) -> Result { + use serde::ser::Error; + + match serde_json::to_value(value).map_err(CanonicalJsonError::SerDe)? { + serde_json::Value::Object(map) => try_from_json_map(map), + _ => Err(CanonicalJsonError::SerDe(serde_json::Error::custom("Value must be an object"))), + } +} + +pub(crate) fn deserialize_from_str<'de, D: serde::de::Deserializer<'de>, T: FromStr, E: fmt::Display>( + deserializer: D, +) -> Result { + struct Visitor, E>(std::marker::PhantomData); + impl, Err: fmt::Display> serde::de::Visitor<'_> for Visitor { + type Value = T; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "a parsable string") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + v.parse().map_err(serde::de::Error::custom) + } + } + deserializer.deserialize_str(Visitor(std::marker::PhantomData)) +} + +// Copied from librustdoc: +// https://github.com/rust-lang/rust/blob/cbaeec14f90b59a91a6b0f17fc046c66fa811892/src/librustdoc/html/escape.rs + +/// Wrapper struct which will emit the HTML-escaped version of the contained +/// string when passed to a format string. 
+pub(crate) struct HtmlEscape<'a>(pub(crate) &'a str); + +impl fmt::Display for HtmlEscape<'_> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + // Because the internet is always right, turns out there's not that many + // characters to escape: http://stackoverflow.com/questions/7381974 + let HtmlEscape(s) = *self; + let pile_o_bits = s; + let mut last = 0; + for (i, ch) in s.char_indices() { + let s = match ch { + '>' => ">", + '<' => "<", + '&' => "&", + '\'' => "'", + '"' => """, + _ => continue, + }; + fmt.write_str(&pile_o_bits[last..i])?; + fmt.write_str(s)?; + // NOTE: we only expect single byte characters here - which is fine as long as + // we only match single byte characters + last = i + 1; + } + + if last < s.len() { + fmt.write_str(&pile_o_bits[last..])?; + } + Ok(()) + } +} diff --git a/tests/cargo_smoke.sh b/tests/cargo_smoke.sh deleted file mode 100755 index 946790c3..00000000 --- a/tests/cargo_smoke.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -export CARGO_TARGET_DIR="target/smokes" - -run () { - RUN_COMMAND=$@ - echo -ne "\033[1;33mTEST\033[0m $RUN_COMMAND" - ERRORS=$($RUN_COMMAND 2>&1>/tmp/uwu_smoketest.out) - RESULT=$? - echo - if test $RESULT -ne 0; then - cat /tmp/uwu_smoketest.out - echo -e "$ERRORS" - echo -e "\033[1;5;41;37mFAIL\033[0m exit ($RESULT): $RUN_COMMAND" - exit $RESULT - else - echo -ne "\033[1F" - echo -e "\033[1;32mPASS\033[0m $RUN_COMMAND" - fi -} - -conduwuit () { - UWU_OPTS=$@ - rm -rf /tmp/uwu_smoketest.db - echo -e "[global]\nserver_name = \"localhost\"\ndatabase_path = \"/tmp/uwu_smoketest.db\"" > /tmp/uwu_smoketest.toml - cargo $UWU_OPTS -- -c /tmp/uwu_smoketest.toml & - sleep 15s - kill -QUIT %1 - wait %1 - return $? 
-} - -element () { - TOOLCHAIN=$1; shift - ELEMENT_OPTS=$@ - run cargo "$TOOLCHAIN" check $ELEMENT_OPTS --all-targets - run cargo "$TOOLCHAIN" clippy $ELEMENT_OPTS --all-targets -- -D warnings - if [ "$BUILD" != "check" ]; then - run cargo "$TOOLCHAIN" build $ELEMENT_OPTS --all-targets - run cargo "$TOOLCHAIN" test $ELEMENT_OPTS --all-targets - run cargo "$TOOLCHAIN" bench $ELEMENT_OPTS --all-targets - run cargo "$TOOLCHAIN" run $ELEMENT_OPTS --bin conduwuit -- -V - run conduwuit "$TOOLCHAIN" run $ELEMENT_OPTS --bin conduwuit - fi -} - -vector () { - TOOLCHAIN=$1; shift - VECTOR_OPTS=$@ - element "$TOOLCHAIN" $VECTOR_OPTS --no-default-features - element "$TOOLCHAIN" $VECTOR_OPTS --features=default - element "$TOOLCHAIN" $VECTOR_OPTS --all-features -} - -matrix () { - run cargo +nightly fmt --all --check - vector "" --profile=dev - vector "" --profile=release - vector +nightly --profile=dev - vector +nightly --profile=release -} - -BUILD=${1:-build} -matrix && exit 0 diff --git a/tests/complement/Dockerfile b/tests/complement/Dockerfile new file mode 100644 index 00000000..2de4b13b --- /dev/null +++ b/tests/complement/Dockerfile @@ -0,0 +1,68 @@ +FROM rust:1.75.0 + +WORKDIR /workdir + +RUN apt-get update && apt-get install -y --no-install-recommends \ + libclang-dev + +COPY Cargo.toml Cargo.toml +COPY Cargo.lock Cargo.lock +COPY src src +RUN cargo build --release --features=axum_dual_protocol \ + && mv target/release/conduit conduit \ + && rm -rf target + +COPY conduwuit-example.toml conduit.toml + +ENV SERVER_NAME=localhost +ENV CONDUIT_CONFIG=/workdir/conduit.toml + +RUN sed -i "s/port = 6167/port = [8448, 8008]/g" conduit.toml +RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml +RUN sed -i "s/allow_registration = false/allow_registration = true/g" conduit.toml +RUN sed -i "s/allow_guest_registration = false/allow_guest_registration = true/g" conduit.toml +RUN sed -i "s/registration_token/#registration_token/g" conduit.toml +RUN sed -i 
"s/allow_guest_registration = false/allow_guest_registration = true/g" conduit.toml +RUN sed -i "s/allow_public_room_directory_over_federation = false/allow_public_room_directory_over_federation = true/g" conduit.toml +RUN sed -i "s/allow_public_room_directory_without_auth = false/allow_public_room_directory_without_auth = true/g" conduit.toml +RUN sed -i "s/allow_device_name_federation = false/allow_device_name_federation = true/g" conduit.toml +RUN sed -i "/\"127.0.0.0/d" conduit.toml +RUN sed -i "/\"10.0.0.0/d" conduit.toml +RUN sed -i "/\"172.16.0.0/d" conduit.toml +RUN sed -i "/\"192./d" conduit.toml +RUN sed -i "/\"169./d" conduit.toml +RUN sed -i "/\"::1/d" conduit.toml +RUN sed -i "/\"fe80/d" conduit.toml +RUN sed -i "/\"fc00/d" conduit.toml +RUN sed -i "/\"fec0/d" conduit.toml +RUN sed -i "/\"2001/d" conduit.toml +RUN sed -i "/\"ff00/d" conduit.toml +RUN sed -i "s/#log = \"warn\"/log = \"debug\"/g" conduit.toml +RUN sed -i 's/#\strusted_servers\s=\s\["matrix.org"\]/trusted_servers = []/g' conduit.toml +RUN sed -i 's/# `yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse` to/yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true/g' conduit.toml + +# https://stackoverflow.com/questions/76049656/unexpected-notvalidforname-with-rusts-tonic-with-tls +RUN echo "authorityKeyIdentifier=keyid,issuer" >> extensions.ext +RUN echo "basicConstraints=CA:FALSE" >> extensions.ext +RUN echo 'subjectAltName = @alt_names' >> extensions.ext +RUN echo '[alt_names]' >> extensions.ext +RUN echo "DNS.1 = servername" >> extensions.ext +RUN echo "IP.1 = ipaddress" >> extensions.ext + + +EXPOSE 8008 8448 + +CMD uname -a && \ + cp -f -v /complement/ca/ca.crt /usr/local/share/ca-certificates/complement.crt && \ + update-ca-certificates && \ + sed -i "s/servername/${SERVER_NAME}/g" extensions.ext && \ + sed -i "s/ipaddress/`hostname -i`/g" extensions.ext && \ + openssl req -newkey rsa:2048 -noenc -subj "/C=US/ST=CA/O=MyOrg, 
Inc./CN=$SERVER_NAME" -keyout $SERVER_NAME.key -out $SERVER_NAME.csr && \ + openssl x509 -signkey $SERVER_NAME.key -in $SERVER_NAME.csr -req -days 2 -out $SERVER_NAME.crt && \ + openssl x509 -req -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -in $SERVER_NAME.csr -out $SERVER_NAME.crt -days 2 -CAcreateserial -extfile extensions.ext && \ + sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \ + sed -i 's/#\s\[global.tls\]/\[global.tls\]/g' conduit.toml && \ + sed -i "s/# certs = \"\/path\/to\/my\/certificate.crt\"/certs = \"${SERVER_NAME}.crt\"/g" conduit.toml && \ + sed -i "s/# key = \"\/path\/to\/my\/private_key.key\"/key = \"${SERVER_NAME}.key\"/g" conduit.toml && \ + sed -i "s/#dual_protocol = false/dual_protocol = true/g" conduit.toml && \ + /workdir/conduit diff --git a/tests/complement/README.md b/tests/complement/README.md new file mode 100644 index 00000000..3223aa6f --- /dev/null +++ b/tests/complement/README.md @@ -0,0 +1,16 @@ +# Complement + +## What's that? + +Have a look at [its repository](https://github.com/matrix-org/complement). + +## How do I use it with Conduit? + +The script at [`../bin/complement`](../bin/complement) has automation for this. 
+It takes a few command line arguments: + +- Path to Complement's source code +- A `.jsonl` file to write test logs to +- A `.jsonl` file to write test results to + +Example: `./bin/complement "../complement" "logs.jsonl" "results.jsonl"` diff --git a/tests/complement/failed_tests.jsonl b/tests/complement/failed_tests.jsonl new file mode 100644 index 00000000..5c9ce5cb --- /dev/null +++ b/tests/complement/failed_tests.jsonl @@ -0,0 +1,596 @@ +{ + "Action": "fail", + "Test": "TestBannedUserCannotSendJoin" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/join_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV1" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV1/leave_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV1/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV2" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV2/leave_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV2/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/invite_event" +} +{ + "Action": "fail", + "Test": 
"TestCannotSendNonKnockViaSendKnock/join_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/leave_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/non-state_membership_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/event_with_mismatched_state_key" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/invite_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/join_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/knock_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/non-state_membership_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/event_with_mismatched_state_key" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/invite_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/join_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/knock_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/regular_event" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummaryJoinRules" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary/max_depth" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary/pagination" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary/query_whole_graph" +} +{ + "Action": "fail", + "Test": 
"TestClientSpacesSummary/redact_link" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary/suggested_only" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederation" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederation/good_connectivity" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederation/interrupted_connectivity" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederationOnRoomJoin" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederation/stopped_server" +} +{ + "Action": "fail", + "Test": "TestEventAuth" +} +{ + "Action": "fail", + "Test": "TestFederationKeyUploadQuery" +} +{ + "Action": "fail", + "Test": "TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST" +} +{ + "Action": "fail", + "Test": "TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST" +} +{ + "Action": "fail", + "Test": "TestFederationRejectInvite" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite/Parallel" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata" +} +{ + "Action": "fail", + "Test": "TestGetMissingEventsGapFilling" +} +{ + "Action": "fail", + "Test": "TestInboundCanReturnMissingEvents" +} +{ + "Action": "fail", + "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility" +} +{ + "Action": "fail", + "Test": 
"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility" +} +{ + "Action": "fail", + "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_shared_visibility" +} +{ + "Action": "fail", + "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_world_readable_visibility" +} +{ + "Action": "fail", + "Test": "TestInboundFederationRejectsEventsWithRejectedAuthEvents" +} +{ + "Action": "fail", + "Test": "TestJoinFederatedRoomFromApplicationServiceBridgeUser" +} +{ + "Action": "fail", + "Test": "TestJumpToDateEndpoint" +} +{ + "Action": "fail", + "Test": "TestKnocking" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it" +} +{ + 
"Action": "fail", + "Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again" +} +{ 
+ "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail" +} +{ + "Action": "fail", + "Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed" +} +{ + "Action": "fail", + "Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock" +} +{ + "Action": "fail", + "Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnockRoomsInPublicRoomsDirectory" +} +{ + "Action": "fail", + "Test": "TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestLocalPngThumbnail" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames" +} +{ + "Action": 
"fail", + "Test": "TestMediaFilenames/Parallel" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/ASCII" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/Unicode" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation" +} +{ + "Action": "fail", + "Test": "TestNetworkPartitionOrdering" +} +{ + "Action": "fail", + "Test": "TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6" +} +{ + "Action": "fail", + "Test": "TestRemotePresence" +} +{ + "Action": "fail", + "Test": "TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members" +} +{ + "Action": "fail", + "Test": "TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsLocalJoin" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoin" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinFailOver" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinFailOverInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room" +} +{ + "Action": "fail", + "Test": 
"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinLocalUser" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsSpacesSummaryFederation" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsSpacesSummaryLocal" +} +{ + "Action": "fail", + "Test": "TestToDeviceMessagesOverFederation" +} +{ + "Action": "fail", + "Test": "TestToDeviceMessagesOverFederation/interrupted_connectivity" +} +{ + "Action": "fail", + "Test": "TestToDeviceMessagesOverFederation/stopped_server" +} +{ + "Action": "fail", + "Test": "TestUnbanViaInvite" +} +{ + "Action": "fail", + "Test": "TestUnknownEndpoints" +} +{ + "Action": "fail", + "Test": "TestUnknownEndpoints/Key_endpoints" +} +{ + "Action": "fail", + "Test": "TestUnrejectRejectedEvents" +} diff --git a/tests/complement/full_results.jsonl b/tests/complement/full_results.jsonl new file mode 100644 index 00000000..0b0b9521 --- /dev/null +++ b/tests/complement/full_results.jsonl @@ -0,0 +1,896 @@ +{ + "Action": "fail", + "Test": "TestBannedUserCannotSendJoin" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/join_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event" +} +{ + "Action": "fail", + "Test": 
"TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV1" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV1/leave_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV1/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV2" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV2/leave_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonJoinViaSendJoinV2/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/invite_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/join_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/leave_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/non-state_membership_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonKnockViaSendKnock/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/event_with_mismatched_state_key" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/invite_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/join_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/knock_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/non-state_membership_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV1/regular_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/event_with_mismatched_state_key" +} +{ + 
"Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/invite_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/join_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/knock_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event" +} +{ + "Action": "fail", + "Test": "TestCannotSendNonLeaveViaSendLeaveV2/regular_event" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummaryJoinRules" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary/max_depth" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary/pagination" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary/query_whole_graph" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary/redact_link" +} +{ + "Action": "fail", + "Test": "TestClientSpacesSummary/suggested_only" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederation" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederation/good_connectivity" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederation/interrupted_connectivity" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederationOnRoomJoin" +} +{ + "Action": "fail", + "Test": "TestDeviceListsUpdateOverFederation/stopped_server" +} +{ + "Action": "fail", + "Test": "TestEventAuth" +} +{ + "Action": "fail", + "Test": "TestFederationKeyUploadQuery" +} +{ + "Action": "fail", + "Test": "TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST" +} +{ + "Action": "fail", + "Test": "TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST" +} +{ + "Action": "fail", + "Test": "TestFederationRejectInvite" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite/Parallel" +} +{ + "Action": "fail", + "Test": 
"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining" +} +{ + "Action": "fail", + "Test": "TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata" +} +{ + "Action": "fail", + "Test": "TestGetMissingEventsGapFilling" +} +{ + "Action": "fail", + "Test": "TestInboundCanReturnMissingEvents" +} +{ + "Action": "fail", + "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility" +} +{ + "Action": "fail", + "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility" +} +{ + "Action": "fail", + "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_shared_visibility" +} +{ + "Action": "fail", + "Test": "TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_world_readable_visibility" +} +{ + "Action": "fail", + "Test": "TestInboundFederationRejectsEventsWithRejectedAuthEvents" +} +{ + "Action": "fail", + "Test": "TestJoinFederatedRoomFromApplicationServiceBridgeUser" +} +{ + "Action": "fail", + "Test": "TestJumpToDateEndpoint" +} +{ + "Action": "fail", + "Test": "TestKnocking" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_can_knock_on_a_room_without_a_reason#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to" +} +{ + "Action": 
"fail", + "Test": "TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_in_the_room_can_reject_a_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it" +} +{ + "Action": "fail", + "Test": "TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01" +} +{ + "Action": "fail", + "Test": 
"TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock" +} +{ + "Action": "fail", + "Test": "TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail" +} +{ + "Action": "fail", + "Test": 
"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed" +} +{ + "Action": "fail", + "Test": "TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01" +} +{ + "Action": "fail", + "Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock" +} +{ + "Action": "fail", + "Test": "TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01" +} +{ + "Action": "fail", + "Test": "TestKnockRoomsInPublicRoomsDirectory" +} +{ + "Action": "fail", + "Test": "TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestLocalPngThumbnail" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/ASCII" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/Unicode" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally" +} +{ + "Action": "fail", + "Test": "TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation" +} +{ + "Action": "fail", + "Test": "TestNetworkPartitionOrdering" +} +{ + "Action": "fail", + "Test": "TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6" +} +{ + "Action": "fail", + "Test": "TestRemotePresence" +} +{ + "Action": "fail", + "Test": "TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members" +} +{ + "Action": "fail", + 
"Test": "TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsLocalJoin" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoin" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinFailOver" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinFailOverInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinLocalUser" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsSpacesSummaryFederation" +} +{ + "Action": "fail", + "Test": "TestRestrictedRoomsSpacesSummaryLocal" +} +{ + "Action": "fail", + "Test": "TestToDeviceMessagesOverFederation" +} +{ + "Action": "fail", + "Test": "TestToDeviceMessagesOverFederation/interrupted_connectivity" +} +{ + "Action": "fail", + "Test": "TestToDeviceMessagesOverFederation/stopped_server" +} +{ + "Action": "fail", + "Test": "TestUnbanViaInvite" +} +{ + "Action": "fail", + "Test": "TestUnknownEndpoints" +} +{ + "Action": "fail", + "Test": "TestUnknownEndpoints/Key_endpoints" +} +{ + "Action": "fail", + "Test": "TestUnrejectRejectedEvents" +} +{ + "Action": "pass", + "Test": "TestACLs" +} +{ + "Action": "pass", + "Test": 
"TestCannotSendNonJoinViaSendJoinV1/event_with_mismatched_state_key" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV1/invite_event" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV1/knock_event" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV1/non-state_membership_event" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV2/event_with_mismatched_state_key" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV2/invite_event" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV2/knock_event" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV2/non-state_membership_event" +} +{ + "Action": "pass", + "Test": "TestFederatedClientSpaces" +} +{ + "Action": "pass", + "Test": "TestFederationRedactSendsWithoutEvent" +} +{ + "Action": "pass", + "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room" +} +{ + "Action": "pass", + "Test": "TestInboundFederationKeys" +} +{ + "Action": "pass", + "Test": "TestInboundFederationProfile" +} +{ + "Action": "pass", + "Test": "TestInboundFederationProfile/Inbound_federation_can_query_profile_data" +} +{ + "Action": "pass", + "Test": "TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected" +} +{ + "Action": "pass", + "Test": "TestIsDirectFlagFederation" +} +{ + "Action": "pass", + "Test": "TestIsDirectFlagLocal" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomFailOver" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomWithUnverifiableEvents" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join" +} +{ + "Action": "pass", + "Test": 
"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join" +} +{ + "Action": "pass", + "Test": "TestJoinViaRoomIDAndServerName" +} +{ + "Action": "pass", + "Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail" +} +{ + "Action": "pass", + "Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01" +} +{ + "Action": "pass", + "Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock" +} +{ + "Action": "pass", + "Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock#01" +} +{ + "Action": "pass", + "Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'" +} +{ + "Action": "pass", + "Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01" +} +{ + "Action": "pass", + "Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'" +} +{ + "Action": "pass", + "Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01" +} +{ + "Action": "pass", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'" +} +{ + "Action": "pass", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name" +} +{ + "Action": "pass", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_upload_with_ASCII_file_name" +} +{ + "Action": "pass", + "Test": "TestMediaFilenames/Parallel/Unicode/Can_upload_with_Unicode_file_name" +} +{ + "Action": "pass", + "Test": "TestMediaWithoutFileName" +} +{ + "Action": "pass", + "Test": "TestMediaWithoutFileName/parallel" +} +{ + "Action": "pass", + "Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_locally" +} +{ + "Action": "pass", + "Test": 
"TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_over_federation" +} +{ + "Action": "pass", + "Test": "TestMediaWithoutFileName/parallel/Can_upload_without_a_file_name" +} +{ + "Action": "pass", + "Test": "TestOutboundFederationProfile" +} +{ + "Action": "pass", + "Test": "TestOutboundFederationProfile/Outbound_federation_can_query_profile_data" +} +{ + "Action": "pass", + "Test": "TestOutboundFederationSend" +} +{ + "Action": "pass", + "Test": "TestRemoteAliasRequestsUnderstandUnicode" +} +{ + "Action": "pass", + "Test": "TestRemotePngThumbnail" +} +{ + "Action": "pass", + "Test": "TestRemoteTyping" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_initially" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_when_left_allowed_room" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_invited" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_initially" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_with_mangled_join_rules" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_invited" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_initially" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_when_left_allowed_room" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited" +} +{ + "Action": "pass", + "Test": 
"TestRestrictedRoomsRemoteJoin/Join_should_fail_initially" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_with_mangled_join_rules" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited" +} +{ + "Action": "pass", + "Test": "TestToDeviceMessagesOverFederation/good_connectivity" +} +{ + "Action": "pass", + "Test": "TestUnknownEndpoints/Client-server_endpoints" +} +{ + "Action": "pass", + "Test": "TestUnknownEndpoints/Media_endpoints" +} +{ + "Action": "pass", + "Test": "TestUnknownEndpoints/Server-server_endpoints" +} +{ + "Action": "pass", + "Test": "TestUnknownEndpoints/Unknown_prefix" +} +{ + "Action": "pass", + "Test": "TestUserAppearsInChangedDeviceListOnJoinOverFederation" +} +{ + "Action": "pass", + "Test": "TestWriteMDirectAccountData" +} +{ + "Action": "skip", + "Test": "TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_as_inline" +} +{ + "Action": "skip", + "Test": "TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_with_parameters_as_inline" +} +{ + "Action": "skip", + "Test": "TestMediaFilenames/Parallel/Unicode/Will_serve_unsafe_media_types_as_attachments" +} +{ + "Action": "skip", + "Test": "TestSendJoinPartialStateResponse" +} diff --git a/tests/complement/passed_tests.jsonl b/tests/complement/passed_tests.jsonl new file mode 100644 index 00000000..98022b31 --- /dev/null +++ b/tests/complement/passed_tests.jsonl @@ -0,0 +1,284 @@ +{ + "Action": "pass", + "Test": "TestACLs" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV1/event_with_mismatched_state_key" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV1/invite_event" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV1/knock_event" +} +{ + "Action": "pass", + "Test": 
"TestCannotSendNonJoinViaSendJoinV1/non-state_membership_event" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV2/event_with_mismatched_state_key" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV2/invite_event" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV2/knock_event" +} +{ + "Action": "pass", + "Test": "TestCannotSendNonJoinViaSendJoinV2/non-state_membership_event" +} +{ + "Action": "pass", + "Test": "TestFederatedClientSpaces" +} +{ + "Action": "pass", + "Test": "TestFederationRedactSendsWithoutEvent" +} +{ + "Action": "pass", + "Test": "TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room" +} +{ + "Action": "pass", + "Test": "TestInboundFederationKeys" +} +{ + "Action": "pass", + "Test": "TestInboundFederationProfile" +} +{ + "Action": "pass", + "Test": "TestInboundFederationProfile/Inbound_federation_can_query_profile_data" +} +{ + "Action": "pass", + "Test": "TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected" +} +{ + "Action": "pass", + "Test": "TestIsDirectFlagFederation" +} +{ + "Action": "pass", + "Test": "TestIsDirectFlagLocal" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomFailOver" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomWithUnverifiableEvents" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join" +} +{ + "Action": "pass", + "Test": "TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join" +} +{ + "Action": "pass", + "Test": 
"TestJoinViaRoomIDAndServerName" +} +{ + "Action": "pass", + "Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail" +} +{ + "Action": "pass", + "Test": "TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01" +} +{ + "Action": "pass", + "Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock" +} +{ + "Action": "pass", + "Test": "TestKnocking/A_user_in_the_room_can_accept_a_knock#01" +} +{ + "Action": "pass", + "Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'" +} +{ + "Action": "pass", + "Test": "TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01" +} +{ + "Action": "pass", + "Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'" +} +{ + "Action": "pass", + "Test": "TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01" +} +{ + "Action": "pass", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'" +} +{ + "Action": "pass", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name" +} +{ + "Action": "pass", + "Test": "TestMediaFilenames/Parallel/ASCII/Can_upload_with_ASCII_file_name" +} +{ + "Action": "pass", + "Test": "TestMediaFilenames/Parallel/Unicode/Can_upload_with_Unicode_file_name" +} +{ + "Action": "pass", + "Test": "TestMediaWithoutFileName" +} +{ + "Action": "pass", + "Test": "TestMediaWithoutFileName/parallel" +} +{ + "Action": "pass", + "Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_locally" +} +{ + "Action": "pass", + "Test": "TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_over_federation" +} +{ + "Action": "pass", + "Test": "TestMediaWithoutFileName/parallel/Can_upload_without_a_file_name" +} +{ + "Action": "pass", + "Test": "TestOutboundFederationProfile" +} +{ + "Action": "pass", + "Test": 
"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data" +} +{ + "Action": "pass", + "Test": "TestOutboundFederationSend" +} +{ + "Action": "pass", + "Test": "TestRemoteAliasRequestsUnderstandUnicode" +} +{ + "Action": "pass", + "Test": "TestRemotePngThumbnail" +} +{ + "Action": "pass", + "Test": "TestRemoteTyping" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_initially" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_when_left_allowed_room" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_invited" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_initially" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_fail_with_mangled_join_rules" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsLocalJoin/Join_should_succeed_when_invited" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_initially" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_when_left_allowed_room" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_initially" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room" +} +{ + "Action": "pass", + "Test": "TestRestrictedRoomsRemoteJoin/Join_should_fail_with_mangled_join_rules" +} +{ + "Action": 
"pass", + "Test": "TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited" +} +{ + "Action": "pass", + "Test": "TestToDeviceMessagesOverFederation/good_connectivity" +} +{ + "Action": "pass", + "Test": "TestUnknownEndpoints/Client-server_endpoints" +} +{ + "Action": "pass", + "Test": "TestUnknownEndpoints/Media_endpoints" +} +{ + "Action": "pass", + "Test": "TestUnknownEndpoints/Server-server_endpoints" +} +{ + "Action": "pass", + "Test": "TestUnknownEndpoints/Unknown_prefix" +} +{ + "Action": "pass", + "Test": "TestUserAppearsInChangedDeviceListOnJoinOverFederation" +} +{ + "Action": "pass", + "Test": "TestWriteMDirectAccountData" +} diff --git a/tests/sytest/are-we-synapse-yet.list b/tests/sytest/are-we-synapse-yet.list new file mode 100644 index 00000000..99091989 --- /dev/null +++ b/tests/sytest/are-we-synapse-yet.list @@ -0,0 +1,866 @@ +reg GET /register yields a set of flows +reg POST /register can create a user +reg POST /register downcases capitals in usernames +reg POST /register returns the same device_id as that in the request +reg POST /register rejects registration of usernames with '!' +reg POST /register rejects registration of usernames with '"' +reg POST /register rejects registration of usernames with ':' +reg POST /register rejects registration of usernames with '?' 
+reg POST /register rejects registration of usernames with '\' +reg POST /register rejects registration of usernames with '@' +reg POST /register rejects registration of usernames with '[' +reg POST /register rejects registration of usernames with ']' +reg POST /register rejects registration of usernames with '{' +reg POST /register rejects registration of usernames with '|' +reg POST /register rejects registration of usernames with '}' +reg POST /register rejects registration of usernames with '£' +reg POST /register rejects registration of usernames with 'é' +reg POST /register rejects registration of usernames with '\n' +reg POST /register rejects registration of usernames with ''' +reg POST /r0/admin/register with shared secret +reg POST /r0/admin/register admin with shared secret +reg POST /r0/admin/register with shared secret downcases capitals +reg POST /r0/admin/register with shared secret disallows symbols +reg POST rejects invalid utf-8 in JSON +log GET /login yields a set of flows +log POST /login can log in as a user +log POST /login returns the same device_id as that in the request +log POST /login can log in as a user with just the local part of the id +log POST /login as non-existing user is rejected +log POST /login wrong password is rejected +log Interactive authentication types include SSO +log Can perform interactive authentication with SSO +log The user must be consistent through an interactive authentication session with SSO +log The operation must be consistent through an interactive authentication session +v1s GET /events initially +v1s GET /initialSync initially +csa Version responds 200 OK with valid structure +pro PUT /profile/:user_id/displayname sets my name +pro GET /profile/:user_id/displayname publicly accessible +pro PUT /profile/:user_id/avatar_url sets my avatar +pro GET /profile/:user_id/avatar_url publicly accessible +dev GET /device/{deviceId} +dev GET /device/{deviceId} gives a 404 for unknown devices +dev GET /devices +dev PUT 
/device/{deviceId} updates device fields +dev PUT /device/{deviceId} gives a 404 for unknown devices +dev DELETE /device/{deviceId} +dev DELETE /device/{deviceId} requires UI auth user to match device owner +dev DELETE /device/{deviceId} with no body gives a 401 +dev The deleted device must be consistent through an interactive auth session +dev Users receive device_list updates for their own devices +pre GET /presence/:user_id/status fetches initial status +pre PUT /presence/:user_id/status updates my presence +crm POST /createRoom makes a public room +crm POST /createRoom makes a private room +crm POST /createRoom makes a private room with invites +crm POST /createRoom makes a room with a name +crm POST /createRoom makes a room with a topic +syn Can /sync newly created room +crm POST /createRoom creates a room with the given version +crm POST /createRoom rejects attempts to create rooms with numeric versions +crm POST /createRoom rejects attempts to create rooms with unknown versions +crm POST /createRoom ignores attempts to set the room version via creation_content +mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership +mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event +rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels +mem GET /rooms/:room_id/joined_members fetches my membership +v1s GET /rooms/:room_id/initialSync fetches initial sync state +pub GET /publicRooms lists newly-created room +ali GET /directory/room/:room_alias yields room ID +mem GET /joined_rooms lists newly-created room +rst POST /rooms/:room_id/state/m.room.name sets name +rst GET /rooms/:room_id/state/m.room.name gets name +rst POST /rooms/:room_id/state/m.room.topic sets topic +rst GET /rooms/:room_id/state/m.room.topic gets topic +rst GET /rooms/:room_id/state fetches entire room state +crm POST /createRoom with creation content +ali PUT /directory/room/:room_alias creates alias +nsp GET 
/rooms/:room_id/aliases lists aliases +jon POST /rooms/:room_id/join can join a room +jon POST /join/:room_alias can join a room +jon POST /join/:room_id can join a room +jon POST /join/:room_id can join a room with custom content +jon POST /join/:room_alias can join a room with custom content +lev POST /rooms/:room_id/leave can leave a room +inv POST /rooms/:room_id/invite can send an invite +ban POST /rooms/:room_id/ban can ban a user +snd POST /rooms/:room_id/send/:event_type sends a message +snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message +snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id +get GET /rooms/:room_id/messages returns a message +get GET /rooms/:room_id/messages lazy loads members correctly +typ PUT /rooms/:room_id/typing/:user_id sets typing notification +typ Typing notifications don't leak (3 subtests) +rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels +rst PUT /rooms/:room_id/state/m.room.power_levels can set levels +rst PUT power_levels should not explode if the old power levels were empty +rst Both GET and PUT work +rct POST /rooms/:room_id/receipt can create receipts +red POST /rooms/:room_id/read_markers can create read marker +med POST /media/r0/upload can create an upload +med GET /media/r0/download can fetch the value again +cap GET /capabilities is present and well formed for registered user +cap GET /r0/capabilities is not public +reg Register with a recaptcha +reg registration is idempotent, without username specified +reg registration is idempotent, with username specified +reg registration remembers parameters +reg registration accepts non-ascii passwords +reg registration with inhibit_login inhibits login +reg User signups are forbidden from starting with '_' +reg Can register using an email address +log Can login with 3pid and password using m.login.password +log login types include SSO +log /login/cas/redirect redirects if the old m.login.cas login type is listed +log Can 
login with new user via CAS +lox Can logout current device +lox Can logout all devices +lox Request to logout with invalid an access token is rejected +lox Request to logout without an access token is rejected +log After changing password, can't log in with old password +log After changing password, can log in with new password +log After changing password, existing session still works +log After changing password, a different session no longer works by default +log After changing password, different sessions can optionally be kept +psh Pushers created with a different access token are deleted on password change +psh Pushers created with a the same access token are not deleted on password change +acc Can deactivate account +acc Can't deactivate account with wrong password +acc After deactivating account, can't log in with password +acc After deactivating account, can't log in with an email +v1s initialSync sees my presence status +pre Presence change reports an event to myself +pre Friends presence changes reports events +crm Room creation reports m.room.create to myself +crm Room creation reports m.room.member to myself +rst Setting room topic reports m.room.topic to myself +v1s Global initialSync +v1s Global initialSync with limit=0 gives no messages +v1s Room initialSync +v1s Room initialSync with limit=0 gives no messages +rst Setting state twice is idempotent +jon Joining room twice is idempotent +syn New room members see their own join event +v1s New room members see existing users' presence in room initialSync +syn Existing members see new members' join events +syn Existing members see new members' presence +v1s All room members see all room members' presence in global initialSync +f,jon Remote users can join room by alias +syn New room members see their own join event +v1s New room members see existing members' presence in room initialSync +syn Existing members see new members' join events +syn Existing members see new member's presence +v1s New room 
members see first user's profile information in global initialSync +v1s New room members see first user's profile information in per-room initialSync +f,jon Remote users may not join unfederated rooms +syn Local room members see posted message events +v1s Fetching eventstream a second time doesn't yield the message again +syn Local non-members don't see posted message events +get Local room members can get room messages +f,syn Remote room members also see posted message events +f,get Remote room members can get room messages +get Message history can be paginated +f,get Message history can be paginated over federation +eph Ephemeral messages received from clients are correctly expired +ali Room aliases can contain Unicode +f,ali Remote room alias queries can handle Unicode +ali Canonical alias can be set +ali Canonical alias can include alt_aliases +ali Regular users can add and delete aliases in the default room configuration +ali Regular users can add and delete aliases when m.room.aliases is restricted +ali Deleting a non-existent alias should return a 404 +ali Users can't delete other's aliases +ali Users with sufficient power-level can delete other's aliases +ali Can delete canonical alias +ali Alias creators can delete alias with no ops +ali Alias creators can delete canonical alias with no ops +ali Only room members can list aliases of a room +inv Can invite users to invite-only rooms +inv Uninvited users cannot join the room +inv Invited user can reject invite +f,inv Invited user can reject invite over federation +f,inv Invited user can reject invite over federation several times +inv Invited user can reject invite for empty room +f,inv Invited user can reject invite over federation for empty room +inv Invited user can reject local invite after originator leaves +inv Invited user can see room metadata +f,inv Remote invited user can see room metadata +inv Users cannot invite themselves to a room +inv Users cannot invite a user that is already in the room +ban 
Banned user is kicked and may not rejoin until unbanned +f,ban Remote banned user is kicked and may not rejoin until unbanned +ban 'ban' event respects room powerlevel +plv setting 'm.room.name' respects room powerlevel +plv setting 'm.room.power_levels' respects room powerlevel (2 subtests) +plv Unprivileged users can set m.room.topic if it only needs level 0 +plv Users cannot set ban powerlevel higher than their own (2 subtests) +plv Users cannot set kick powerlevel higher than their own (2 subtests) +plv Users cannot set redact powerlevel higher than their own (2 subtests) +v1s Check that event streams started after a client joined a room work (SYT-1) +v1s Event stream catches up fully after many messages +xxx POST /rooms/:room_id/redact/:event_id as power user redacts message +xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message +xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message +xxx POST /redact disallows redaction of event in different room +xxx Redaction of a redaction redacts the redaction reason +v1s A departed room is still included in /initialSync (SPEC-216) +v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) +rst Can get rooms/{roomId}/state for a departed room (SPEC-216) +mem Can get rooms/{roomId}/members for a departed room (SPEC-216) +get Can get rooms/{roomId}/messages for a departed room (SPEC-216) +rst Can get 'm.room.name' state for a departed room (SPEC-216) +syn Getting messages going forward is limited for a departed room (SPEC-216) +3pd Can invite existing 3pid +3pd Can invite existing 3pid with no ops into a private room +3pd Can invite existing 3pid in createRoom +3pd Can invite unbound 3pid +f,3pd Can invite unbound 3pid over federation +3pd Can invite unbound 3pid with no ops into a private room +f,3pd Can invite unbound 3pid over federation with no ops into a private room +f,3pd Can invite unbound 3pid over federation with users from both servers +3pd Can 
accept unbound 3pid invite after inviter leaves +3pd Can accept third party invite with /join +3pd 3pid invite join with wrong but valid signature are rejected +3pd 3pid invite join valid signature but revoked keys are rejected +3pd 3pid invite join valid signature but unreachable ID server are rejected +gst Guest user cannot call /events globally +gst Guest users can join guest_access rooms +gst Guest users can send messages to guest_access rooms if joined +gst Guest user calling /events doesn't tightloop +gst Guest users are kicked from guest_access rooms on revocation of guest_access +gst Guest user can set display names +gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation +gst Guest user can upgrade to fully featured user +gst Guest user cannot upgrade other users +pub GET /publicRooms lists rooms +pub GET /publicRooms includes avatar URLs +gst Guest users can accept invites to private rooms over federation +gst Guest users denied access over federation if guest access prohibited +mem Room members can override their displayname on a room-specific basis +mem Room members can join a room with an overridden displayname +mem Users cannot kick users from a room they are not in +mem Users cannot kick users who have already left a room +typ Typing notification sent to local room members +f,typ Typing notifications also sent to remote room members +typ Typing can be explicitly stopped +rct Read receipts are visible to /initialSync +rct Read receipts are sent as events +rct Receipts must be m.read +pro displayname updates affect room member events +pro avatar_url updates affect room member events +gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "shared" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users +gst m.room.history_visibility == "joined" allows/forbids 
appropriately for Guest users +gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users +gst Guest non-joined user cannot call /events on shared room +gst Guest non-joined user cannot call /events on invited room +gst Guest non-joined user cannot call /events on joined room +gst Guest non-joined user cannot call /events on default room +gst Guest non-joined user can call /events on world_readable room +gst Guest non-joined users can get state for world_readable rooms +gst Guest non-joined users can get individual state for world_readable rooms +gst Guest non-joined users cannot room initalSync for non-world_readable rooms +gst Guest non-joined users can room initialSync for world_readable rooms +gst Guest non-joined users can get individual state for world_readable rooms after leaving +gst Guest non-joined users cannot send messages to guest_access rooms if not joined +gst Guest users can sync from world_readable guest_access rooms if joined +gst Guest users can sync from shared guest_access rooms if joined +gst Guest users can sync from invited guest_access rooms if joined +gst Guest users can sync from joined guest_access rooms if joined +gst Guest users can sync from default guest_access rooms if joined +ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users +ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users +ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users +ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users +ath m.room.history_visibility == "default" allows/forbids appropriately for Real users +ath Real non-joined user cannot call /events on shared room +ath Real non-joined user cannot call /events on invited room +ath Real non-joined user cannot call /events on joined room +ath Real non-joined user cannot call /events on default room +ath Real non-joined user can call /events on 
world_readable room +ath Real non-joined users can get state for world_readable rooms +ath Real non-joined users can get individual state for world_readable rooms +ath Real non-joined users cannot room initalSync for non-world_readable rooms +ath Real non-joined users can room initialSync for world_readable rooms +ath Real non-joined users can get individual state for world_readable rooms after leaving +ath Real non-joined users cannot send messages to guest_access rooms if not joined +ath Real users can sync from world_readable guest_access rooms if joined +ath Real users can sync from shared guest_access rooms if joined +ath Real users can sync from invited guest_access rooms if joined +ath Real users can sync from joined guest_access rooms if joined +ath Real users can sync from default guest_access rooms if joined +ath Only see history_visibility changes on boundaries +f,ath Backfill works correctly with history visibility set to joined +fgt Forgotten room messages cannot be paginated +fgt Forgetting room does not show up in v2 /sync +fgt Can forget room you've been kicked from +fgt Can't forget room you're still in +fgt Can re-join room if re-invited +ath Only original members of the room can see messages from erased users +mem /joined_rooms returns only joined rooms +mem /joined_members return joined members +ctx /context/ on joined room works +ctx /context/ on non world readable room does not work +ctx /context/ returns correct number of events +ctx /context/ with lazy_load_members filter works +get /event/ on joined room works +get /event/ on non world readable room does not work +get /event/ does not allow access to events before the user joined +mem Can get rooms/{roomId}/members +mem Can get rooms/{roomId}/members at a given point +mem Can filter rooms/{roomId}/members +upg /upgrade creates a new room +upg /upgrade should preserve room visibility for public rooms +upg /upgrade should preserve room visibility for private rooms +upg /upgrade copies >100 
power levels to the new room +upg /upgrade copies the power levels to the new room +upg /upgrade preserves the power level of the upgrading user in old and new rooms +upg /upgrade copies important state to the new room +upg /upgrade copies ban events to the new room +upg local user has push rules copied to upgraded room +f,upg remote user has push rules copied to upgraded room +upg /upgrade moves aliases to the new room +upg /upgrade moves remote aliases to the new room +upg /upgrade preserves direct room state +upg /upgrade preserves room federation ability +upg /upgrade restricts power levels in the old room +upg /upgrade restricts power levels in the old room when the old PLs are unusual +upg /upgrade to an unknown version is rejected +upg /upgrade is rejected if the user can't send state events +upg /upgrade of a bogus room fails gracefully +upg Cannot send tombstone event that points to the same room +f,upg Local and remote users' homeservers remove a room from their public directory on upgrade +rst Name/topic keys are correct +f,pub Can get remote public room list +pub Can paginate public room list +pub Can search public room list +syn Can create filter +syn Can download filter +syn Can sync +syn Can sync a joined room +syn Full state sync includes joined rooms +syn Newly joined room is included in an incremental sync +syn Newly joined room has correct timeline in incremental sync +syn Newly joined room includes presence in incremental sync +syn Get presence for newly joined members in incremental sync +syn Can sync a room with a single message +syn Can sync a room with a message with a transaction id +syn A message sent after an initial sync appears in the timeline of an incremental sync. 
+syn A filtered timeline reaches its limit +syn Syncing a new room with a large timeline limit isn't limited +syn A full_state incremental update returns only recent timeline +syn A prev_batch token can be used in the v1 messages API +syn A next_batch token can be used in the v1 messages API +syn User sees their own presence in a sync +syn User is offline if they set_presence=offline in their sync +syn User sees updates to presence from other users in the incremental sync. +syn State is included in the timeline in the initial sync +f,syn State from remote users is included in the state in the initial sync +syn Changes to state are included in an incremental sync +syn Changes to state are included in an gapped incremental sync +f,syn State from remote users is included in the timeline in an incremental sync +syn A full_state incremental update returns all state +syn When user joins a room the state is included in the next sync +syn A change to displayname should not result in a full state sync +syn A change to displayname should appear in incremental /sync +syn When user joins a room the state is included in a gapped sync +syn When user joins and leaves a room in the same batch, the full state is still included in the next sync +syn Current state appears in timeline in private history +syn Current state appears in timeline in private history with many messages before +syn Current state appears in timeline in private history with many messages after +syn Rooms a user is invited to appear in an initial sync +syn Rooms a user is invited to appear in an incremental sync +syn Newly joined room is included in an incremental sync after invite +syn Sync can be polled for updates +syn Sync is woken up for leaves +syn Left rooms appear in the leave section of sync +syn Newly left rooms appear in the leave section of incremental sync +syn We should see our own leave event, even if history_visibility is restricted (SYN-662) +syn We should see our own leave event when rejecting 
an invite, even if history_visibility is restricted (riot-web/3462) +syn Newly left rooms appear in the leave section of gapped sync +syn Previously left rooms don't appear in the leave section of sync +syn Left rooms appear in the leave section of full state sync +syn Archived rooms only contain history from before the user left +syn Banned rooms appear in the leave section of sync +syn Newly banned rooms appear in the leave section of incremental sync +syn Newly banned rooms appear in the leave section of incremental sync +syn Typing events appear in initial sync +syn Typing events appear in incremental sync +syn Typing events appear in gapped sync +syn Read receipts appear in initial v2 /sync +syn New read receipts appear in incremental v2 /sync +syn Can pass a JSON filter as a query parameter +syn Can request federation format via the filter +syn Read markers appear in incremental v2 /sync +syn Read markers appear in initial v2 /sync +syn Read markers can be updated +syn Lazy loading parameters in the filter are strictly boolean +syn The only membership state included in an initial sync is for all the senders in the timeline +syn The only membership state included in an incremental sync is for senders in the timeline +syn The only membership state included in a gapped incremental sync is for senders in the timeline +syn Gapped incremental syncs include all state changes +syn Old leaves are present in gapped incremental syncs +syn Leaves are present in non-gapped incremental syncs +syn Old members are included in gappy incr LL sync if they start speaking +syn Members from the gap are included in gappy incr LL sync +syn We don't send redundant membership state across incremental syncs by default +syn We do send redundant membership state across incremental syncs if asked +syn Unnamed room comes with a name summary +syn Named room comes with just joined member count summary +syn Room summary only has 5 heroes +syn Room summary counts change when membership changes 
+rmv User can create and send/receive messages in a room with version 1 +rmv User can create and send/receive messages in a room with version 1 (2 subtests) +rmv local user can join room with version 1 +rmv User can invite local user to room with version 1 +rmv remote user can join room with version 1 +rmv User can invite remote user to room with version 1 +rmv Remote user can backfill in a room with version 1 +rmv Can reject invites over federation for rooms with version 1 +rmv Can receive redactions from regular users over federation in room version 1 +rmv User can create and send/receive messages in a room with version 2 +rmv User can create and send/receive messages in a room with version 2 (2 subtests) +rmv local user can join room with version 2 +rmv User can invite local user to room with version 2 +rmv remote user can join room with version 2 +rmv User can invite remote user to room with version 2 +rmv Remote user can backfill in a room with version 2 +rmv Can reject invites over federation for rooms with version 2 +rmv Can receive redactions from regular users over federation in room version 2 +rmv User can create and send/receive messages in a room with version 3 +rmv User can create and send/receive messages in a room with version 3 (2 subtests) +rmv local user can join room with version 3 +rmv User can invite local user to room with version 3 +rmv remote user can join room with version 3 +rmv User can invite remote user to room with version 3 +rmv Remote user can backfill in a room with version 3 +rmv Can reject invites over federation for rooms with version 3 +rmv Can receive redactions from regular users over federation in room version 3 +rmv User can create and send/receive messages in a room with version 4 +rmv User can create and send/receive messages in a room with version 4 (2 subtests) +rmv local user can join room with version 4 +rmv User can invite local user to room with version 4 +rmv remote user can join room with version 4 +rmv User can 
invite remote user to room with version 4 +rmv Remote user can backfill in a room with version 4 +rmv Can reject invites over federation for rooms with version 4 +rmv Can receive redactions from regular users over federation in room version 4 +rmv User can create and send/receive messages in a room with version 5 +rmv User can create and send/receive messages in a room with version 5 (2 subtests) +rmv local user can join room with version 5 +rmv User can invite local user to room with version 5 +rmv remote user can join room with version 5 +rmv User can invite remote user to room with version 5 +rmv Remote user can backfill in a room with version 5 +rmv Can reject invites over federation for rooms with version 5 +rmv Can receive redactions from regular users over federation in room version 5 +rmv User can create and send/receive messages in a room with version 6 +rmv User can create and send/receive messages in a room with version 6 (2 subtests) +rmv local user can join room with version 6 +rmv User can invite local user to room with version 6 +rmv remote user can join room with version 6 +rmv User can invite remote user to room with version 6 +rmv Remote user can backfill in a room with version 6 +rmv Can reject invites over federation for rooms with version 6 +rmv Can receive redactions from regular users over federation in room version 6 +rmv Inbound federation rejects invites which include invalid JSON for room version 6 +rmv Outbound federation rejects invite response which include invalid JSON for room version 6 +rmv Inbound federation rejects invite rejections which include invalid JSON for room version 6 +rmv Server rejects invalid JSON in a version 6 room +pre Presence changes are reported to local room members +f,pre Presence changes are also reported to remote room members +pre Presence changes to UNAVAILABLE are reported to local room members +f,pre Presence changes to UNAVAILABLE are reported to remote room members +v1s Newly created users see their 
own presence in /initialSync (SYT-34) +dvk Can upload device keys +dvk Should reject keys claiming to belong to a different user +dvk Can query device keys using POST +dvk Can query specific device keys using POST +dvk query for user with no keys returns empty key dict +dvk Can claim one time key using POST +f,dvk Can query remote device keys using POST +f,dvk Can claim remote one time key using POST +dvk Local device key changes appear in v2 /sync +dvk Local new device changes appear in v2 /sync +dvk Local delete device changes appear in v2 /sync +dvk Local update device changes appear in v2 /sync +dvk Can query remote device keys using POST after notification +f,dev Device deletion propagates over federation +f,dev If remote user leaves room, changes device and rejoins we see update in sync +f,dev If remote user leaves room we no longer receive device updates +dvk Local device key changes appear in /keys/changes +dvk New users appear in /keys/changes +f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes +dvk Get left notifs in sync and /keys/changes when other user leaves +dvk Get left notifs for other users in sync and /keys/changes when user leaves +f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes +dkb Can create backup version +dkb Can update backup version +dkb Responds correctly when backup is empty +dkb Can backup keys +dkb Can update keys with better versions +dkb Will not update keys with worse versions +dkb Will not back up to an old backup version +dkb Can delete backup +dkb Deleted & recreated backups are empty +dkb Can create more than 10 backup versions +xsk Can upload self-signing keys +xsk Fails to upload self-signing keys with no auth +xsk Fails to upload self-signing key without master key +xsk Changing master key notifies local users +xsk Changing user-signing key notifies local users +f,xsk can fetch self-signing keys over federation +f,xsk 
uploading self-signing key notifies over federation +f,xsk uploading signed devices gets propagated over federation +tag Can add tag +tag Can remove tag +tag Can list tags for a room +v1s Tags appear in the v1 /events stream +v1s Tags appear in the v1 /initalSync +v1s Tags appear in the v1 room initial sync +tag Tags appear in an initial v2 /sync +tag Newly updated tags appear in an incremental v2 /sync +tag Deleted tags appear in an incremental v2 /sync +tag local user has tags copied to the new room +f,tag remote user has tags copied to the new room +sch Can search for an event by body +sch Can get context around search results +sch Can back-paginate search results +sch Search works across an upgraded room and its predecessor +sch Search results with rank ordering do not include redacted events +sch Search results with recent ordering do not include redacted events +acc Can add account data +acc Can add account data to room +acc Can get account data without syncing +acc Can get room account data without syncing +v1s Latest account data comes down in /initialSync +v1s Latest account data comes down in room initialSync +v1s Account data appears in v1 /events stream +v1s Room account data appears in v1 /events stream +acc Latest account data appears in v2 /sync +acc New account data appears in incremental v2 /sync +oid Can generate a openid access_token that can be exchanged for information about a user +oid Invalid openid access tokens are rejected +oid Requests to userinfo without access tokens are rejected +std Can send a message directly to a device using PUT /sendToDevice +std Can recv a device message using /sync +std Can recv device messages until they are acknowledged +std Device messages with the same txn_id are deduplicated +std Device messages wake up /sync +std Can recv device messages over federation +fsd Device messages over federation wake up /sync +std Can send messages with a wildcard device id +std Can send messages with a wildcard device id to two 
devices +std Wildcard device messages wake up /sync +fsd Wildcard device messages over federation wake up /sync +adm /whois +nsp /purge_history +nsp /purge_history by ts +nsp Can backfill purged history +nsp Shutdown room +ign Ignore user in existing room +ign Ignore invite in full sync +ign Ignore invite in incremental sync +fky Checking local federation server +fky Federation key API allows unsigned requests for keys +fky Federation key API can act as a notary server via a GET request +fky Federation key API can act as a notary server via a POST request +fky Key notary server should return an expired key if it can't find any others +fky Key notary server must not overwrite a valid key with a spurious result from the origin server +fqu Non-numeric ports in server names are rejected +fqu Outbound federation can query profile data +fqu Inbound federation can query profile data +fqu Outbound federation can query room alias directory +fqu Inbound federation can query room alias directory +fsj Outbound federation can query v1 /send_join +fsj Outbound federation can query v2 /send_join +fmj Outbound federation passes make_join failures through to the client +fsj Inbound federation can receive v1 /send_join +fsj Inbound federation can receive v2 /send_join +fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms +fsj Inbound /v1/send_join rejects incorrectly-signed joins +fsj Inbound /v1/send_join rejects joins from other servers +fau Inbound federation rejects remote attempts to kick local users to rooms +frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support +frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support +frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 +frv Inbound federation accepts attempts to join v2 rooms from servers with support +frv Outbound federation correctly handles unsupported room versions +frv A pair of servers 
can establish a join in a v2 room +fsj Outbound federation rejects send_join responses with no m.room.create event +frv Outbound federation rejects m.room.create events with an unknown room version +fsj Event with an invalid signature in the send_join response should not cause room join to fail +fsj Inbound: send_join rejects invalid JSON for room version 6 +fed Outbound federation can send events +fed Inbound federation can receive events +fed Inbound federation can receive redacted events +fed Ephemeral messages received from servers are correctly expired +fed Events whose auth_events are in the wrong room do not mess up the room state +fed Inbound federation can return events +fed Inbound federation redacts events from erased users +fme Outbound federation can request missing events +fme Inbound federation can return missing events for world_readable visibility +fme Inbound federation can return missing events for shared visibility +fme Inbound federation can return missing events for invite visibility +fme Inbound federation can return missing events for joined visibility +fme outliers whose auth_events are in a different room are correctly rejected +fbk Outbound federation can backfill events +fbk Inbound federation can backfill events +fbk Backfill checks the events requested belong to the room +fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination +fiv Outbound federation can send invites via v1 API +fiv Outbound federation can send invites via v2 API +fiv Inbound federation can receive invites via v1 API +fiv Inbound federation can receive invites via v2 API +fiv Inbound federation can receive invite and reject when remote replies with a 403 +fiv Inbound federation can receive invite and reject when remote replies with a 500 +fiv Inbound federation can receive invite and reject when remote is unreachable +fiv Inbound federation rejects invites which are not signed by the sender +fiv Inbound federation can 
receive invite rejections +fiv Inbound federation rejects incorrectly-signed invite rejections +fsl Inbound /v1/send_leave rejects leaves from other servers +fst Inbound federation can get state for a room +fst Inbound federation of state requires event_id as a mandatory paramater +fst Inbound federation can get state_ids for a room +fst Inbound federation of state_ids requires event_id as a mandatory paramater +fst Federation rejects inbound events where the prev_events cannot be found +fst Room state at a rejected message event is the same as its predecessor +fst Room state at a rejected state event is the same as its predecessor +fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state +fst Federation handles empty auth_events in state_ids sanely +fst Getting state checks the events requested belong to the room +fst Getting state IDs checks the events requested belong to the room +fst Should not be able to take over the room by pretending there is no PL event +fpb Inbound federation can get public room list +fed Outbound federation sends receipts +fed Inbound federation rejects receipts from wrong remote +fed Inbound federation ignores redactions from invalid servers room > v3 +fed An event which redacts an event in a different room should be ignored +fed An event which redacts itself should be ignored +fed A pair of events which redact each other should be ignored +fdk Local device key changes get to remote servers +fdk Server correctly handles incoming m.device_list_update +fdk Server correctly resyncs when client query keys and there is no remote cache +fdk Server correctly resyncs when server leaves and rejoins a room +fdk Local device key changes get to remote servers with correct prev_id +fdk Device list doesn't change if remote server is down +fdk If a device list update goes missing, the server resyncs on the next one +fst Name/topic keys are correct +fau Remote servers cannot set power levels in rooms 
without existing powerlevels +fau Remote servers should reject attempts by non-creators to set the power levels +fau Inbound federation rejects typing notifications from wrong remote +fau Users cannot set notifications powerlevel higher than their own +fed Forward extremities remain so even after the next events are populated as outliers +fau Banned servers cannot send events +fau Banned servers cannot /make_join +fau Banned servers cannot /send_join +fau Banned servers cannot /make_leave +fau Banned servers cannot /send_leave +fau Banned servers cannot /invite +fau Banned servers cannot get room state +fau Banned servers cannot get room state ids +fau Banned servers cannot backfill +fau Banned servers cannot /event_auth +fau Banned servers cannot get missing events +fau Server correctly handles transactions that break edu limits +fau Inbound federation correctly soft fails events +fau Inbound federation accepts a second soft-failed event +fau Inbound federation correctly handles soft failed events as extremities +med Can upload with Unicode file name +med Can download with Unicode file name locally +f,med Can download with Unicode file name over federation +med Alternative server names do not cause a routing loop +med Can download specifying a different Unicode file name +med Can upload without a file name +med Can download without a file name locally +f,med Can download without a file name over federation +med Can upload with ASCII file name +med Can download file 'ascii' +med Can download file 'name with spaces' +med Can download file 'name;with;semicolons' +med Can download specifying a different ASCII file name +med Can send image in room message +med Can fetch images in room +med POSTed media can be thumbnailed +f,med Remote media can be thumbnailed +med Test URL preview +med Can read configuration endpoint +nsp Can quarantine media in rooms +udr User appears in user directory +udr User in private room doesn't appear in user directory +udr User joining then 
leaving public room appears and dissappears from directory +udr Users appear/disappear from directory when join_rules are changed +udr Users appear/disappear from directory when history_visibility are changed +udr Users stay in directory when join_rules are changed but history_visibility is world_readable +f,udr User in remote room doesn't appear in user directory after server left room +udr User directory correctly update on display name change +udr User in shared private room does appear in user directory +udr User in shared private room does appear in user directory until leave +udr User in dir while user still shares private rooms +nsp Create group +nsp Add group rooms +nsp Remove group rooms +nsp Get local group profile +nsp Get local group users +nsp Add/remove local group rooms +nsp Get local group summary +nsp Get remote group profile +nsp Get remote group users +nsp Add/remove remote group rooms +nsp Get remote group summary +nsp Add local group users +nsp Remove self from local group +nsp Remove other from local group +nsp Add remote group users +nsp Remove self from remote group +nsp Listing invited users of a remote group when not a member returns a 403 +nsp Add group category +nsp Remove group category +nsp Get group categories +nsp Add group role +nsp Remove group role +nsp Get group roles +nsp Add room to group summary +nsp Adding room to group summary keeps room_id when fetching rooms in group +nsp Adding multiple rooms to group summary have correct order +nsp Remove room from group summary +nsp Add room to group summary with category +nsp Remove room from group summary with category +nsp Add user to group summary +nsp Adding multiple users to group summary have correct order +nsp Remove user from group summary +nsp Add user to group summary with role +nsp Remove user from group summary with role +nsp Local group invites come down sync +nsp Group creator sees group in sync +nsp Group creator sees group in initial sync +nsp Get/set local group 
publicity +nsp Bulk get group publicity +nsp Joinability comes down summary +nsp Set group joinable and join it +nsp Group is not joinable by default +nsp Group is joinable over federation +nsp Room is transitioned on local and remote groups upon room upgrade +3pd Can bind 3PID via home server +3pd Can bind and unbind 3PID via homeserver +3pd Can unbind 3PID via homeserver when bound out of band +3pd 3PIDs are unbound after account deactivation +3pd Can bind and unbind 3PID via /unbind by specifying the identity server +3pd Can bind and unbind 3PID via /unbind without specifying the identity server +app AS can create a user +app AS can create a user with an underscore +app AS can create a user with inhibit_login +app AS cannot create users outside its own namespace +app Regular users cannot register within the AS namespace +app AS can make room aliases +app Regular users cannot create room aliases within the AS namespace +app AS-ghosted users can use rooms via AS +app AS-ghosted users can use rooms themselves +app Ghost user must register before joining room +app AS can set avatar for ghosted users +app AS can set displayname for ghosted users +app AS can't set displayname for random users +app Inviting an AS-hosted user asks the AS server +app Accesing an AS-hosted room alias asks the AS server +app Events in rooms with AS-hosted room aliases are sent to AS server +app AS user (not ghost) can join room without registering +app AS user (not ghost) can join room without registering, with user_id query param +app HS provides query metadata +app HS can provide query metadata on a single protocol +app HS will proxy request for 3PU mapping +app HS will proxy request for 3PL mapping +app AS can publish rooms in their own list +app AS and main public room lists are separate +app AS can deactivate a user +psh Test that a message is pushed +psh Invites are pushed +psh Rooms with names are correctly named in pushed +psh Rooms with canonical alias are correctly named in 
pushed +psh Rooms with many users are correctly pushed +psh Don't get pushed for rooms you've muted +psh Rejected events are not pushed +psh Can add global push rule for room +psh Can add global push rule for sender +psh Can add global push rule for content +psh Can add global push rule for override +psh Can add global push rule for underride +psh Can add global push rule for content +psh New rules appear before old rules by default +psh Can add global push rule before an existing rule +psh Can add global push rule after an existing rule +psh Can delete a push rule +psh Can disable a push rule +psh Adding the same push rule twice is idempotent +psh Messages that notify from another user increment unread notification count +psh Messages that highlight from another user increment unread highlight count +psh Can change the actions of default rules +psh Changing the actions of an unknown default rule fails with 404 +psh Can change the actions of a user specified rule +psh Changing the actions of an unknown rule fails with 404 +psh Can fetch a user's pushers +psh Push rules come down in an initial /sync +psh Adding a push rule wakes up an incremental /sync +psh Disabling a push rule wakes up an incremental /sync +psh Enabling a push rule wakes up an incremental /sync +psh Setting actions for a push rule wakes up an incremental /sync +psh Can enable/disable default rules +psh Enabling an unknown default rule fails with 404 +psh Test that rejected pushers are removed. 
+psh Notifications can be viewed with GET /notifications +psh Trying to add push rule with no scope fails with 400 +psh Trying to add push rule with invalid scope fails with 400 +psh Trying to add push rule with missing template fails with 400 +psh Trying to add push rule with missing rule_id fails with 400 +psh Trying to add push rule with empty rule_id fails with 400 +psh Trying to add push rule with invalid template fails with 400 +psh Trying to add push rule with rule_id with slashes fails with 400 +psh Trying to add push rule with override rule without conditions fails with 400 +psh Trying to add push rule with underride rule without conditions fails with 400 +psh Trying to add push rule with condition without kind fails with 400 +psh Trying to add push rule with content rule without pattern fails with 400 +psh Trying to add push rule with no actions fails with 400 +psh Trying to add push rule with invalid action fails with 400 +psh Trying to add push rule with invalid attr fails with 400 +psh Trying to add push rule with invalid value for enabled fails with 400 +psh Trying to get push rules with no trailing slash fails with 400 +psh Trying to get push rules with scope without trailing slash fails with 400 +psh Trying to get push rules with template without tailing slash fails with 400 +psh Trying to get push rules with unknown scope fails with 400 +psh Trying to get push rules with unknown template fails with 400 +psh Trying to get push rules with unknown attribute fails with 400 +psh Trying to get push rules with unknown rule_id fails with 404 +psh Rooms with names are correctly named in pushes +v1s GET /initialSync with non-numeric 'limit' +v1s GET /events with non-numeric 'limit' +v1s GET /events with negative 'limit' +v1s GET /events with non-numeric 'timeout' +ath Event size limits +syn Check creating invalid filters returns 4xx +f,pre New federated private chats get full presence information (SYN-115) +pre Left room members do not cause problems for 
presence +crm Rooms can be created with an initial invite list (SYN-205) (1 subtests) +typ Typing notifications don't leak +ban Non-present room members cannot ban others +psh Getting push rules doesn't corrupt the cache SYN-390 +inv Test that we can be reinvited to a room we created +syn Multiple calls to /sync should not cause 500 errors +gst Guest user can call /events on another world_readable room (SYN-606) +gst Real user can call /events on another world_readable room (SYN-606) +gst Events come down the correct room +pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list +std Can send a to-device message to two users which both receive it using /sync +fme Outbound federation will ignore a missing event with bad JSON for room version 6 +fbk Outbound federation rejects backfill containing invalid JSON for events in room version 6 +jso Invalid JSON integers +jso Invalid JSON floats +jso Invalid JSON special values +inv Can invite users to invite-only rooms (2 subtests) +plv setting 'm.room.name' respects room powerlevel (2 subtests) +psh Messages that notify from another user increment notification_count +psh Messages that org.matrix.msc2625.mark_unread from another user increment org.matrix.msc2625.unread_count +dvk Can claim one time key using POST (2 subtests) +fdk Can query remote device keys using POST (1 subtests) +fdk Can claim remote one time key using POST (2 subtests) +fmj Inbound /make_join rejects attempts to join rooms where all users have left \ No newline at end of file diff --git a/tests/sytest/are-we-synapse-yet.py b/tests/sytest/are-we-synapse-yet.py new file mode 100755 index 00000000..3d21fa41 --- /dev/null +++ b/tests/sytest/are-we-synapse-yet.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python3 + +from __future__ import division +import argparse +import re +import sys + +# Usage: $ ./are-we-synapse-yet.py [-v] results.tap +# This script scans a results.tap file from Dendrite's CI process and spits 
out +# a rating of how close we are to Synapse parity, based purely on SyTests. +# The main complexity is grouping tests sensibly into features like 'Registration' +# and 'Federation'. Then it just checks the ones which are passing and calculates +# percentages for each group. Produces results like: +# +# Client-Server APIs: 29% (196/666 tests) +# ------------------- +# Registration : 62% (20/32 tests) +# Login : 7% (1/15 tests) +# V1 CS APIs : 10% (3/30 tests) +# ... +# +# or in verbose mode: +# +# Client-Server APIs: 29% (196/666 tests) +# ------------------- +# Registration : 62% (20/32 tests) +# ✓ GET /register yields a set of flows +# ✓ POST /register can create a user +# ✓ POST /register downcases capitals in usernames +# ... +# +# You can also tack `-v` on to see exactly which tests each category falls under. + +test_mappings = { + "nsp": "Non-Spec API", + "unk": "Unknown API (no group specified)", + "app": "Application Services API", + "f": "Federation", # flag to mark test involves federation + + "federation_apis": { + "fky": "Key API", + "fsj": "send_join API", + "fmj": "make_join API", + "fsl": "send_leave API", + "fiv": "Invite API", + "fqu": "Query API", + "frv": "room versions", + "fau": "Auth", + "fbk": "Backfill API", + "fme": "get_missing_events API", + "fst": "State APIs", + "fpb": "Public Room API", + "fdk": "Device Key APIs", + "fed": "Federation API", + "fsd": "Send-to-Device APIs", + }, + + "client_apis": { + "reg": "Registration", + "log": "Login", + "lox": "Logout", + "v1s": "V1 CS APIs", + "csa": "Misc CS APIs", + "pro": "Profile", + "dev": "Devices", + "dvk": "Device Keys", + "dkb": "Device Key Backup", + "xsk": "Cross-signing Keys", + "pre": "Presence", + "crm": "Create Room", + "syn": "Sync API", + "rmv": "Room Versions", + "rst": "Room State APIs", + "pub": "Public Room APIs", + "mem": "Room Membership", + "ali": "Room Aliases", + "jon": "Joining Rooms", + "lev": "Leaving Rooms", + "inv": "Inviting users to Rooms", + "ban": "Banning 
users", + "snd": "Sending events", + "get": "Getting events for Rooms", + "rct": "Receipts", + "red": "Read markers", + "med": "Media APIs", + "cap": "Capabilities API", + "typ": "Typing API", + "psh": "Push APIs", + "acc": "Account APIs", + "eph": "Ephemeral Events", + "plv": "Power Levels", + "xxx": "Redaction", + "3pd": "Third-Party ID APIs", + "gst": "Guest APIs", + "ath": "Room Auth", + "fgt": "Forget APIs", + "ctx": "Context APIs", + "upg": "Room Upgrade APIs", + "tag": "Tagging APIs", + "sch": "Search APIs", + "oid": "OpenID API", + "std": "Send-to-Device APIs", + "adm": "Server Admin API", + "ign": "Ignore Users", + "udr": "User Directory APIs", + "jso": "Enforced canonical JSON", + }, +} + +# optional 'not ' with test number then anything but '#' +re_testname = re.compile(r"^(not )?ok [0-9]+ ([^#]+)") + +# Parses lines like the following: +# +# SUCCESS: ok 3 POST /register downcases capitals in usernames +# FAIL: not ok 54 (expected fail) POST /createRoom creates a room with the given version +# SKIP: ok 821 Multiple calls to /sync should not cause 500 errors # skip lack of can_post_room_receipts +# EXPECT FAIL: not ok 822 (expected fail) Guest user can call /events on another world_readable room (SYN-606) # TODO expected fail +# +# Only SUCCESS lines are treated as success, the rest are not implemented. +# +# Returns a dict like: +# { name: "...", ok: True } +def parse_test_line(line): + if not line.startswith("ok ") and not line.startswith("not ok "): + return + re_match = re_testname.match(line) + test_name = re_match.groups()[1].replace("(expected fail) ", "").strip() + test_pass = False + if line.startswith("ok ") and not "# skip " in line: + test_pass = True + return { + "name": test_name, + "ok": test_pass, + } + +# Prints the stats for a complete section. 
+# header_name => "Client-Server APIs" +# gid_to_tests => { gid: { : True|False }} +# gid_to_name => { gid: "Group Name" } +# verbose => True|False +# Produces: +# Client-Server APIs: 29% (196/666 tests) +# ------------------- +# Registration : 62% (20/32 tests) +# Login : 7% (1/15 tests) +# V1 CS APIs : 10% (3/30 tests) +# ... +# or in verbose mode: +# Client-Server APIs: 29% (196/666 tests) +# ------------------- +# Registration : 62% (20/32 tests) +# ✓ GET /register yields a set of flows +# ✓ POST /register can create a user +# ✓ POST /register downcases capitals in usernames +# ... +def print_stats(header_name, gid_to_tests, gid_to_name, verbose): + subsections = [] # Registration: 100% (13/13 tests) + subsection_test_names = {} # 'subsection name': ["✓ Test 1", "✓ Test 2", "× Test 3"] + total_passing = 0 + total_tests = 0 + for gid, tests in gid_to_tests.items(): + group_total = len(tests) + if group_total == 0: + continue + group_passing = 0 + test_names_and_marks = [] + for name, passing in tests.items(): + if passing: + group_passing += 1 + test_names_and_marks.append(f"{'✓' if passing else '×'} {name}") + + total_tests += group_total + total_passing += group_passing + pct = "{0:.0f}%".format(group_passing/group_total * 100) + line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) + subsections.append(line) + subsection_test_names[line] = test_names_and_marks + + pct = "{0:.0f}%".format(total_passing/total_tests * 100) + print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) + print("-" * (len(header_name)+1)) + for line in subsections: + print(" %s" % (line,)) + if verbose: + for test_name_and_pass_mark in subsection_test_names[line]: + print(" %s" % (test_name_and_pass_mark,)) + print("") + print("") + +def main(results_tap_path, verbose): + # Load up test mappings + test_name_to_group_id = {} + fed_tests = set() + client_tests = set() + with 
open("./are-we-synapse-yet.list", "r") as f: + for line in f.readlines(): + test_name = " ".join(line.split(" ")[1:]).strip() + groups = line.split(" ")[0].split(",") + for gid in groups: + if gid == "f" or gid in test_mappings["federation_apis"]: + fed_tests.add(test_name) + else: + client_tests.add(test_name) + if gid == "f": + continue # we expect another group ID + test_name_to_group_id[test_name] = gid + + # parse results.tap + summary = { + "client": { + # gid: { + # test_name: OK + # } + }, + "federation": { + # gid: { + # test_name: OK + # } + }, + "appservice": { + "app": {}, + }, + "nonspec": { + "nsp": {}, + "unk": {} + }, + } + with open(results_tap_path, "r") as f: + for line in f.readlines(): + test_result = parse_test_line(line) + if not test_result: + continue + name = test_result["name"] + group_id = test_name_to_group_id.get(name) + if not group_id: + summary["nonspec"]["unk"][name] = test_result["ok"] + if group_id == "nsp": + summary["nonspec"]["nsp"][name] = test_result["ok"] + elif group_id == "app": + summary["appservice"]["app"][name] = test_result["ok"] + elif group_id in test_mappings["federation_apis"]: + group = summary["federation"].get(group_id, {}) + group[name] = test_result["ok"] + summary["federation"][group_id] = group + elif group_id in test_mappings["client_apis"]: + group = summary["client"].get(group_id, {}) + group[name] = test_result["ok"] + summary["client"][group_id] = group + + print("Are We Synapse Yet?") + print("===================") + print("") + print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose) + print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose) + print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose) + print_stats("Application Services APIs", summary["appservice"], test_mappings, verbose) + + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("tap_file", help="path to 
results.tap") + parser.add_argument("-v", action="store_true", help="show individual test names in output") + args = parser.parse_args() + main(args.tap_file, args.v) \ No newline at end of file diff --git a/tests/sytest/show-expected-fail-tests.sh b/tests/sytest/show-expected-fail-tests.sh new file mode 100755 index 00000000..320d4ebd --- /dev/null +++ b/tests/sytest/show-expected-fail-tests.sh @@ -0,0 +1,105 @@ +#! /bin/bash +# +# Parses a results.tap file from SyTest output and a file containing test names (a test whitelist) +# and checks whether a test name that exists in the whitelist (that should pass), failed or not. +# +# An optional blacklist file can be added, also containing test names, where if a test name is +# present, the script will not error even if the test is in the whitelist file and failed +# +# For each of these files, lines starting with '#' are ignored. +# +# Usage ./show-expected-fail-tests.sh results.tap whitelist [blacklist] + +results_file=$1 +whitelist_file=$2 +blacklist_file=$3 + +fail_build=0 + +if [ $# -lt 2 ]; then + echo "Usage: $0 results.tap whitelist [blacklist]" + exit 1 +fi + +if [ ! -f "$results_file" ]; then + echo "ERROR: Specified results file '${results_file}' doesn't exist." + fail_build=1 +fi + +if [ ! -f "$whitelist_file" ]; then + echo "ERROR: Specified test whitelist '${whitelist_file}' doesn't exist." + fail_build=1 +fi + +blacklisted_tests=() + +# Check if a blacklist file was provided +if [ $# -eq 3 ]; then + # Read test blacklist file + if [ ! -f "$blacklist_file" ]; then + echo "ERROR: Specified test blacklist file '${blacklist_file}' doesn't exist." 
+ fail_build=1 + fi + + # Read each line, ignoring those that start with '#' + blacklisted_tests="" + search_non_comments=$(grep -v '^#' ${blacklist_file}) + while read -r line ; do + # Record the blacklisted test name + blacklisted_tests+=("${line}") + done <<< "${search_non_comments}" # This allows us to edit blacklisted_tests in the while loop +fi + +[ "$fail_build" = 0 ] || exit 1 + +passed_but_expected_fail=$(grep ' # TODO passed but expected fail' ${results_file} | sed -E 's/^ok [0-9]+ (\(expected fail\) )?//' | sed -E 's/( \([0-9]+ subtests\))? # TODO passed but expected fail$//') +tests_to_add="" +already_in_whitelist="" + +while read -r test_name; do + # Ignore empty lines + [ "${test_name}" = "" ] && continue + + grep "^${test_name}" "${whitelist_file}" > /dev/null 2>&1 + if [ "$?" != "0" ]; then + # Check if this test name is blacklisted + if printf '%s\n' "${blacklisted_tests[@]}" | grep -q -P "^${test_name}$"; then + # Don't notify about this test + continue + fi + + # Append this test_name to the existing list + tests_to_add="${tests_to_add}${test_name}\n" + fail_build=1 + else + already_in_whitelist="${already_in_whitelist}${test_name}\n" + fi +done <<< "${passed_but_expected_fail}" + +# TODO: Check that the same test doesn't exist in both the whitelist and blacklist +# TODO: Check that the same test doesn't appear twice in the whitelist|blacklist + +# Trim test output strings +tests_to_add=$(IFS=$'\n' echo "${tests_to_add[*]%%'\n'}") +already_in_whitelist=$(IFS=$'\n' echo "${already_in_whitelist[*]%%'\n'}") + +# Format output with markdown for buildkite annotation rendering purposes +if [ -n "${tests_to_add}" ] && [ -n "${already_in_whitelist}" ]; then + echo "### 📜 SyTest Whitelist Maintenance" +fi + +if [ -n "${tests_to_add}" ]; then + echo "**ERROR**: The following tests passed but are not present in \`$2\`. 
Please append them to the file:" + echo "\`\`\`" + echo -e "${tests_to_add}" + echo "\`\`\`" +fi + +if [ -n "${already_in_whitelist}" ]; then + echo "**WARN**: Tests in the whitelist still marked as **expected fail**:" + echo "\`\`\`" + echo -e "${already_in_whitelist}" + echo "\`\`\`" +fi + +exit ${fail_build} diff --git a/tests/sytest/sytest-blacklist b/tests/sytest/sytest-blacklist new file mode 100644 index 00000000..009de225 --- /dev/null +++ b/tests/sytest/sytest-blacklist @@ -0,0 +1,7 @@ +# This test checks for a room-alias key in the response which is not in the spec, we must add it back in whitelist when https://github.com/matrix-org/sytest/pull/880 is merged +POST /createRoom makes a public room +# These fails because they use a endpoint which is not in the spec, we must add them back in whitelist when https://github.com/matrix-org/sytest/issues/878 is closed +POST /createRoom makes a room with a name +POST /createRoom makes a room with a topic +Can /sync newly created room +POST /createRoom ignores attempts to set the room version via creation_content \ No newline at end of file diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist new file mode 100644 index 00000000..1c969dba --- /dev/null +++ b/tests/sytest/sytest-whitelist @@ -0,0 +1,516 @@ +/event/ does not allow access to events before the user joined +/event/ on joined room works +/event/ on non world readable room does not work +/joined_members return joined members +/joined_rooms returns only joined rooms +/whois +3pid invite join valid signature but revoked keys are rejected +3pid invite join valid signature but unreachable ID server are rejected +3pid invite join with wrong but valid signature are rejected +A change to displayname should appear in incremental /sync +A full_state incremental update returns all state +A full_state incremental update returns only recent timeline +A message sent after an initial sync appears in the timeline of an incremental sync. 
+A next_batch token can be used in the v1 messages API +A pair of events which redact each other should be ignored +A pair of servers can establish a join in a v2 room +A prev_batch token can be used in the v1 messages API +AS can create a user +AS can create a user with an underscore +AS can create a user with inhibit_login +AS can set avatar for ghosted users +AS can set displayname for ghosted users +AS can't set displayname for random users +AS cannot create users outside its own namespace +AS user (not ghost) can join room without registering +AS user (not ghost) can join room without registering, with user_id query param +After changing password, a different session no longer works by default +After changing password, can log in with new password +After changing password, can't log in with old password +After changing password, different sessions can optionally be kept +After changing password, existing session still works +After deactivating account, can't log in with an email +After deactivating account, can't log in with password +Alias creators can delete alias with no ops +Alias creators can delete canonical alias with no ops +Alternative server names do not cause a routing loop +An event which redacts an event in a different room should be ignored +An event which redacts itself should be ignored +Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list +Backfill checks the events requested belong to the room +Backfill works correctly with history visibility set to joined +Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination +Banned servers cannot /event_auth +Banned servers cannot /invite +Banned servers cannot /make_join +Banned servers cannot /make_leave +Banned servers cannot /send_join +Banned servers cannot /send_leave +Banned servers cannot backfill +Banned servers cannot get missing events +Banned servers cannot get room state +Banned servers cannot get room 
state ids +Banned servers cannot send events +Banned user is kicked and may not rejoin until unbanned +Both GET and PUT work +Can /sync newly created room +Can add account data +Can add account data to room +Can add tag +Can claim one time key using POST +Can claim remote one time key using POST +Can create filter +Can deactivate account +Can delete canonical alias +Can download file 'ascii' +Can download file 'name with spaces' +Can download file 'name;with;semicolons' +Can download filter +Can download specifying a different ASCII file name +Can download specifying a different Unicode file name +Can download with Unicode file name locally +Can download with Unicode file name over federation +Can download without a file name locally +Can download without a file name over federation +Can forget room you've been kicked from +Can get 'm.room.name' state for a departed room (SPEC-216) +Can get account data without syncing +Can get remote public room list +Can get room account data without syncing +Can get rooms/{roomId}/members +Can get rooms/{roomId}/members for a departed room (SPEC-216) +Can get rooms/{roomId}/state for a departed room (SPEC-216) +Can invite users to invite-only rooms +Can list tags for a room +Can logout all devices +Can logout current device +Can paginate public room list +Can pass a JSON filter as a query parameter +Can query device keys using POST +Can query remote device keys using POST +Can query specific device keys using POST +Can re-join room if re-invited +Can read configuration endpoint +Can receive redactions from regular users over federation in room version 1 +Can receive redactions from regular users over federation in room version 2 +Can receive redactions from regular users over federation in room version 3 +Can receive redactions from regular users over federation in room version 4 +Can receive redactions from regular users over federation in room version 5 +Can receive redactions from regular users over federation in room version 
6 +Can recv a device message using /sync +Can recv a device message using /sync +Can recv device messages over federation +Can recv device messages until they are acknowledged +Can recv device messages until they are acknowledged +Can reject invites over federation for rooms with version 1 +Can reject invites over federation for rooms with version 2 +Can reject invites over federation for rooms with version 3 +Can reject invites over federation for rooms with version 4 +Can reject invites over federation for rooms with version 5 +Can reject invites over federation for rooms with version 6 +Can remove tag +Can search public room list +Can send a message directly to a device using PUT /sendToDevice +Can send a message directly to a device using PUT /sendToDevice +Can send a to-device message to two users which both receive it using /sync +Can send image in room message +Can send messages with a wildcard device id +Can send messages with a wildcard device id +Can send messages with a wildcard device id to two devices +Can send messages with a wildcard device id to two devices +Can sync +Can sync a joined room +Can sync a room with a message with a transaction id +Can sync a room with a single message +Can upload device keys +Can upload with ASCII file name +Can upload with Unicode file name +Can upload without a file name +Can't deactivate account with wrong password +Can't forget room you're still in +Changes to state are included in an gapped incremental sync +Changes to state are included in an incremental sync +Changing the actions of an unknown default rule fails with 404 +Changing the actions of an unknown rule fails with 404 +Checking local federation server +Creators can delete alias +Current state appears in timeline in private history +Current state appears in timeline in private history with many messages before +DELETE /device/{deviceId} +DELETE /device/{deviceId} requires UI auth user to match device owner +DELETE /device/{deviceId} with no body gives a 
401 +Deleted tags appear in an incremental v2 /sync +Deleting a non-existent alias should return a 404 +Device list doesn't change if remote server is down +Device messages over federation wake up /sync +Device messages wake up /sync +Device messages wake up /sync +Device messages with the same txn_id are deduplicated +Device messages with the same txn_id are deduplicated +Enabling an unknown default rule fails with 404 +Event size limits +Event with an invalid signature in the send_join response should not cause room join to fail +Events come down the correct room +Events whose auth_events are in the wrong room do not mess up the room state +Existing members see new members' join events +Federation key API allows unsigned requests for keys +Federation key API can act as a notary server via a GET request +Federation key API can act as a notary server via a POST request +Federation rejects inbound events where the prev_events cannot be found +Fetching eventstream a second time doesn't yield the message again +Forgetting room does not show up in v2 /sync +Full state sync includes joined rooms +GET /capabilities is present and well formed for registered user +GET /device/{deviceId} +GET /device/{deviceId} gives a 404 for unknown devices +GET /devices +GET /directory/room/:room_alias yields room ID +GET /events initially +GET /events with negative 'limit' +GET /events with non-numeric 'limit' +GET /events with non-numeric 'timeout' +GET /initialSync initially +GET /joined_rooms lists newly-created room +GET /login yields a set of flows +GET /media/r0/download can fetch the value again +GET /profile/:user_id/avatar_url publicly accessible +GET /profile/:user_id/displayname publicly accessible +GET /publicRooms includes avatar URLs +GET /publicRooms lists newly-created room +GET /publicRooms lists rooms +GET /r0/capabilities is not public +GET /register yields a set of flows +GET /rooms/:room_id/joined_members fetches my membership +GET /rooms/:room_id/messages returns a 
message +GET /rooms/:room_id/state fetches entire room state +GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership +GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event +GET /rooms/:room_id/state/m.room.name gets name +GET /rooms/:room_id/state/m.room.power_levels can fetch levels +GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels +GET /rooms/:room_id/state/m.room.topic gets topic +Get left notifs for other users in sync and /keys/changes when user leaves +Getting messages going forward is limited for a departed room (SPEC-216) +Getting push rules doesn't corrupt the cache SYN-390 +Getting state IDs checks the events requested belong to the room +Getting state checks the events requested belong to the room +Ghost user must register before joining room +Guest non-joined user cannot call /events on default room +Guest non-joined user cannot call /events on invited room +Guest non-joined user cannot call /events on joined room +Guest non-joined user cannot call /events on shared room +Guest non-joined users can get individual state for world_readable rooms +Guest non-joined users can get individual state for world_readable rooms after leaving +Guest non-joined users can get state for world_readable rooms +Guest non-joined users cannot room initalSync for non-world_readable rooms +Guest non-joined users cannot send messages to guest_access rooms if not joined +Guest user can set display names +Guest user cannot call /events globally +Guest user cannot upgrade other users +Guest users can accept invites to private rooms over federation +Guest users can join guest_access rooms +Guest users can send messages to guest_access rooms if joined +If a device list update goes missing, the server resyncs on the next one +If remote user leaves room we no longer receive device updates +If remote user leaves room, changes device and rejoins we see update in /keys/changes +If remote user leaves room, changes 
device and rejoins we see update in sync +Inbound /make_join rejects attempts to join rooms where all users have left +Inbound /v1/make_join rejects remote attempts to join local users to rooms +Inbound /v1/send_join rejects incorrectly-signed joins +Inbound /v1/send_join rejects joins from other servers +Inbound /v1/send_leave rejects leaves from other servers +Inbound federation accepts a second soft-failed event +Inbound federation accepts attempts to join v2 rooms from servers with support +Inbound federation can backfill events +Inbound federation can get public room list +Inbound federation can get state for a room +Inbound federation can get state_ids for a room +Inbound federation can query profile data +Inbound federation can query room alias directory +Inbound federation can receive events +Inbound federation can receive invites via v1 API +Inbound federation can receive invites via v2 API +Inbound federation can receive redacted events +Inbound federation can receive v1 /send_join +Inbound federation can receive v2 /send_join +Inbound federation can return events +Inbound federation can return missing events for invite visibility +Inbound federation can return missing events for world_readable visibility +Inbound federation correctly soft fails events +Inbound federation of state requires event_id as a mandatory paramater +Inbound federation of state_ids requires event_id as a mandatory paramater +Inbound federation rejects attempts to join v1 rooms from servers without v1 support +Inbound federation rejects attempts to join v2 rooms from servers lacking version support +Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 +Inbound federation rejects invite rejections which include invalid JSON for room version 6 +Inbound federation rejects invites which include invalid JSON for room version 6 +Inbound federation rejects receipts from wrong remote +Inbound federation rejects remote attempts to join local users to rooms 
+Inbound federation rejects remote attempts to kick local users to rooms +Inbound federation rejects typing notifications from wrong remote +Inbound: send_join rejects invalid JSON for room version 6 +Invalid JSON floats +Invalid JSON integers +Invalid JSON special values +Invited user can reject invite +Invited user can reject invite over federation +Invited user can reject invite over federation for empty room +Invited user can reject invite over federation several times +Invited user can see room metadata +Inviting an AS-hosted user asks the AS server +Lazy loading parameters in the filter are strictly boolean +Left rooms appear in the leave section of full state sync +Local delete device changes appear in v2 /sync +Local device key changes appear in /keys/changes +Local device key changes appear in v2 /sync +Local device key changes get to remote servers +Local new device changes appear in v2 /sync +Local non-members don't see posted message events +Local room members can get room messages +Local room members see posted message events +Local update device changes appear in v2 /sync +Local users can peek by room alias +Local users can peek into world_readable rooms by room ID +Message history can be paginated +Message history can be paginated over federation +Name/topic keys are correct +New account data appears in incremental v2 /sync +New read receipts appear in incremental v2 /sync +New room members see their own join event +New users appear in /keys/changes +Newly banned rooms appear in the leave section of incremental sync +Newly joined room is included in an incremental sync +Newly joined room is included in an incremental sync after invite +Newly left rooms appear in the leave section of gapped sync +Newly left rooms appear in the leave section of incremental sync +Newly updated tags appear in an incremental v2 /sync +Non-numeric ports in server names are rejected +Outbound federation can backfill events +Outbound federation can query profile data 
+Outbound federation can query room alias directory +Outbound federation can query v1 /send_join +Outbound federation can query v2 /send_join +Outbound federation can request missing events +Outbound federation can send events +Outbound federation can send invites via v1 API +Outbound federation can send invites via v2 API +Outbound federation can send room-join requests +Outbound federation correctly handles unsupported room versions +Outbound federation passes make_join failures through to the client +Outbound federation rejects backfill containing invalid JSON for events in room version 6 +Outbound federation rejects m.room.create events with an unknown room version +Outbound federation rejects send_join responses with no m.room.create event +Outbound federation sends receipts +Outbound federation will ignore a missing event with bad JSON for room version 6 +POST /createRoom creates a room with the given version +POST /createRoom ignores attempts to set the room version via creation_content +POST /createRoom makes a private room +POST /createRoom makes a private room with invites +POST /createRoom makes a public room +POST /createRoom makes a room with a name +POST /createRoom makes a room with a topic +POST /createRoom rejects attempts to create rooms with numeric versions +POST /createRoom rejects attempts to create rooms with unknown versions +POST /createRoom with creation content +POST /join/:room_alias can join a room +POST /join/:room_alias can join a room with custom content +POST /join/:room_id can join a room +POST /join/:room_id can join a room with custom content +POST /login as non-existing user is rejected +POST /login can log in as a user +POST /login can log in as a user with just the local part of the id +POST /login returns the same device_id as that in the request +POST /login wrong password is rejected +POST /media/r0/upload can create an upload +POST /redact disallows redaction of event in different room +POST /register allows registration 
of usernames with '-' +POST /register allows registration of usernames with '.' +POST /register allows registration of usernames with '/' +POST /register allows registration of usernames with '3' +POST /register allows registration of usernames with '=' +POST /register allows registration of usernames with '_' +POST /register allows registration of usernames with 'q' +POST /register can create a user +POST /register downcases capitals in usernames +POST /register rejects registration of usernames with '!' +POST /register rejects registration of usernames with '"' +POST /register rejects registration of usernames with ''' +POST /register rejects registration of usernames with ':' +POST /register rejects registration of usernames with '?' +POST /register rejects registration of usernames with '@' +POST /register rejects registration of usernames with '[' +POST /register rejects registration of usernames with '\' +POST /register rejects registration of usernames with '\n' +POST /register rejects registration of usernames with ']' +POST /register rejects registration of usernames with '{' +POST /register rejects registration of usernames with '|' +POST /register rejects registration of usernames with '}' +POST /register rejects registration of usernames with '£' +POST /register rejects registration of usernames with 'é' +POST /register returns the same device_id as that in the request +POST /rooms/:room_id/ban can ban a user +POST /rooms/:room_id/invite can send an invite +POST /rooms/:room_id/join can join a room +POST /rooms/:room_id/leave can leave a room +POST /rooms/:room_id/read_markers can create read marker +POST /rooms/:room_id/receipt can create receipts +POST /rooms/:room_id/redact/:event_id as original message sender redacts message +POST /rooms/:room_id/redact/:event_id as power user redacts message +POST /rooms/:room_id/redact/:event_id as random user does not redact message +POST /rooms/:room_id/send/:event_type sends a message +POST 
/rooms/:room_id/state/m.room.name sets name +POST /rooms/:room_id/state/m.room.topic sets topic +POST /rooms/:room_id/upgrade can upgrade a room version +POST rejects invalid utf-8 in JSON +POSTed media can be thumbnailed +PUT /device/{deviceId} gives a 404 for unknown devices +PUT /device/{deviceId} updates device fields +PUT /directory/room/:room_alias creates alias +PUT /profile/:user_id/avatar_url sets my avatar +PUT /profile/:user_id/displayname sets my name +PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id +PUT /rooms/:room_id/send/:event_type/:txn_id sends a message +PUT /rooms/:room_id/state/m.room.power_levels can set levels +PUT /rooms/:room_id/typing/:user_id sets typing notification +PUT power_levels should not explode if the old power levels were empty +Peeked rooms only turn up in the sync for the device who peeked them +Previously left rooms don't appear in the leave section of sync +Push rules come down in an initial /sync +Read markers appear in incremental v2 /sync +Read markers appear in initial v2 /sync +Read markers can be updated +Read receipts appear in initial v2 /sync +Real non-joined user cannot call /events on default room +Real non-joined user cannot call /events on invited room +Real non-joined user cannot call /events on joined room +Real non-joined user cannot call /events on shared room +Real non-joined users can get individual state for world_readable rooms +Real non-joined users can get individual state for world_readable rooms after leaving +Real non-joined users can get state for world_readable rooms +Real non-joined users cannot room initalSync for non-world_readable rooms +Real non-joined users cannot send messages to guest_access rooms if not joined +Receipts must be m.read +Redaction of a redaction redacts the redaction reason +Regular users can add and delete aliases in the default room configuration +Regular users can add and delete aliases when m.room.aliases is restricted +Regular users cannot 
create room aliases within the AS namespace +Regular users cannot register within the AS namespace +Remote media can be thumbnailed +Remote room alias queries can handle Unicode +Remote room members also see posted message events +Remote room members can get room messages +Remote user can backfill in a room with version 1 +Remote user can backfill in a room with version 2 +Remote user can backfill in a room with version 3 +Remote user can backfill in a room with version 4 +Remote user can backfill in a room with version 5 +Remote user can backfill in a room with version 6 +Remote users can join room by alias +Remote users may not join unfederated rooms +Request to logout with invalid an access token is rejected +Request to logout without an access token is rejected +Room aliases can contain Unicode +Room creation reports m.room.create to myself +Room creation reports m.room.member to myself +Room members can join a room with an overridden displayname +Room members can override their displayname on a room-specific basis +Room state at a rejected message event is the same as its predecessor +Room state at a rejected state event is the same as its predecessor +Rooms a user is invited to appear in an incremental sync +Rooms a user is invited to appear in an initial sync +Rooms can be created with an initial invite list (SYN-205) +Server correctly handles incoming m.device_list_update +Server correctly handles transactions that break edu limits +Server correctly resyncs when client query keys and there is no remote cache +Server correctly resyncs when server leaves and rejoins a room +Server rejects invalid JSON in a version 6 room +Setting room topic reports m.room.topic to myself +Should not be able to take over the room by pretending there is no PL event +Should reject keys claiming to belong to a different user +State from remote users is included in the state in the initial sync +State from remote users is included in the timeline in an incremental sync +State is 
included in the timeline in the initial sync +Sync can be polled for updates +Sync is woken up for leaves +Syncing a new room with a large timeline limit isn't limited +Tags appear in an initial v2 /sync +Trying to get push rules with unknown rule_id fails with 404 +Typing can be explicitly stopped +Typing events appear in gapped sync +Typing events appear in incremental sync +Typing events appear in initial sync +Typing notification sent to local room members +Typing notifications also sent to remote room members +Typing notifications don't leak +Uninvited users cannot join the room +Unprivileged users can set m.room.topic if it only needs level 0 +User appears in user directory +User in private room doesn't appear in user directory +User joining then leaving public room appears and dissappears from directory +User in shared private room does appear in user directory until leave +User can create and send/receive messages in a room with version 1 +User can create and send/receive messages in a room with version 2 +User can create and send/receive messages in a room with version 3 +User can create and send/receive messages in a room with version 4 +User can create and send/receive messages in a room with version 5 +User can create and send/receive messages in a room with version 6 +User can invite local user to room with version 1 +User can invite local user to room with version 2 +User can invite local user to room with version 3 +User can invite local user to room with version 4 +User can invite local user to room with version 5 +User can invite local user to room with version 6 +User can invite remote user to room with version 1 +User can invite remote user to room with version 2 +User can invite remote user to room with version 3 +User can invite remote user to room with version 4 +User can invite remote user to room with version 5 +User can invite remote user to room with version 6 +User directory correctly update on display name change +User in dir while user 
still shares private rooms +User in shared private room does appear in user directory +User is offline if they set_presence=offline in their sync +User signups are forbidden from starting with '_' +Users can't delete other's aliases +Users cannot invite a user that is already in the room +Users cannot invite themselves to a room +Users cannot kick users from a room they are not in +Users cannot kick users who have already left a room +Users cannot set ban powerlevel higher than their own +Users cannot set kick powerlevel higher than their own +Users cannot set notifications powerlevel higher than their own +Users cannot set redact powerlevel higher than their own +Users receive device_list updates for their own devices +Users with sufficient power-level can delete other's aliases +Version responds 200 OK with valid structure +We can't peek into rooms with invited history_visibility +We can't peek into rooms with joined history_visibility +We can't peek into rooms with shared history_visibility +We don't send redundant membership state across incremental syncs by default +We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) +We should see our own leave event, even if history_visibility is restricted (SYN-662) +Wildcard device messages over federation wake up /sync +Wildcard device messages wake up /sync +Wildcard device messages wake up /sync +avatar_url updates affect room member events +displayname updates affect room member events +local user can join room with version 1 +local user can join room with version 2 +local user can join room with version 3 +local user can join room with version 4 +local user can join room with version 5 +local user can join room with version 6 +m.room.history_visibility == "joined" allows/forbids appropriately for Guest users +m.room.history_visibility == "joined" allows/forbids appropriately for Real users +m.room.history_visibility == "world_readable" allows/forbids 
appropriately for Guest users +m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users +query for user with no keys returns empty key dict +remote user can join room with version 1 +remote user can join room with version 2 +remote user can join room with version 3 +remote user can join room with version 4 +remote user can join room with version 5 +remote user can join room with version 6 +setting 'm.room.name' respects room powerlevel +setting 'm.room.power_levels' respects room powerlevel +Federation publicRoom Name/topic keys are correct diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl deleted file mode 100644 index 97c2e1b1..00000000 --- a/tests/test_results/complement/test_results.jsonl +++ /dev/null @@ -1,651 +0,0 @@ -{"Action":"pass","Test":"TestACLs"} -{"Action":"pass","Test":"TestAddAccountData"} -{"Action":"pass","Test":"TestAddAccountData/Can_add_global_account_data"} -{"Action":"pass","Test":"TestAddAccountData/Can_add_room_account_data"} -{"Action":"fail","Test":"TestArchivedRoomsHistory"} -{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events"} -{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/incremental_sync"} -{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/initial_sync"} -{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty"} -{"Action":"skip","Test":"TestArchivedRoomsHistory/timeline_is_empty/incremental_sync"} -{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} -{"Action":"fail","Test":"TestAsyncUpload"} -{"Action":"fail","Test":"TestAsyncUpload/Cannot_upload_to_a_media_ID_that_has_already_been_uploaded_to"} -{"Action":"fail","Test":"TestAsyncUpload/Create_media"} -{"Action":"fail","Test":"TestAsyncUpload/Download_media"} -{"Action":"fail","Test":"TestAsyncUpload/Download_media_over__matrix/client/v1/media/download"} 
-{"Action":"fail","Test":"TestAsyncUpload/Not_yet_uploaded"} -{"Action":"fail","Test":"TestAsyncUpload/Upload_media"} -{"Action":"pass","Test":"TestAvatarUrlUpdate"} -{"Action":"pass","Test":"TestBannedUserCannotSendJoin"} -{"Action":"skip","Test":"TestCanRegisterAdmin"} -{"Action":"pass","Test":"TestCannotKickLeftUser"} -{"Action":"fail","Test":"TestCannotKickNonPresentUser"} -{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room"} -{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"} -{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"} -{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/join_event"} -{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/leave_event"} -{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/non-state_membership_event"} -{"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/regular_event"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/event_with_mismatched_state_key"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/invite_event"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/knock_event"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/leave_event"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/non-state_membership_event"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV1/regular_event"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/event_with_mismatched_state_key"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/invite_event"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/knock_event"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/leave_event"} 
-{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/non-state_membership_event"} -{"Action":"pass","Test":"TestCannotSendNonJoinViaSendJoinV2/regular_event"} -{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock"} -{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/event_with_mismatched_state_key"} -{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/invite_event"} -{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/join_event"} -{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/leave_event"} -{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/non-state_membership_event"} -{"Action":"pass","Test":"TestCannotSendNonKnockViaSendKnock/regular_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/event_with_mismatched_state_key"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/invite_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/join_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/knock_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/non-state_membership_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV1/regular_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/event_with_mismatched_state_key"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/invite_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/join_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/knock_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event"} -{"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/regular_event"} -{"Action":"pass","Test":"TestChangePassword"} 
-{"Action":"pass","Test":"TestChangePassword/After_changing_password,_a_different_session_no_longer_works_by_default"} -{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can't_log_in_with_old_password"} -{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can_log_in_with_new_password"} -{"Action":"pass","Test":"TestChangePassword/After_changing_password,_different_sessions_can_optionally_be_kept"} -{"Action":"pass","Test":"TestChangePassword/After_changing_password,_existing_session_still_works"} -{"Action":"pass","Test":"TestChangePasswordPushers"} -{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} -{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} -{"Action":"fail","Test":"TestClientSpacesSummary"} -{"Action":"pass","Test":"TestClientSpacesSummary/max_depth"} -{"Action":"fail","Test":"TestClientSpacesSummary/pagination"} -{"Action":"fail","Test":"TestClientSpacesSummary/query_whole_graph"} -{"Action":"fail","Test":"TestClientSpacesSummary/redact_link"} -{"Action":"fail","Test":"TestClientSpacesSummary/suggested_only"} -{"Action":"fail","Test":"TestClientSpacesSummaryJoinRules"} -{"Action":"pass","Test":"TestContent"} -{"Action":"pass","Test":"TestContentCSAPIMediaV1"} -{"Action":"pass","Test":"TestContentMediaV1"} -{"Action":"pass","Test":"TestCumulativeJoinLeaveJoinSync"} -{"Action":"pass","Test":"TestDeactivateAccount"} -{"Action":"pass","Test":"TestDeactivateAccount/After_deactivating_account,_can't_log_in_with_password"} -{"Action":"pass","Test":"TestDeactivateAccount/Can't_deactivate_account_with_wrong_password"} -{"Action":"pass","Test":"TestDeactivateAccount/Can_deactivate_account"} -{"Action":"pass","Test":"TestDeactivateAccount/Password_flow_is_available"} -{"Action":"fail","Test":"TestDelayedEvents"} 
-{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_with_an_invalid_action"} -{"Action":"pass","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_delay_ID"} -{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_request_body"} -{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_an_action"} -{"Action":"fail","Test":"TestDelayedEvents/delayed_events_are_empty_on_startup"} -{"Action":"fail","Test":"TestDelayedEvents/delayed_message_events_are_sent_on_timeout"} -{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_another_user"} -{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_the_same_user"} -{"Action":"skip","Test":"TestDelayedEvents/delayed_state_events_are_kept_on_server_restart"} -{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_sent_on_timeout"} -{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_cancelled"} -{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_restarted"} -{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_sent_on_request"} -{"Action":"pass","Test":"TestDelayedEvents/parallel"} -{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_cancel_a_delayed_event_without_a_matching_delay_ID"} -{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_restart_a_delayed_event_without_a_matching_delay_ID"} -{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_send_a_delayed_event_without_a_matching_delay_ID"} -{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings"} -{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings/Deleting_a_user's_device_should_delete_any_local_notification_settings_entries_from_their_account_data"} -{"Action":"pass","Test":"TestDemotingUsersViaUsersDefault"} 
-{"Action":"fail","Test":"TestDeviceListUpdates"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_local_user"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_remote_user"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_local_user"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_remote_user"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_joins_a_room"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_leaves_a_room"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_rejoins_a_room"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_joins_a_room"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_leaves_a_room"} -{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_rejoins_a_room"} -{"Action":"fail","Test":"TestDeviceListsUpdateOverFederation"} -{"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/good_connectivity"} -{"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/interrupted_connectivity"} -{"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/stopped_server"} -{"Action":"fail","Test":"TestDeviceListsUpdateOverFederationOnRoomJoin"} -{"Action":"fail","Test":"TestDeviceManagement"} -{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}"} -{"Action":"pass","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} -{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}"} -{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}_gives_a_404_for_unknown_devices"} -{"Action":"pass","Test":"TestDeviceManagement/GET_/devices"} -{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_gives_a_404_for_unknown_devices"} -{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_updates_device_fields"} 
-{"Action":"pass","Test":"TestDisplayNameUpdate"} -{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} -{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} -{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} -{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} -{"Action":"pass","Test":"TestEvent"} -{"Action":"pass","Test":"TestEvent/Parallel"} -{"Action":"pass","Test":"TestEvent/Parallel/Large_Event"} -{"Action":"pass","Test":"TestEvent/Parallel/Large_State_Event"} -{"Action":"pass","Test":"TestEventAuth"} -{"Action":"pass","Test":"TestEventAuth/returns_auth_events_for_the_requested_event"} -{"Action":"pass","Test":"TestEventAuth/returns_the_auth_chain_for_the_requested_event"} -{"Action":"fail","Test":"TestEventRelationships"} -{"Action":"fail","Test":"TestFederatedClientSpaces"} -{"Action":"fail","Test":"TestFederatedEventRelationships"} -{"Action":"fail","Test":"TestFederationKeyUploadQuery"} -{"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST"} -{"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"} -{"Action":"pass","Test":"TestFederationRedactSendsWithoutEvent"} -{"Action":"pass","Test":"TestFederationRejectInvite"} -{"Action":"pass","Test":"TestFederationRoomsInvite"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"} 
-{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_join_the_room_when_homeserver_is_already_participating_in_the_room"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_reject_invite_when_homeserver_is_already_participating_in_the_room"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata"} -{"Action":"pass","Test":"TestFederationThumbnail"} -{"Action":"pass","Test":"TestFetchEvent"} -{"Action":"fail","Test":"TestFetchEventNonWorldReadable"} -{"Action":"pass","Test":"TestFetchEventWorldReadable"} -{"Action":"fail","Test":"TestFetchHistoricalInvitedEventFromBeforeInvite"} -{"Action":"pass","Test":"TestFetchHistoricalInvitedEventFromBetweenInvite"} -{"Action":"fail","Test":"TestFetchHistoricalJoinedEventDenied"} -{"Action":"pass","Test":"TestFetchHistoricalSharedEvent"} -{"Action":"pass","Test":"TestFetchMessagesFromNonExistentRoom"} -{"Action":"pass","Test":"TestFilter"} -{"Action":"fail","Test":"TestFilterMessagesByRelType"} -{"Action":"pass","Test":"TestGappedSyncLeaveSection"} -{"Action":"pass","Test":"TestGetFilteredRoomMembers"} -{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/join"} -{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/leave"} -{"Action":"pass","Test":"TestGetFilteredRoomMembers/not_membership"} -{"Action":"fail","Test":"TestGetMissingEventsGapFilling"} -{"Action":"pass","Test":"TestGetRoomMembers"} -{"Action":"fail","Test":"TestGetRoomMembersAtPoint"} -{"Action":"fail","Test":"TestInboundCanReturnMissingEvents"} -{"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility"} -{"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility"} 
-{"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_shared_visibility"} -{"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_world_readable_visibility"} -{"Action":"pass","Test":"TestInboundFederationKeys"} -{"Action":"pass","Test":"TestInboundFederationProfile"} -{"Action":"pass","Test":"TestInboundFederationProfile/Inbound_federation_can_query_profile_data"} -{"Action":"pass","Test":"TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"} -{"Action":"fail","Test":"TestInboundFederationRejectsEventsWithRejectedAuthEvents"} -{"Action":"pass","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} -{"Action":"pass","Test":"TestIsDirectFlagFederation"} -{"Action":"pass","Test":"TestIsDirectFlagLocal"} -{"Action":"pass","Test":"TestJoinFederatedRoomFailOver"} -{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser"} -{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser/join_remote_federated_room_as_application_service_user"} -{"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents"} -{"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join"} -{"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join"} -{"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join"} -{"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join"} -{"Action":"pass","Test":"TestJoinViaRoomIDAndServerName"} -{"Action":"fail","Test":"TestJson"} -{"Action":"fail","Test":"TestJson/Parallel"} -{"Action":"fail","Test":"TestJson/Parallel/Invalid_JSON_special_values"} 
-{"Action":"fail","Test":"TestJson/Parallel/Invalid_numerical_values"} -{"Action":"fail","Test":"TestJumpToDateEndpoint"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/can_paginate_after_getting_remote_event_from_timestamp_to_event_endpoint"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_backwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_forwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/when_looking_backwards_before_the_room_was_created,_should_be_able_to_find_event_that_was_imported"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_after_given_timestmap"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_before_given_timestmap"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_after_given_timestmap_when_all_message_timestamps_are_the_same"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_before_given_timestamp_when_all_message_timestamps_are_the_same"} -{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_after_the_latest_timestmap"} -{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_before_the_earliest_timestmap"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_private_room_you_are_not_a_member_of"} -{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_public_room_you_are_not_a_member_of"} -{"Action":"fail","Test":"TestKeyChangesLocal"} 
-{"Action":"fail","Test":"TestKeyChangesLocal/New_login_should_create_a_device_lists.changed_entry"} -{"Action":"fail","Test":"TestKeyClaimOrdering"} -{"Action":"pass","Test":"TestKeysQueryWithDeviceIDAsObjectFails"} -{"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectory"} -{"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room"} -{"Action":"fail","Test":"TestKnocking"} -{"Action":"fail","Test":"TestKnocking/A_user_can_knock_on_a_room_without_a_reason"} -{"Action":"fail","Test":"TestKnocking/A_user_can_knock_on_a_room_without_a_reason#01"} -{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in"} -{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_in#01"} -{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} -{"Action":"pass","Test":"TestKnocking/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} -{"Action":"pass","Test":"TestKnocking/A_user_in_the_room_can_accept_a_knock"} -{"Action":"pass","Test":"TestKnocking/A_user_in_the_room_can_accept_a_knock#01"} -{"Action":"fail","Test":"TestKnocking/A_user_in_the_room_can_reject_a_knock"} -{"Action":"fail","Test":"TestKnocking/A_user_in_the_room_can_reject_a_knock#01"} -{"Action":"fail","Test":"TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"} -{"Action":"fail","Test":"TestKnocking/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"} -{"Action":"fail","Test":"TestKnocking/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"} -{"Action":"pass","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} -{"Action":"pass","Test":"TestKnocking/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} -{"Action":"pass","Test":"TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"} 
-{"Action":"pass","Test":"TestKnocking/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"} -{"Action":"pass","Test":"TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"} -{"Action":"pass","Test":"TestKnocking/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"} -{"Action":"pass","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} -{"Action":"pass","Test":"TestKnocking/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} -{"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"} -{"Action":"fail","Test":"TestKnocking/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} -{"Action":"pass","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} -{"Action":"pass","Test":"TestKnocking/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_can_knock_on_a_room_without_a_reason#01"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_in#01"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_cannot_knock_on_a_room_they_are_already_invited_to#01"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_accept_a_knock#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock"} 
-{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_in_the_room_can_reject_a_knock#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_has_already_knocked_is_allowed_to_knock_again_on_the_same_room#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/A_user_that_has_knocked_on_a_local_room_can_rescind_their_knock_and_then_knock_again"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/A_user_that_is_banned_from_a_room_cannot_knock_on_it#01"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/Attempting_to_join_a_room_with_join_rule_'knock'_without_an_invite_should_fail#01"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/Change_the_join_rule_of_a_room_from_'invite'_to_'knock'#01"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_a_join_rule_other_than_'knock'_should_fail#01"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed"} -{"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} -{"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} -{"Action":"pass","Test":"TestLeakyTyping"} 
-{"Action":"pass","Test":"TestLeaveEventInviteRejection"} -{"Action":"fail","Test":"TestLeaveEventVisibility"} -{"Action":"fail","Test":"TestLeftRoomFixture"} -{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_'m.room.name'_state_for_a_departed_room"} -{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/members_for_a_departed_room"} -{"Action":"pass","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/messages_for_a_departed_room"} -{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/state_for_a_departed_room"} -{"Action":"pass","Test":"TestLeftRoomFixture/Getting_messages_going_forward_is_limited_for_a_departed_room"} -{"Action":"pass","Test":"TestLocalPngThumbnail"} -{"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} -{"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} -{"Action":"pass","Test":"TestLogin"} -{"Action":"pass","Test":"TestLogin/parallel"} -{"Action":"pass","Test":"TestLogin/parallel/GET_/login_yields_a_set_of_flows"} -{"Action":"pass","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} -{"Action":"pass","Test":"TestLogin/parallel/POST_/login_as_non-existing_user_is_rejected"} -{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_log_in_as_a_user_with_just_the_local_part_of_the_id"} -{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_login_as_user"} -{"Action":"pass","Test":"TestLogin/parallel/POST_/login_returns_the_same_device_id_as_that_in_the_request"} -{"Action":"pass","Test":"TestLogin/parallel/POST_/login_wrong_password_is_rejected"} -{"Action":"pass","Test":"TestLogout"} -{"Action":"pass","Test":"TestLogout/Can_logout_all_devices"} -{"Action":"pass","Test":"TestLogout/Can_logout_current_device"} -{"Action":"pass","Test":"TestLogout/Request_to_logout_with_invalid_an_access_token_is_rejected"} -{"Action":"pass","Test":"TestLogout/Request_to_logout_without_an_access_token_is_rejected"} 
-{"Action":"fail","Test":"TestMSC3757OwnedState"} -{"Action":"pass","Test":"TestMSC3967"} -{"Action":"pass","Test":"TestMediaConfig"} -{"Action":"pass","Test":"TestMediaFilenames"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'ascii'_over_/_matrix/client/v1/media/download"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name;with;semicolons'_over_/_matrix/client/v1/media/download"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_file_'name_with_spaces'_over_/_matrix/client/v1/media/download"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_download_specifying_a_different_ASCII_file_name_over__matrix/client/v1/media/download"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII/Can_upload_with_ASCII_file_name"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_specifying_a_different_Unicode_file_name_over__matrix/client/v1/media/download"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_locally_over__matrix/client/v1/media/download"} 
-{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_download_with_Unicode_file_name_over_federation_via__matrix/client/v1/media/download"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Can_upload_with_Unicode_file_name"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_as_inline"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_as_inline_via__matrix/client/v1/media/download"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_with_parameters_as_inline"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_safe_media_types_with_parameters_as_inline_via__matrix/client/v1/media/download"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_unsafe_media_types_as_attachments"} -{"Action":"pass","Test":"TestMediaFilenames/Parallel/Unicode/Will_serve_unsafe_media_types_as_attachments_via__matrix/client/v1/media/download"} -{"Action":"pass","Test":"TestMediaWithoutFileName"} -{"Action":"pass","Test":"TestMediaWithoutFileName/parallel"} -{"Action":"pass","Test":"TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_locally"} -{"Action":"pass","Test":"TestMediaWithoutFileName/parallel/Can_download_without_a_file_name_over_federation"} -{"Action":"pass","Test":"TestMediaWithoutFileName/parallel/Can_upload_without_a_file_name"} -{"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1"} -{"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel"} -{"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_locally"} -{"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_over_federation"} 
-{"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_upload_without_a_file_name"} -{"Action":"fail","Test":"TestMembersLocal"} -{"Action":"fail","Test":"TestMembersLocal/Parallel"} -{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_join_events"} -{"Action":"fail","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_incremental_sync)"} -{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_initial_sync)"} -{"Action":"pass","Test":"TestMembersLocal/Parallel/New_room_members_see_their_own_join_event"} -{"Action":"fail","Test":"TestMembershipOnEvents"} -{"Action":"fail","Test":"TestNetworkPartitionOrdering"} -{"Action":"pass","Test":"TestNotPresentUserCannotBanOthers"} -{"Action":"pass","Test":"TestOlderLeftRoomsNotInLeaveSection"} -{"Action":"fail","Test":"TestOutboundFederationEventSizeGetMissingEvents"} -{"Action":"fail","Test":"TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"} -{"Action":"pass","Test":"TestOutboundFederationProfile"} -{"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} -{"Action":"pass","Test":"TestOutboundFederationSend"} -{"Action":"fail","Test":"TestPollsLocalPushRules"} -{"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} -{"Action":"pass","Test":"TestPowerLevels"} -{"Action":"pass","Test":"TestPowerLevels/GET_/rooms/:room_id/state/m.room.power_levels_can_fetch_levels"} -{"Action":"pass","Test":"TestPowerLevels/PUT_/rooms/:room_id/state/m.room.power_levels_can_set_levels"} -{"Action":"pass","Test":"TestPowerLevels/PUT_power_levels_should_not_explode_if_the_old_power_levels_were_empty"} -{"Action":"fail","Test":"TestPresence"} -{"Action":"fail","Test":"TestPresence/GET_/presence/:user_id/status_fetches_initial_status"} 
-{"Action":"pass","Test":"TestPresence/PUT_/presence/:user_id/status_updates_my_presence"} -{"Action":"pass","Test":"TestPresence/Presence_can_be_set_from_sync"} -{"Action":"pass","Test":"TestPresence/Presence_changes_are_reported_to_local_room_members"} -{"Action":"pass","Test":"TestPresence/Presence_changes_to_UNAVAILABLE_are_reported_to_local_room_members"} -{"Action":"pass","Test":"TestPresenceSyncDifferentRooms"} -{"Action":"pass","Test":"TestProfileAvatarURL"} -{"Action":"pass","Test":"TestProfileAvatarURL/GET_/profile/:user_id/avatar_url_publicly_accessible"} -{"Action":"pass","Test":"TestProfileAvatarURL/PUT_/profile/:user_id/avatar_url_sets_my_avatar"} -{"Action":"pass","Test":"TestProfileDisplayName"} -{"Action":"pass","Test":"TestProfileDisplayName/GET_/profile/:user_id/displayname_publicly_accessible"} -{"Action":"pass","Test":"TestProfileDisplayName/PUT_/profile/:user_id/displayname_sets_my_name"} -{"Action":"pass","Test":"TestPushRuleCacheHealth"} -{"Action":"pass","Test":"TestPushSync"} -{"Action":"pass","Test":"TestPushSync/Adding_a_push_rule_wakes_up_an_incremental_/sync"} -{"Action":"pass","Test":"TestPushSync/Disabling_a_push_rule_wakes_up_an_incremental_/sync"} -{"Action":"pass","Test":"TestPushSync/Enabling_a_push_rule_wakes_up_an_incremental_/sync"} -{"Action":"pass","Test":"TestPushSync/Push_rules_come_down_in_an_initial_/sync"} -{"Action":"pass","Test":"TestPushSync/Setting_actions_for_a_push_rule_wakes_up_an_incremental_/sync"} -{"Action":"pass","Test":"TestRegistration"} -{"Action":"pass","Test":"TestRegistration/parallel"} -{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_INVALID_USERNAME_for_invalid_user_name"} -{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_USER_IN_USE_for_registered_user_name"} -{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_available_for_unregistered_user_name"} 
-{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_admin_with_shared_secret"} -{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret"} -{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_disallows_symbols"} -{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_downcases_capitals"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/-"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/."} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_//"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/3"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/="} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/_"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/q"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_can_create_a_user"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_downcases_capitals_in_usernames"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_if_user_already_exists"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_usernames_with_special_characters"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_returns_the_same_device_id_as_that_in_the_request"} -{"Action":"pass","Test":"TestRegistration/parallel/POST_{}_returns_a_set_of_flows"} 
-{"Action":"pass","Test":"TestRegistration/parallel/Registration_accepts_non-ascii_passwords"} -{"Action":"pass","Test":"TestRelations"} -{"Action":"fail","Test":"TestRelationsPagination"} -{"Action":"pass","Test":"TestRelationsPaginationSync"} -{"Action":"pass","Test":"TestRemoteAliasRequestsUnderstandUnicode"} -{"Action":"pass","Test":"TestRemotePngThumbnail"} -{"Action":"pass","Test":"TestRemotePngThumbnail/test_/_matrix/client/v1/media_endpoint"} -{"Action":"pass","Test":"TestRemotePngThumbnail/test_/_matrix/media/v3_endpoint"} -{"Action":"fail","Test":"TestRemotePresence"} -{"Action":"fail","Test":"TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"} -{"Action":"fail","Test":"TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"} -{"Action":"pass","Test":"TestRemoteTyping"} -{"Action":"fail","Test":"TestRemovingAccountData"} -{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_DELETE_works"} -{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_PUT_works"} -{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_account_data_via_PUT_works"} -{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_data_via_DELETE_works"} -{"Action":"fail","Test":"TestRequestEncodingFails"} -{"Action":"fail","Test":"TestRequestEncodingFails/POST_rejects_invalid_utf-8_in_JSON"} -{"Action":"fail","Test":"TestRestrictedRoomsLocalJoin"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_initially"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_with_mangled_join_rules"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_succeed_when_invited"} -{"Action":"fail","Test":"TestRestrictedRoomsLocalJoin/Join_should_succeed_when_joined_to_allowed_room"} 
-{"Action":"fail","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_initially"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"} -{"Action":"pass","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_invited"} -{"Action":"fail","Test":"TestRestrictedRoomsLocalJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoin"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_initially"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_when_left_allowed_room"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_fail_with_mangled_join_rules"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_invited"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoin/Join_should_succeed_when_joined_to_allowed_room"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinFailOver"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinFailOverInMSC3787Room"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_initially"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_when_left_allowed_room"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_fail_with_mangled_join_rules"} -{"Action":"pass","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_invited"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinInMSC3787Room/Join_should_succeed_when_joined_to_allowed_room"} -{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUser"} 
-{"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room"} -{"Action":"pass","Test":"TestRestrictedRoomsSpacesSummaryFederation"} -{"Action":"fail","Test":"TestRestrictedRoomsSpacesSummaryLocal"} -{"Action":"pass","Test":"TestRoomAlias"} -{"Action":"pass","Test":"TestRoomAlias/Parallel"} -{"Action":"pass","Test":"TestRoomAlias/Parallel/GET_/rooms/:room_id/aliases_lists_aliases"} -{"Action":"pass","Test":"TestRoomAlias/Parallel/Only_room_members_can_list_aliases_of_a_room"} -{"Action":"pass","Test":"TestRoomAlias/Parallel/PUT_/directory/room/:room_alias_creates_alias"} -{"Action":"pass","Test":"TestRoomAlias/Parallel/Room_aliases_can_contain_Unicode"} -{"Action":"fail","Test":"TestRoomCanonicalAlias"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel"} -{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_aliases"} -{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_alt_aliases"} -{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} -{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} -{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} -{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases#01"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_setting_rejects_deleted_aliases"} -{"Action":"pass","Test":"TestRoomCreate"} -{"Action":"pass","Test":"TestRoomCreate/Parallel"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/Can_/sync_newly_created_room"} 
-{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_creates_a_room_with_the_given_version"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_ignores_attempts_to_set_the_room_version_via_creation_content"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room_with_invites"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_public_room"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_name"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_topic"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_numeric_versions"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_unknown_versions"} -{"Action":"pass","Test":"TestRoomCreate/Parallel/Rooms_can_be_created_with_an_initial_invite_list_(SYN-205)"} -{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself"} -{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel"} -{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Joining_room_twice_is_idempotent"} -{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} -{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} -{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} -{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} -{"Action":"pass","Test":"TestRoomDeleteAlias"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_alias_with_no_ops"} 
-{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_canonical_alias_with_no_ops"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Deleting_a_non-existent_alias_should_return_a_404"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_in_the_default_room_configuration"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_can't_delete_other's_aliases"} -{"Action":"fail","Test":"TestRoomForget"} -{"Action":"fail","Test":"TestRoomForget/Parallel"} -{"Action":"pass","Test":"TestRoomForget/Parallel/Can't_forget_room_you're_still_in"} -{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_we_weren't_an_actual_member"} -{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_you've_been_kicked_from"} -{"Action":"pass","Test":"TestRoomForget/Parallel/Can_re-join_room_if_re-invited"} -{"Action":"pass","Test":"TestRoomForget/Parallel/Forgetting_room_does_not_show_up_in_v2_initial_/sync"} -{"Action":"fail","Test":"TestRoomForget/Parallel/Forgotten_room_messages_cannot_be_paginated"} -{"Action":"fail","Test":"TestRoomForget/Parallel/Leave_for_forgotten_room_shows_up_in_v2_incremental_/sync"} -{"Action":"pass","Test":"TestRoomImageRoundtrip"} -{"Action":"fail","Test":"TestRoomMembers"} -{"Action":"fail","Test":"TestRoomMembers/Parallel"} -{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room"} -{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room_with_custom_content"} -{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room"} -{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room_with_custom_content"} -{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/ban_can_ban_a_user"} -{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/invite_can_send_an_invite"} 
-{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/join_can_join_a_room"} -{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/leave_can_leave_a_room"} -{"Action":"pass","Test":"TestRoomMembers/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} -{"Action":"pass","Test":"TestRoomMessagesLazyLoading"} -{"Action":"pass","Test":"TestRoomMessagesLazyLoadingLocalUser"} -{"Action":"pass","Test":"TestRoomReadMarkers"} -{"Action":"pass","Test":"TestRoomReceipts"} -{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin"} -{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} -{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} -{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} -{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} -{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} -{"Action":"pass","Test":"TestRoomSpecificUsernameChange"} -{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} -{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} -{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} -{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} -{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} -{"Action":"fail","Test":"TestRoomState"} -{"Action":"fail","Test":"TestRoomState/Parallel"} -{"Action":"pass","Test":"TestRoomState/Parallel/GET_/directory/room/:room_alias_yields_room_ID"} -{"Action":"pass","Test":"TestRoomState/Parallel/GET_/joined_rooms_lists_newly-created_room"} 
-{"Action":"pass","Test":"TestRoomState/Parallel/GET_/publicRooms_lists_newly-created_room"} -{"Action":"fail","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_fetches_my_membership"} -{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_is_forbidden_after_leaving_room"} -{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id?format=event_fetches_my_membership_event"} -{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id_fetches_my_membership"} -{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.name_gets_name"} -{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.power_levels_fetches_powerlevels"} -{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.topic_gets_topic"} -{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state_fetches_entire_room_state"} -{"Action":"pass","Test":"TestRoomState/Parallel/POST_/rooms/:room_id/state/m.room.name_sets_name"} -{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/createRoom_with_creation_content"} -{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/rooms/:room_id/state/m.room.topic_sets_topic"} -{"Action":"pass","Test":"TestRoomSummary"} -{"Action":"pass","Test":"TestRoomsInvite"} -{"Action":"pass","Test":"TestRoomsInvite/Parallel"} -{"Action":"pass","Test":"TestRoomsInvite/Parallel/Can_invite_users_to_invite-only_rooms"} -{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite"} -{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} -{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_see_room_metadata"} -{"Action":"pass","Test":"TestRoomsInvite/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} -{"Action":"pass","Test":"TestRoomsInvite/Parallel/Uninvited_users_cannot_join_the_room"} 
-{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_a_user_that_is_already_in_the_room"} -{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_themselves_to_a_room"} -{"Action":"fail","Test":"TestSearch"} -{"Action":"fail","Test":"TestSearch/parallel"} -{"Action":"fail","Test":"TestSearch/parallel/Can_back-paginate_search_results"} -{"Action":"fail","Test":"TestSearch/parallel/Can_get_context_around_search_results"} -{"Action":"pass","Test":"TestSearch/parallel/Can_search_for_an_event_by_body"} -{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_rank_ordering_do_not_include_redacted_events"} -{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_recent_ordering_do_not_include_redacted_events"} -{"Action":"pass","Test":"TestSearch/parallel/Search_works_across_an_upgraded_room_and_its_predecessor"} -{"Action":"fail","Test":"TestSendAndFetchMessage"} -{"Action":"skip","Test":"TestSendJoinPartialStateResponse"} -{"Action":"pass","Test":"TestSendMessageWithTxn"} -{"Action":"pass","Test":"TestServerCapabilities"} -{"Action":"skip","Test":"TestServerNotices"} -{"Action":"fail","Test":"TestSync"} -{"Action":"fail","Test":"TestSync/parallel"} -{"Action":"pass","Test":"TestSync/parallel/Can_sync_a_joined_room"} -{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking"} -{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking/User_is_correctly_listed_when_they_leave,_even_when_lazy_loading_is_enabled"} -{"Action":"pass","Test":"TestSync/parallel/Full_state_sync_includes_joined_rooms"} -{"Action":"fail","Test":"TestSync/parallel/Get_presence_for_newly_joined_members_in_incremental_sync"} -{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_has_correct_timeline_in_incremental_sync"} -{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_includes_presence_in_incremental_sync"} -{"Action":"pass","Test":"TestSync/parallel/Newly_joined_room_is_included_in_an_incremental_sync"} 
-{"Action":"pass","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} -{"Action":"pass","Test":"TestSyncFilter"} -{"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} -{"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} -{"Action":"pass","Test":"TestSyncLeaveSection"} -{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} -{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} -{"Action":"pass","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} -{"Action":"pass","Test":"TestSyncOmitsStateChangeOnFilteredEvents"} -{"Action":"pass","Test":"TestSyncTimelineGap"} -{"Action":"pass","Test":"TestSyncTimelineGap/full"} -{"Action":"pass","Test":"TestSyncTimelineGap/incremental"} -{"Action":"pass","Test":"TestTentativeEventualJoiningAfterRejecting"} -{"Action":"fail","Test":"TestThreadReceiptsInSyncMSC4102"} -{"Action":"fail","Test":"TestThreadedReceipts"} -{"Action":"fail","Test":"TestThreadsEndpoint"} -{"Action":"pass","Test":"TestToDeviceMessages"} -{"Action":"fail","Test":"TestToDeviceMessagesOverFederation"} -{"Action":"pass","Test":"TestToDeviceMessagesOverFederation/good_connectivity"} -{"Action":"pass","Test":"TestToDeviceMessagesOverFederation/interrupted_connectivity"} -{"Action":"fail","Test":"TestToDeviceMessagesOverFederation/stopped_server"} -{"Action":"fail","Test":"TestTxnIdWithRefreshToken"} -{"Action":"fail","Test":"TestTxnIdempotency"} -{"Action":"pass","Test":"TestTxnIdempotencyScopedToDevice"} -{"Action":"pass","Test":"TestTxnInEvent"} -{"Action":"pass","Test":"TestTxnScopeOnLocalEcho"} -{"Action":"pass","Test":"TestTyping"} -{"Action":"pass","Test":"TestTyping/Typing_can_be_explicitly_stopped"} -{"Action":"pass","Test":"TestTyping/Typing_notification_sent_to_local_room_members"} -{"Action":"fail","Test":"TestUnknownEndpoints"} 
-{"Action":"pass","Test":"TestUnknownEndpoints/Client-server_endpoints"} -{"Action":"fail","Test":"TestUnknownEndpoints/Key_endpoints"} -{"Action":"pass","Test":"TestUnknownEndpoints/Media_endpoints"} -{"Action":"pass","Test":"TestUnknownEndpoints/Server-server_endpoints"} -{"Action":"pass","Test":"TestUnknownEndpoints/Unknown_prefix"} -{"Action":"fail","Test":"TestUnrejectRejectedEvents"} -{"Action":"fail","Test":"TestUploadKey"} -{"Action":"fail","Test":"TestUploadKey/Parallel"} -{"Action":"fail","Test":"TestUploadKey/Parallel/Can_claim_one_time_key_using_POST"} -{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_device_keys_using_POST"} -{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_specific_device_keys_using_POST"} -{"Action":"pass","Test":"TestUploadKey/Parallel/Can_upload_device_keys"} -{"Action":"pass","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} -{"Action":"pass","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} -{"Action":"pass","Test":"TestUploadKey/Parallel/query_for_user_with_no_keys_returns_empty_key_dict"} -{"Action":"pass","Test":"TestUploadKeyIdempotency"} -{"Action":"pass","Test":"TestUploadKeyIdempotencyOverlap"} -{"Action":"fail","Test":"TestUrlPreview"} -{"Action":"pass","Test":"TestUserAppearsInChangedDeviceListOnJoinOverFederation"} -{"Action":"pass","Test":"TestVersionStructure"} -{"Action":"pass","Test":"TestVersionStructure/Version_responds_200_OK_with_valid_structure"} -{"Action":"pass","Test":"TestWithoutOwnedState"} -{"Action":"pass","Test":"TestWithoutOwnedState/parallel"} -{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_a_non-member_user_ID_as_state_key"} -{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_suffixed_user_ID_as_state_key"} -{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_user_ID_as_state_key"} 
-{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_malformed_user_ID_as_state_key"} -{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_their_own_suffixed_user_ID_as_state_key"} -{"Action":"pass","Test":"TestWithoutOwnedState/parallel/user_can_set_state_with_their_own_user_ID_as_state_key"} -{"Action":"pass","Test":"TestWriteMDirectAccountData"} diff --git a/theme/css/chrome.css b/theme/css/chrome.css deleted file mode 100644 index 52b35c2c..00000000 --- a/theme/css/chrome.css +++ /dev/null @@ -1,608 +0,0 @@ -/* CSS for UI elements (a.k.a. chrome) */ - -@import 'variables.css'; - -html { - scrollbar-color: var(--scrollbar) var(--bg); -} -#searchresults a, -.content a:link, -a:visited, -a > .hljs { - color: var(--links); -} - -/* - body-container is necessary because mobile browsers don't seem to like - overflow-x on the body tag when there is a tag. -*/ -#body-container { - /* - This is used when the sidebar pushes the body content off the side of - the screen on small screens. Without it, dragging on mobile Safari - will want to reposition the viewport in a weird way. 
- */ - overflow-x: clip; -} - -/* Menu Bar */ - -#menu-bar, -#menu-bar-hover-placeholder { - z-index: 101; - margin: auto calc(0px - var(--page-padding)); -} -#menu-bar { - position: relative; - display: flex; - flex-wrap: wrap; - background-color: var(--bg); - border-block-end-color: var(--bg); - border-block-end-width: 1px; - border-block-end-style: solid; -} -#menu-bar.sticky, -.js #menu-bar-hover-placeholder:hover + #menu-bar, -.js #menu-bar:hover, -.js.sidebar-visible #menu-bar { - position: -webkit-sticky; - position: sticky; - top: 0 !important; -} -#menu-bar-hover-placeholder { - position: sticky; - position: -webkit-sticky; - top: 0; - height: var(--menu-bar-height); -} -#menu-bar.bordered { - border-block-end-color: var(--table-border-color); -} -#menu-bar i, #menu-bar .icon-button { - position: relative; - padding: 0 8px; - z-index: 10; - line-height: var(--menu-bar-height); - cursor: pointer; - transition: color 0.5s; -} -@media only screen and (max-width: 420px) { - #menu-bar i, #menu-bar .icon-button { - padding: 0 5px; - } -} - -.icon-button { - border: none; - background: none; - padding: 0; - color: inherit; -} -.icon-button i { - margin: 0; -} - -.right-buttons { - margin: 0 15px; -} -.right-buttons a { - text-decoration: none; -} - -.left-buttons { - display: flex; - margin: 0 5px; -} -.no-js .left-buttons button { - display: none; -} - -.menu-title { - display: inline-block; - font-weight: 400; - font-size: 2.4rem; - line-height: var(--menu-bar-height); - text-align: center; - margin: 0; - flex: 1; - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; - color: #bb7fb2; -} -.js .menu-title { - cursor: pointer; -} - -.menu-bar, -.menu-bar:visited, -.nav-chapters, -.nav-chapters:visited, -.mobile-nav-chapters, -.mobile-nav-chapters:visited, -.menu-bar .icon-button, -.menu-bar a i { - color: var(--icons); -} - -.menu-bar i:hover, -.menu-bar .icon-button:hover, -.nav-chapters:hover, -.mobile-nav-chapters i:hover { - color: 
var(--icons-hover); -} - -/* Nav Icons */ - -.nav-chapters { - font-size: 2.5em; - text-align: center; - text-decoration: none; - - position: fixed; - top: 0; - bottom: 0; - margin: 0; - max-width: 150px; - min-width: 90px; - - display: flex; - justify-content: center; - align-content: center; - flex-direction: column; - - transition: color 0.5s, background-color 0.5s; -} - -.nav-chapters:hover { - text-decoration: none; - background-color: var(--theme-hover); - transition: background-color 0.15s, color 0.15s; -} - -.nav-wrapper { - margin-block-start: 50px; - display: none; -} - -.mobile-nav-chapters { - font-size: 2.5em; - text-align: center; - text-decoration: none; - width: 90px; - border-radius: 5px; - background-color: var(--sidebar-bg); -} - -/* Only Firefox supports flow-relative values */ -.previous { float: left; } -[dir=rtl] .previous { float: right; } - -/* Only Firefox supports flow-relative values */ -.next { - float: right; - right: var(--page-padding); -} -[dir=rtl] .next { - float: left; - right: unset; - left: var(--page-padding); -} - -/* Use the correct buttons for RTL layouts*/ -[dir=rtl] .previous i.fa-angle-left:before {content:"\f105";} -[dir=rtl] .next i.fa-angle-right:before { content:"\f104"; } - -@media only screen and (max-width: 1080px) { - .nav-wide-wrapper { display: none; } - .nav-wrapper { display: block; } -} - -/* sidebar-visible */ -@media only screen and (max-width: 1380px) { - #sidebar-toggle-anchor:checked ~ .page-wrapper .nav-wide-wrapper { display: none; } - #sidebar-toggle-anchor:checked ~ .page-wrapper .nav-wrapper { display: block; } -} - -/* Inline code */ - -:not(pre) > .hljs { - display: inline; - padding: 0.1em 0.3em; - border-radius: 3px; -} - -:not(pre):not(a) > .hljs { - color: var(--inline-code-color); - overflow-x: initial; -} - -a:hover > .hljs { - text-decoration: underline; -} - -pre { - position: relative; -} -pre > .buttons { - position: absolute; - z-index: 100; - right: 0px; - top: 2px; - margin: 0px; - 
padding: 2px 0px; - - color: var(--sidebar-fg); - cursor: pointer; - visibility: hidden; - opacity: 0; - transition: visibility 0.1s linear, opacity 0.1s linear; -} -pre:hover > .buttons { - visibility: visible; - opacity: 1 -} -pre > .buttons :hover { - color: var(--sidebar-active); - border-color: var(--icons-hover); - background-color: var(--theme-hover); -} -pre > .buttons i { - margin-inline-start: 8px; -} -pre > .buttons button { - cursor: inherit; - margin: 0px 5px; - padding: 3px 5px; - font-size: 14px; - - border-style: solid; - border-width: 1px; - border-radius: 4px; - border-color: var(--icons); - background-color: var(--theme-popup-bg); - transition: 100ms; - transition-property: color,border-color,background-color; - color: var(--icons); -} -@media (pointer: coarse) { - pre > .buttons button { - /* On mobile, make it easier to tap buttons. */ - padding: 0.3rem 1rem; - } - - .sidebar-resize-indicator { - /* Hide resize indicator on devices with limited accuracy */ - display: none; - } -} -pre > code { - display: block; - padding: 1rem; -} - -/* FIXME: ACE editors overlap their buttons because ACE does absolute - positioning within the code block which breaks padding. The only solution I - can think of is to move the padding to the outer pre tag (or insert a div - wrapper), but that would require fixing a whole bunch of CSS rules. 
-*/ -.hljs.ace_editor { - padding: 0rem 0rem; -} - -pre > .result { - margin-block-start: 10px; -} - -/* Search */ - -#searchresults a { - text-decoration: none; -} - -mark { - border-radius: 2px; - padding-block-start: 0; - padding-block-end: 1px; - padding-inline-start: 3px; - padding-inline-end: 3px; - margin-block-start: 0; - margin-block-end: -1px; - margin-inline-start: -3px; - margin-inline-end: -3px; - background-color: var(--search-mark-bg); - transition: background-color 300ms linear; - cursor: pointer; -} - -mark.fade-out { - background-color: rgba(0,0,0,0) !important; - cursor: auto; -} - -.searchbar-outer { - margin-inline-start: auto; - margin-inline-end: auto; - max-width: var(--content-max-width); -} - -#searchbar { - width: 100%; - margin-block-start: 5px; - margin-block-end: 0; - margin-inline-start: auto; - margin-inline-end: auto; - padding: 10px 16px; - transition: box-shadow 300ms ease-in-out; - border: 1px solid var(--searchbar-border-color); - border-radius: 3px; - background-color: var(--searchbar-bg); - color: var(--searchbar-fg); -} -#searchbar:focus, -#searchbar.active { - box-shadow: 0 0 3px var(--searchbar-shadow-color); -} - -.searchresults-header { - font-weight: bold; - font-size: 1em; - padding-block-start: 18px; - padding-block-end: 0; - padding-inline-start: 5px; - padding-inline-end: 0; - color: var(--searchresults-header-fg); -} - -.searchresults-outer { - margin-inline-start: auto; - margin-inline-end: auto; - max-width: var(--content-max-width); - border-block-end: 1px dashed var(--searchresults-border-color); -} - -ul#searchresults { - list-style: none; - padding-inline-start: 20px; -} -ul#searchresults li { - margin: 10px 0px; - padding: 2px; - border-radius: 2px; -} -ul#searchresults li.focus { - background-color: var(--searchresults-li-bg); -} -ul#searchresults span.teaser { - display: block; - clear: both; - margin-block-start: 5px; - margin-block-end: 0; - margin-inline-start: 20px; - margin-inline-end: 0; - font-size: 
0.8em; -} -ul#searchresults span.teaser em { - font-weight: bold; - font-style: normal; -} - -/* Sidebar */ - -.sidebar { - position: fixed; - left: 0; - top: 0; - bottom: 0; - width: var(--sidebar-width); - font-size: 0.875em; - box-sizing: border-box; - -webkit-overflow-scrolling: touch; - overscroll-behavior-y: contain; - background-color: var(--sidebar-bg); - color: var(--sidebar-fg); -} -[dir=rtl] .sidebar { left: unset; right: 0; } -.sidebar-resizing { - -moz-user-select: none; - -webkit-user-select: none; - -ms-user-select: none; - user-select: none; -} -.no-js .sidebar, -.js:not(.sidebar-resizing) .sidebar { - transition: transform 0.3s; /* Animation: slide away */ -} -.sidebar code { - line-height: 2em; -} -.sidebar .sidebar-scrollbox { - overflow-y: auto; - position: absolute; - top: 0; - bottom: 0; - left: 0; - right: 0; - padding: 10px 10px; -} -.sidebar .sidebar-resize-handle { - position: absolute; - cursor: col-resize; - width: 0; - right: calc(var(--sidebar-resize-indicator-width) * -1); - top: 0; - bottom: 0; - display: flex; - align-items: center; -} - -.sidebar-resize-handle .sidebar-resize-indicator { - width: 100%; - height: 12px; - background-color: var(--icons); - margin-inline-start: var(--sidebar-resize-indicator-space); -} - -[dir=rtl] .sidebar .sidebar-resize-handle { - left: calc(var(--sidebar-resize-indicator-width) * -1); - right: unset; -} -.js .sidebar .sidebar-resize-handle { - cursor: col-resize; - width: calc(var(--sidebar-resize-indicator-width) - var(--sidebar-resize-indicator-space)); -} -/* sidebar-hidden */ -#sidebar-toggle-anchor:not(:checked) ~ .sidebar { - transform: translateX(calc(0px - var(--sidebar-width) - var(--sidebar-resize-indicator-width))); - z-index: -1; -} -[dir=rtl] #sidebar-toggle-anchor:not(:checked) ~ .sidebar { - transform: translateX(calc(var(--sidebar-width) + var(--sidebar-resize-indicator-width))); -} -.sidebar::-webkit-scrollbar { - background: var(--sidebar-bg); -} -.sidebar::-webkit-scrollbar-thumb 
{ - background: var(--scrollbar); -} - -/* sidebar-visible */ -#sidebar-toggle-anchor:checked ~ .page-wrapper { - transform: translateX(calc(var(--sidebar-width) + var(--sidebar-resize-indicator-width))); -} -[dir=rtl] #sidebar-toggle-anchor:checked ~ .page-wrapper { - transform: translateX(calc(0px - var(--sidebar-width) - var(--sidebar-resize-indicator-width))); -} -@media only screen and (min-width: 620px) { - #sidebar-toggle-anchor:checked ~ .page-wrapper { - transform: none; - margin-inline-start: calc(var(--sidebar-width) + var(--sidebar-resize-indicator-width)); - } - [dir=rtl] #sidebar-toggle-anchor:checked ~ .page-wrapper { - transform: none; - } -} - -.chapter { - list-style: none outside none; - padding-inline-start: 0; - line-height: 2.2em; -} - -.chapter ol { - width: 100%; -} - -.chapter li { - display: flex; - color: var(--sidebar-non-existant); -} -.chapter li a { - display: block; - padding: 0; - text-decoration: none; - color: var(--sidebar-fg); -} - -.chapter li a:hover { - color: var(--sidebar-active); -} - -.chapter li a.active { - color: var(--sidebar-active); -} - -.chapter li > a.toggle { - cursor: pointer; - display: block; - margin-inline-start: auto; - padding: 0 10px; - user-select: none; - opacity: 0.68; -} - -.chapter li > a.toggle div { - transition: transform 0.5s; -} - -/* collapse the section */ -.chapter li:not(.expanded) + li > ol { - display: none; -} - -.chapter li.chapter-item { - line-height: 1.5em; - margin-block-start: 0.6em; -} - -.chapter li.expanded > a.toggle div { - transform: rotate(90deg); -} - -.spacer { - width: 100%; - height: 3px; - margin: 5px 0px; -} -.chapter .spacer { - background-color: var(--sidebar-spacer); -} - -@media (-moz-touch-enabled: 1), (pointer: coarse) { - .chapter li a { padding: 5px 0; } - .spacer { margin: 10px 0; } -} - -.section { - list-style: none outside none; - padding-inline-start: 20px; - line-height: 1.9em; -} - -/* Theme Menu Popup */ - -.theme-popup { - position: absolute; - left: 
10px; - top: var(--menu-bar-height); - z-index: 1000; - border-radius: 4px; - font-size: 0.7em; - color: var(--fg); - background: var(--theme-popup-bg); - border: 1px solid var(--theme-popup-border); - margin: 0; - padding: 0; - list-style: none; - display: none; - /* Don't let the children's background extend past the rounded corners. */ - overflow: hidden; -} -[dir=rtl] .theme-popup { left: unset; right: 10px; } -.theme-popup .default { - color: var(--icons); -} -.theme-popup .theme { - width: 100%; - border: 0; - margin: 0; - padding: 2px 20px; - line-height: 25px; - white-space: nowrap; - text-align: start; - cursor: pointer; - color: inherit; - background: inherit; - font-size: inherit; -} -.theme-popup .theme:hover { - background-color: var(--theme-hover); -} - -.theme-selected::before { - display: inline-block; - content: "✓"; - margin-inline-start: -14px; - width: 14px; -} - diff --git a/theme/css/general.css b/theme/css/general.css deleted file mode 100644 index e7d20da7..00000000 --- a/theme/css/general.css +++ /dev/null @@ -1,234 +0,0 @@ -/* Base styles and content styles */ - -@import 'variables.css'; - -:root { - /* Browser default font-size is 16px, this way 1 rem = 10px */ - font-size: 62.5%; - color-scheme: var(--color-scheme); -} - -html { - font-family: "Open Sans", sans-serif; - color: var(--fg); - background-color: var(--bg); - text-size-adjust: none; - -webkit-text-size-adjust: none; -} - -body { - margin: 0; - font-size: 1.6rem; - overflow-x: hidden; -} - -code { - font-family: var(--mono-font) !important; - font-size: var(--code-font-size); - direction: ltr !important; -} - -/* make long words/inline code not x overflow */ -main { - overflow-wrap: break-word; -} - -/* make wide tables scroll if they overflow */ -.table-wrapper { - overflow-x: auto; -} - -/* Don't change font size in headers. 
*/ -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - font-size: unset; -} - -.left { float: left; } -.right { float: right; } -.boring { opacity: 0.6; } -.hide-boring .boring { display: none; } -.hidden { display: none !important; } - -h2, h3 { margin-block-start: 2.5em; } -h4, h5 { margin-block-start: 2em; } - -.header + .header h3, -.header + .header h4, -.header + .header h5 { - margin-block-start: 1em; -} - -h1:target::before, -h2:target::before, -h3:target::before, -h4:target::before, -h5:target::before, -h6:target::before { - display: inline-block; - content: "»"; - margin-inline-start: -30px; - width: 30px; -} - -/* This is broken on Safari as of version 14, but is fixed - in Safari Technology Preview 117 which I think will be Safari 14.2. - https://bugs.webkit.org/show_bug.cgi?id=218076 -*/ -:target { - /* Safari does not support logical properties */ - scroll-margin-top: calc(var(--menu-bar-height) + 0.5em); -} - -.page { - outline: 0; - padding: 0 var(--page-padding); - margin-block-start: calc(0px - var(--menu-bar-height)); /* Compensate for the #menu-bar-hover-placeholder */ -} -.page-wrapper { - box-sizing: border-box; - background-color: var(--bg); -} -.no-js .page-wrapper, -.js:not(.sidebar-resizing) .page-wrapper { - transition: margin-left 0.3s ease, transform 0.3s ease; /* Animation: slide away */ -} -[dir=rtl] .js:not(.sidebar-resizing) .page-wrapper { - transition: margin-right 0.3s ease, transform 0.3s ease; /* Animation: slide away */ -} - -.content { - overflow-y: auto; - padding: 0 5px 50px 5px; -} -.content main { - margin-inline-start: auto; - margin-inline-end: auto; - max-width: var(--content-max-width); -} -.content p { line-height: 1.45em; } -.content ol { line-height: 1.45em; } -.content ul { line-height: 1.45em; } -.content a { text-decoration: none; } -.content a:hover { text-decoration: underline; } -.content img, .content video { max-width: 100%; } -.content .header:link, -.content .header:visited { - color: var(--fg); -} 
-.content .header:link, -.content .header:visited:hover { - text-decoration: none; -} - -table { - margin: 0 auto; - border-collapse: collapse; -} -table td { - padding: 3px 20px; - border: 1px var(--table-border-color) solid; -} -table thead { - background: var(--table-header-bg); -} -table thead td { - font-weight: 700; - border: none; -} -table thead th { - padding: 3px 20px; -} -table thead tr { - border: 1px var(--table-header-bg) solid; -} -/* Alternate background colors for rows */ -table tbody tr:nth-child(2n) { - background: var(--table-alternate-bg); -} - - -blockquote { - margin: 20px 0; - padding: 0 20px; - color: var(--fg); - background-color: var(--quote-bg); - border-block-start: .1em solid var(--quote-border); - border-block-end: .1em solid var(--quote-border); -} - -.warning { - margin: 20px; - padding: 0 20px; - border-inline-start: 2px solid var(--warning-border); -} - -.warning:before { - position: absolute; - width: 3rem; - height: 3rem; - margin-inline-start: calc(-1.5rem - 21px); - content: "ⓘ"; - text-align: center; - background-color: var(--bg); - color: var(--warning-border); - font-weight: bold; - font-size: 2rem; -} - -blockquote .warning:before { - background-color: var(--quote-bg); -} - -kbd { - background-color: var(--table-border-color); - border-radius: 4px; - border: solid 1px var(--theme-popup-border); - box-shadow: inset 0 -1px 0 var(--theme-hover); - display: inline-block; - font-size: var(--code-font-size); - font-family: var(--mono-font); - line-height: 10px; - padding: 4px 5px; - vertical-align: middle; -} - -:not(.footnote-definition) + .footnote-definition, -.footnote-definition + :not(.footnote-definition) { - margin-block-start: 2em; -} -.footnote-definition { - font-size: 0.9em; - margin: 0.5em 0; -} -.footnote-definition p { - display: inline; -} - -.tooltiptext { - position: absolute; - visibility: hidden; - color: #fff; - background-color: #333; - transform: translateX(-50%); /* Center by moving tooltip 50% of its 
width left */ - left: -8px; /* Half of the width of the icon */ - top: -35px; - font-size: 0.8em; - text-align: center; - border-radius: 6px; - padding: 5px 8px; - margin: 5px; - z-index: 1000; -} -.tooltipped .tooltiptext { - visibility: visible; -} - -.chapter li.part-title { - color: var(--sidebar-fg); - margin: 5px 0px; - font-weight: bold; -} - -.result-no-output { - font-style: italic; -} diff --git a/theme/css/variables.css b/theme/css/variables.css deleted file mode 100644 index e7feed98..00000000 --- a/theme/css/variables.css +++ /dev/null @@ -1,279 +0,0 @@ -/* Globals */ - -:root { - --sidebar-width: 300px; - --sidebar-resize-indicator-width: 8px; - --sidebar-resize-indicator-space: 2px; - --page-padding: 15px; - --content-max-width: 750px; - --menu-bar-height: 50px; - --mono-font: "Source Code Pro", Consolas, "Ubuntu Mono", Menlo, "DejaVu Sans Mono", monospace, monospace; - --code-font-size: 0.875em - /* please adjust the ace font size accordingly in editor.js */ -} - -/* Themes */ - -.ayu { - --bg: hsl(210, 25%, 8%); - --fg: #c5c5c5; - - --sidebar-bg: #14191f; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #5c6773; - --sidebar-active: #ffb454; - --sidebar-spacer: #2d334f; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #b7b9cc; - - --links: #0096cf; - - --inline-code-color: #ffb454; - - --theme-popup-bg: #14191f; - --theme-popup-border: #5c6773; - --theme-hover: #191f26; - - --quote-bg: hsl(226, 15%, 17%); - --quote-border: hsl(226, 15%, 22%); - - --warning-border: #ff8e00; - - --table-border-color: hsl(210, 25%, 13%); - --table-header-bg: hsl(210, 25%, 28%); - --table-alternate-bg: hsl(210, 25%, 11%); - - --searchbar-border-color: #848484; - --searchbar-bg: #424242; - --searchbar-fg: #fff; - --searchbar-shadow-color: #d4c89f; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #252932; - --search-mark-bg: #e3b171; - - --color-scheme: dark; -} - -.coal { - --bg: hsl(200, 7%, 
8%); - --fg: #98a3ad; - - --sidebar-bg: #292c2f; - --sidebar-fg: #a1adb8; - --sidebar-non-existant: #505254; - --sidebar-active: #3473ad; - --sidebar-spacer: #393939; - - --scrollbar: var(--sidebar-fg); - - --icons: #43484d; - --icons-hover: #b3c0cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6; - - --theme-popup-bg: #141617; - --theme-popup-border: #43484d; - --theme-hover: #1f2124; - - --quote-bg: hsl(234, 21%, 18%); - --quote-border: hsl(234, 21%, 23%); - - --warning-border: #ff8e00; - - --table-border-color: hsl(200, 7%, 13%); - --table-header-bg: hsl(200, 7%, 28%); - --table-alternate-bg: hsl(200, 7%, 11%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #b7b7b7; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #98a3ad; - --searchresults-li-bg: #2b2b2f; - --search-mark-bg: #355c7d; - - --color-scheme: dark; -} - -.light { - --bg: hsl(0, 0%, 100%); - --fg: hsl(0, 0%, 0%); - - --sidebar-bg: #fafafa; - --sidebar-fg: #AE518E; - --sidebar-non-existant: #aaaaaa; - --sidebar-active: #2F7E86; - --sidebar-spacer: #f4f4f4; - - --scrollbar: #8F8F8F; - - --icons: #747474; - --icons-hover: #000000; - - --links: #429EC2; - - --inline-code-color: #9d579d; - - --theme-popup-bg: #fafafa; - --theme-popup-border: #cccccc; - --theme-hover: #e6e6e6; - - --quote-bg: hsl(197, 37%, 96%); - --quote-border: hsl(197, 37%, 91%); - - --warning-border: #ff8e00; - - --table-border-color: hsl(0, 0%, 95%); - --table-header-bg: hsl(0, 0%, 80%); - --table-alternate-bg: hsl(0, 0%, 97%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #fafafa; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #e4f2fe; - --search-mark-bg: #a2cff5; - - --color-scheme: light; -} - -.navy { - --bg: hsl(226, 23%, 11%); - --fg: #dfd3db; - - --sidebar-bg: #282d3f; - --sidebar-fg: #fdcbec; - --sidebar-non-existant: 
#505274; - --sidebar-active: #5BCEFA; - --sidebar-spacer: #2d334f; - - --scrollbar: var(--sidebar-fg); - - --icons: #fdcbec; - --icons-hover: #5BCEFA; - - --links: #5BCEFA; - - --inline-code-color: #bd66bc; - - --theme-popup-bg: #161923; - --theme-popup-border: #737480; - --theme-hover: #282e40; - - --quote-bg: hsl(226, 15%, 17%); - --quote-border: hsl(226, 15%, 22%); - - --warning-border: #ff8e00; - - --table-border-color: hsl(226, 23%, 16%); - --table-header-bg: hsl(226, 23%, 31%); - --table-alternate-bg: hsl(226, 23%, 14%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #aeaec6; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #5f5f71; - --searchresults-border-color: #5c5c68; - --searchresults-li-bg: #242430; - --search-mark-bg: #a2cff5; - - --color-scheme: dark; -} - -.rust { - --bg: hsl(60, 9%, 87%); - --fg: #262625; - - --sidebar-bg: #3b2e2a; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #505254; - --sidebar-active: #e69f67; - --sidebar-spacer: #45373a; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #262625; - - --links: #2b79a2; - - --inline-code-color: #6e6b5e; - - --theme-popup-bg: #e1e1db; - --theme-popup-border: #b38f6b; - --theme-hover: #99908a; - - --quote-bg: hsl(60, 5%, 75%); - --quote-border: hsl(60, 5%, 70%); - - --warning-border: #ff8e00; - - --table-border-color: hsl(60, 9%, 82%); - --table-header-bg: #b3a497; - --table-alternate-bg: hsl(60, 9%, 84%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #fafafa; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #dec2a2; - --search-mark-bg: #e69f67; - - --color-scheme: light; -} - -@media (prefers-color-scheme: dark) { - .light.no-js { - --bg: hsl(200, 7%, 8%); - --fg: #98a3ad; - - --sidebar-bg: #292c2f; - --sidebar-fg: #a1adb8; - --sidebar-non-existant: #505254; - --sidebar-active: #3473ad; - --sidebar-spacer: 
#393939; - - --scrollbar: var(--sidebar-fg); - - --icons: #43484d; - --icons-hover: #b3c0cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6; - - --theme-popup-bg: #141617; - --theme-popup-border: #43484d; - --theme-hover: #1f2124; - - --quote-bg: hsl(234, 21%, 18%); - --quote-border: hsl(234, 21%, 23%); - - --warning-border: #ff8e00; - - --table-border-color: hsl(200, 7%, 13%); - --table-header-bg: hsl(200, 7%, 28%); - --table-alternate-bg: hsl(200, 7%, 11%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #b7b7b7; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #98a3ad; - --searchresults-li-bg: #2b2b2f; - --search-mark-bg: #355c7d; - } -} diff --git a/theme/favicon.png b/theme/favicon.png deleted file mode 100644 index 150aea82..00000000 Binary files a/theme/favicon.png and /dev/null differ