diff --git a/.cargo/audit.toml b/.cargo/audit.toml new file mode 100644 index 00000000..37148cfb --- /dev/null +++ b/.cargo/audit.toml @@ -0,0 +1,27 @@ +[advisories] +ignore = ["RUSTSEC-2024-0436", "RUSTSEC-2025-0014"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] +informational_warnings = [] # warn for categories of informational advisories +severity_threshold = "none" # CVSS severity ("none", "low", "medium", "high", "critical") + +# Advisory Database Configuration +[database] +path = "~/.cargo/advisory-db" # Path where advisory git repo will be cloned +url = "https://github.com/RustSec/advisory-db.git" # URL to git repo +fetch = true # Perform a `git fetch` before auditing (default: true) +stale = false # Allow stale advisory DB (i.e. no commits for 90 days, default: false) + +# Output Configuration +[output] +deny = ["warnings", "unmaintained", "unsound", "yanked"] # exit on error if unmaintained dependencies are found +format = "terminal" # "terminal" (human readable report) or "json" +quiet = false # Only print information on error +show_tree = true # Show inverse dependency trees along with advisories (default: true) + +# Target Configuration +[target] +arch = ["x86_64", "aarch64"] # Ignore advisories for CPU architectures other than these +os = ["linux", "windows", "macos"] # Ignore advisories for operating systems other than these + +[yanked] +enabled = true # Warn for yanked crates in Cargo.lock (default: true) +update_index = true # Auto-update the crates.io index (default: true) diff --git a/.dockerignore b/.dockerignore index 35d35e1b..453634df 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,9 +1,9 @@ # Local build and dev artifacts -target -tests +target/ # Docker files Dockerfile* +docker/ # IDE files .vscode diff --git a/.forgejo/workflows/build-alpine.yml b/.forgejo/workflows/build-alpine.yml new file mode 100644 index 00000000..b1757a60 --- /dev/null +++ b/.forgejo/workflows/build-alpine.yml @@ -0,0 +1,49 @@ +on: + - 
workflow_dispatch
name: Build mdbook + run: mdbook build + + - name: Prepare static files for deployment + run: | + mkdir -p ./public/.well-known/matrix + mkdir -p ./public/.well-known/continuwuity + mkdir -p ./public/schema + # Copy the Matrix .well-known files + cp ./docs/static/server ./public/.well-known/matrix/server + cp ./docs/static/client ./public/.well-known/matrix/client + cp ./docs/static/client ./public/.well-known/matrix/support + cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements + cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json + # Copy the custom headers file + cp ./docs/static/_headers ./public/_headers + echo "Copied .well-known files and _headers to ./public" + + - name: Setup Node.js + uses: https://github.com/actions/setup-node@v4 + with: + node-version: 20 + + - name: Install dependencies + run: npm install --save-dev wrangler@latest + + - name: Deploy to Cloudflare Pages (Production) + if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./public --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}" + + - name: Deploy to Cloudflare Pages (Preview) + if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./public --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}" diff --git a/.forgejo/workflows/element.yml b/.forgejo/workflows/element.yml new file mode 100644 index 00000000..db771197 --- /dev/null +++ b/.forgejo/workflows/element.yml @@ -0,0 +1,127 @@ +name: Deploy Element Web 
+ +on: + schedule: + - cron: "0 0 * * *" + workflow_dispatch: + +concurrency: + group: "element-${{ github.ref }}" + cancel-in-progress: true + +jobs: + build-and-deploy: + name: Build and Deploy Element Web + runs-on: ubuntu-latest + + steps: + - name: Setup Node.js + uses: https://code.forgejo.org/actions/setup-node@v4 + with: + node-version: "20" + + - name: Clone, setup, and build Element Web + run: | + echo "Cloning Element Web..." + git clone https://github.com/maunium/element-web + cd element-web + git checkout develop + git pull + + echo "Cloning matrix-js-sdk..." + git clone https://github.com/matrix-org/matrix-js-sdk.git + + echo "Installing Yarn..." + npm install -g yarn + + echo "Installing dependencies..." + yarn install + + echo "Preparing build environment..." + mkdir -p .home + + echo "Cleaning up specific node_modules paths..." + rm -rf node_modules/@types/eslint-scope/ matrix-*-sdk/node_modules/@types/eslint-scope || echo "Cleanup paths not found, continuing." + + echo "Getting matrix-js-sdk commit hash..." + cd matrix-js-sdk + jsver=$(git rev-parse HEAD) + jsver=${jsver:0:12} + cd .. + echo "matrix-js-sdk version hash: $jsver" + + echo "Getting element-web commit hash..." + ver=$(git rev-parse HEAD) + ver=${ver:0:12} + echo "element-web version hash: $ver" + + chmod +x ./build-sh + + export VERSION="$ver-js-$jsver" + echo "Building Element Web version: $VERSION" + ./build-sh + + echo "Checking for build output..." 
+ ls -la webapp/ + + - name: Create config.json + run: | + cat < ./element-web/webapp/config.json + { + "default_server_name": "continuwuity.org", + "default_server_config": { + "m.homeserver": { + "base_url": "https://matrix.continuwuity.org" + } + }, + "default_country_code": "GB", + "default_theme": "dark", + "mobile_guide_toast": false, + "show_labs_settings": true, + "room_directory": [ + "continuwuity.org", + "matrixrooms.info" + ], + "settings_defaults": { + "UIFeature.urlPreviews": true, + "UIFeature.feedback": false, + "UIFeature.voip": false, + "UIFeature.shareQrCode": false, + "UIFeature.shareSocial": false, + "UIFeature.locationSharing": false, + "enableSyntaxHighlightLanguageDetection": true + }, + "features": { + "feature_pinning": true, + "feature_custom_themes": true + } + } + EOF + echo "Created ./element-web/webapp/config.json" + cat ./element-web/webapp/config.json + + - name: Upload Artifact + uses: https://code.forgejo.org/actions/upload-artifact@v3 + with: + name: element-web + path: ./element-web/webapp/ + retention-days: 14 + + - name: Install Wrangler + run: npm install --save-dev wrangler@latest + + - name: Deploy to Cloudflare Pages (Production) + if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element" + + - name: Deploy to Cloudflare Pages (Preview) + if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ 
vars.CLOUDFLARE_PROJECT_NAME }}-element" diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml new file mode 100644 index 00000000..141bfef9 --- /dev/null +++ b/.forgejo/workflows/release-image.yml @@ -0,0 +1,235 @@ +name: Release Docker Image +concurrency: + group: "release-image-${{ github.ref }}" + +on: + pull_request: + push: + paths-ignore: + - "*.md" + - "**/*.md" + - ".gitlab-ci.yml" + - ".gitignore" + - "renovate.json" + - "debian/**" + - "docker/**" + - "docs/**" + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +env: + BUILTIN_REGISTRY: forgejo.ellis.link + BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}" + +jobs: + define-variables: + runs-on: ubuntu-latest + + outputs: + images: ${{ steps.var.outputs.images }} + images_list: ${{ steps.var.outputs.images_list }} + build_matrix: ${{ steps.var.outputs.build_matrix }} + + steps: + - name: Setting variables + uses: https://github.com/actions/github-script@v7 + id: var + with: + script: | + const githubRepo = '${{ github.repository }}'.toLowerCase() + const repoId = githubRepo.split('/')[1] + + core.setOutput('github_repository', githubRepo) + const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo + let images = [] + if (process.env.BUILTIN_REGISTRY_ENABLED === "true") { + images.push(builtinImage) + } + core.setOutput('images', images.join("\n")) + core.setOutput('images_list', images.join(",")) + const platforms = ['linux/amd64', 'linux/arm64'] + core.setOutput('build_matrix', JSON.stringify({ + platform: platforms, + include: platforms.map(platform => { return { + platform, + slug: platform.replace('/', '-') + }}) + })) + + build-image: + runs-on: dind + container: ghcr.io/catthehacker/ubuntu:act-latest + needs: define-variables + permissions: + contents: 
read + packages: write + attestations: write + id-token: write + strategy: + matrix: + { + "include": + [ + { "platform": "linux/amd64", "slug": "linux-amd64" }, + { "platform": "linux/arm64", "slug": "linux-arm64" }, + ], + "platform": ["linux/amd64", "linux/arm64"], + } + steps: + - name: Echo strategy + run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' + - name: Echo matrix + run: echo '${{ toJSON(matrix) }}' + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false + - run: | + if ! command -v rustup &> /dev/null ; then + curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y + echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH + fi + - uses: https://github.com/cargo-bins/cargo-binstall@main + - run: cargo binstall timelord-cli@3.0.1 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. + - name: Login to builtin registry + uses: docker/login-action@v3 + with: + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} + + # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. 
+ - name: Extract metadata (labels, annotations) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{needs.define-variables.outputs.images}} + # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 + env: + DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index + + # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages. + # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository. + # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step. + # It will not push images generated from a pull request + - name: Get short git commit SHA + id: sha + run: | + calculatedSha=$(git rev-parse --short ${{ github.sha }}) + echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV + - name: Get Git commit timestamps + run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV + - name: Set up timelord + uses: actions/cache/restore@v3 + with: + path: /timelord/ + key: timelord-v0 # Cache is already split per runner + - name: Run timelord to set timestamps + run: timelord sync --source-dir . --cache-dir /timelord/ + - name: Save timelord + uses: actions/cache/save@v3 + with: + path: /timelord/ + key: timelord-v0 + - name: Build and push Docker image by digest + id: build + uses: docker/build-push-action@v6 + with: + context: . 
+ file: "docker/Dockerfile" + build-args: | + CONDUWUIT_VERSION_EXTRA=${{ env.COMMIT_SHORT_SHA }} + platforms: ${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + annotations: ${{ steps.meta.outputs.annotations }} + cache-from: type=gha + cache-to: type=gha,mode=max + sbom: true + outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true + env: + SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }} + + # For publishing multi-platform manifests + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + + - name: Upload digest + uses: forgejo/upload-artifact@v4 + with: + name: digests-${{ matrix.slug }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + merge: + runs-on: dind + container: ghcr.io/catthehacker/ubuntu:act-latest + needs: [define-variables, build-image] + steps: + - name: Download digests + uses: forgejo/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. 
+ - name: Login to builtin registry + uses: docker/login-action@v3 + with: + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Extract metadata (tags) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + tags: | + type=semver,pattern=v{{version}} + type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }} + type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} + type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) 1= github.ref && 'branch-' || '' }} + type=ref,event=pr + type=sha,format=long + images: ${{needs.define-variables.outputs.images}} + # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 + env: + DOCKER_METADATA_ANNOTATIONS_LEVELS: index + + - name: Create manifest list and push + working-directory: /tmp/digests + env: + IMAGES: ${{needs.define-variables.outputs.images}} + shell: bash + run: | + IFS=$'\n' + IMAGES_LIST=($IMAGES) + ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS) + TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS) + for REPO in "${IMAGES_LIST[@]}"; do + docker buildx imagetools create \ + $(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \ + $(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \ + $(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done) + done + + - name: Inspect image + env: + IMAGES: ${{needs.define-variables.outputs.images}} + shell: bash + run: | + IMAGES_LIST=($IMAGES) + for REPO in "${IMAGES_LIST[@]}"; do + docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }} + done diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 
00000000..3dfaca65 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,87 @@ +# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Rust.gitattributes +# Auto detect text files and perform normalization +* text=auto + +*.rs text diff=rust +*.toml text diff=toml +Cargo.lock text + +# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Common.gitattributes +# Documents +*.bibtex text diff=bibtex +*.doc diff=astextplain +*.DOC diff=astextplain +*.docx diff=astextplain +*.DOCX diff=astextplain +*.dot diff=astextplain +*.DOT diff=astextplain +*.pdf diff=astextplain +*.PDF diff=astextplain +*.rtf diff=astextplain +*.RTF diff=astextplain +*.md text diff=markdown +*.mdx text diff=markdown +*.tex text diff=tex +*.adoc text +*.textile text +*.mustache text +*.csv text eol=crlf +*.tab text +*.tsv text +*.txt text +*.sql text +*.epub diff=astextplain + +# Graphics +*.png binary +*.jpg binary +*.jpeg binary +*.gif binary +*.tif binary +*.tiff binary +*.ico binary +# SVG treated as text by default. 
+*.svg text +*.eps binary + +# Scripts +*.bash text eol=lf +*.fish text eol=lf +*.ksh text eol=lf +*.sh text eol=lf +*.zsh text eol=lf +# These are explicitly windows files and should use crlf +*.bat text eol=crlf +*.cmd text eol=crlf +*.ps1 text eol=crlf + +# Serialisation +*.json text +*.toml text +*.xml text +*.yaml text +*.yml text + +# Archives +*.7z binary +*.bz binary +*.bz2 binary +*.bzip2 binary +*.gz binary +*.lz binary +*.lzma binary +*.rar binary +*.tar binary +*.taz binary +*.tbz binary +*.tbz2 binary +*.tgz binary +*.tlz binary +*.txz binary +*.xz binary +*.Z binary +*.zip binary +*.zst binary + +# Text files where line endings should be preserved +*.patch -text \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 35d60aa1..00000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,993 +0,0 @@ -name: CI and Artifacts - -on: - pull_request: - push: - paths-ignore: - - '.gitlab-ci.yml' - - '.gitignore' - - 'renovate.json' - - 'debian/**' - - 'docker/**' - branches: - - main - tags: - - '*' - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -concurrency: - group: ${{ github.head_ref || github.ref_name }} - cancel-in-progress: true - -env: - # sccache only on main repo - SCCACHE_GHA_ENABLED: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'true' || 'false' }}" - RUSTC_WRAPPER: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" - SCCACHE_BUCKET: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && 
(vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" - SCCACHE_S3_USE_SSL: ${{ vars.SCCACHE_S3_USE_SSL }} - SCCACHE_REGION: ${{ vars.SCCACHE_REGION }} - SCCACHE_ENDPOINT: ${{ vars.SCCACHE_ENDPOINT }} - SCCACHE_CACHE_MULTIARCH: ${{ vars.SCCACHE_CACHE_MULTIARCH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # Required to make some things output color - TERM: ansi - # Publishing to my nix binary cache - ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} - # conduwuit.cachix.org - CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }} - # Just in case incremental is still being set to true, speeds up CI - CARGO_INCREMENTAL: 0 - # Custom nix binary cache if fork is being used - ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} - ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} - # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps - NIX_CONFIG: | - show-trace = true - extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= - experimental-features = nix-command flakes - extra-experimental-features = nix-command flakes - accept-flake-config = true - WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }} - GH_REF_NAME: ${{ github.ref_name }} - WEBSERVER_DIR_NAME: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - -permissions: {} - -jobs: - tests: - name: Test - runs-on: ubuntu-24.04 - steps: - - name: Setup SSH web publish - env: - 
web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - - - name: Install liburing - run: | - sudo apt install liburing-dev -y - - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true - set -o pipefail - - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Tag comparison check - if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} - run: | - # Tag mismatch with latest repo tag check to prevent potential downgrades - LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - - if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest 
repo tag. Aborting.' - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - # always save the cache - save-always: true - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --inputs-from . 
nixpkgs#direnv nixpkgs#nix-direnv - direnv allow - nix develop .#all-features --command true - - - name: Cache CI dependencies - run: | - bin/nix-build-and-cache ci - bin/nix-build-and-cache just '.#devShells.x86_64-linux.default' - bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features' - bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic' - - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - - name: Run CI tests - env: - CARGO_PROFILE: "test" - run: | - direnv exec . engage > >(tee -a test_output.log) - - - name: Run Complement tests - env: - CARGO_PROFILE: "test" - run: | - # the nix devshell sets $COMPLEMENT_SRC, so "/dev/null" is no-op - direnv exec . 
bin/complement "/dev/null" complement_test_logs.jsonl complement_test_results.jsonl > >(tee -a test_output.log) - cp -v -f result complement_oci_image.tar.gz - - - name: Upload Complement OCI image - uses: actions/upload-artifact@v4 - with: - name: complement_oci_image.tar.gz - path: complement_oci_image.tar.gz - if-no-files-found: error - compression-level: 0 - - - name: Upload Complement logs - uses: actions/upload-artifact@v4 - with: - name: complement_test_logs.jsonl - path: complement_test_logs.jsonl - if-no-files-found: error - - - name: Upload Complement results - uses: actions/upload-artifact@v4 - with: - name: complement_test_results.jsonl - path: complement_test_results.jsonl - if-no-files-found: error - - - name: Diff Complement results with checked-in repo results - run: | - diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log) - - - name: Update Job Summary - env: - GH_JOB_STATUS: ${{ job.status }} - if: success() || failure() - run: | - if [ ${GH_JOB_STATUS} == 'success' ]; then - echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY - else - echo '# CI failure' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - tail -n 40 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - fi - - - name: Run cargo clean test artifacts to free up space - run: | - cargo clean --profile test - - build: - name: Build - runs-on: ubuntu-24.04 - strategy: - matrix: - include: - - target: aarch64-linux-musl - - target: x86_64-linux-musl - steps: - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' 
'^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true - set -o pipefail - - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup SSH web publish - env: - web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - 
# collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - # always save the cache - save-always: true - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --impure --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv - direnv allow - nix develop .#all-features --command true --impure - - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - - name: Build static ${{ matrix.target }}-all-features - run: | - if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl" - elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl" - fi - - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-${{ 
matrix.target }}-all-features - - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduwuit target/release/conduwuit - cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb - mv -v target/release/conduwuit static-${{ matrix.target }} - mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb - - - name: Build static x86_64-linux-musl-all-features-x86_64-haswell-optimised - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl" - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-x86_64-linux-musl-all-features-x86_64-haswell-optimised - - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduwuit target/release/conduwuit - cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - direnv exec . 
cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb - mv -v target/release/conduwuit static-x86_64-linux-musl-x86_64-haswell-optimised - mv -v target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb x86_64-linux-musl-x86_64-haswell-optimised.deb - - # quick smoke test of the x86_64 static release binary - - name: Quick smoke test the x86_64 static release binary - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - # GH actions default runners are x86_64 only - if file result/bin/conduwuit | grep x86-64; then - result/bin/conduwuit --version - result/bin/conduwuit --help - result/bin/conduwuit -Oserver_name="'$(date -u +%s).local'" -Odatabase_path="'/tmp/$(date -u +%s)'" --execute "server admin-notice awawawawawawawawawawa" --execute "server memory-usage" --execute "server shutdown" - fi - - - name: Build static debug ${{ matrix.target }}-all-features - run: | - if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl" - elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl" - fi - - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features-debug - - # > warning: dev profile is not supported and will be a hard error in the future. cargo-deb is for making releases, and it doesn't make sense to use it with dev profiles. - # so we need to coerce cargo-deb into thinking this is a release binary - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduwuit target/release/conduwuit - cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - direnv exec . 
cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}-debug.deb - mv -v target/release/conduwuit static-${{ matrix.target }}-debug - mv -v target/release/${{ matrix.target }}-debug.deb ${{ matrix.target }}-debug.deb - - # quick smoke test of the x86_64 static debug binary - - name: Run x86_64 static debug binary - run: | - # GH actions default runners are x86_64 only - if file result/bin/conduwuit | grep x86-64; then - result/bin/conduwuit --version - fi - - # check validity of produced deb package, invalid debs will error on these commands - - name: Validate produced deb package - run: | - # List contents - dpkg-deb --contents ${{ matrix.target }}.deb - dpkg-deb --contents ${{ matrix.target }}-debug.deb - # List info - dpkg-deb --info ${{ matrix.target }}.deb - dpkg-deb --info ${{ matrix.target }}-debug.deb - - - name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub - uses: actions/upload-artifact@v4 - if: ${{ matrix.target == 'x86_64-linux-musl' }} - with: - name: static-x86_64-linux-musl-x86_64-haswell-optimised - path: static-x86_64-linux-musl-x86_64-haswell-optimised - if-no-files-found: error - - - name: Upload static-${{ matrix.target }}-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: static-${{ matrix.target }} - path: static-${{ matrix.target }} - if-no-files-found: error - - - name: Upload static deb ${{ matrix.target }}-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: deb-${{ matrix.target }} - path: ${{ matrix.target }}.deb - if-no-files-found: error - compression-level: 0 - - - name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - if [ ! 
-z $SSH_WEBSITE ]; then - chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised - scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-x86_64-linux-musl-x86_64-haswell-optimised - fi - - - name: Upload static-${{ matrix.target }}-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x static-${{ matrix.target }} - scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }} - fi - - - name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/x86_64-linux-musl-x86_64-haswell-optimised.deb - fi - - - name: Upload static deb ${{ matrix.target }}-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}.deb - fi - - - name: Upload static-${{ matrix.target }}-debug-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: static-${{ matrix.target }}-debug - path: static-${{ matrix.target }}-debug - if-no-files-found: error - - - name: Upload static deb ${{ matrix.target }}-debug-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: deb-${{ matrix.target }}-debug - path: ${{ matrix.target }}-debug.deb - if-no-files-found: error - compression-level: 0 - - - name: Upload static-${{ matrix.target }}-debug-all-features to webserver - run: | - if [ ! 
-z $SSH_WEBSITE ]; then - scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}-debug - fi - - - name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}-debug.deb - fi - - - name: Build OCI image ${{ matrix.target }}-all-features - run: | - bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features - - cp -v -f result oci-image-${{ matrix.target }}.tar.gz - - - name: Build OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - bin/nix-build-and-cache just .#oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised - - cp -v -f result oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz - - - name: Build debug OCI image ${{ matrix.target }}-all-features - run: | - bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features-debug - - cp -v -f result oci-image-${{ matrix.target }}-debug.tar.gz - - - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub - if: ${{ matrix.target == 'x86_64-linux-musl' }} - uses: actions/upload-artifact@v4 - with: - name: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised - path: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz - if-no-files-found: error - compression-level: 0 - - name: Upload OCI image ${{ matrix.target }}-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: oci-image-${{ matrix.target }} - path: oci-image-${{ matrix.target }}.tar.gz - if-no-files-found: error - compression-level: 0 - - - name: Upload OCI image ${{ matrix.target }}-debug-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - 
name: oci-image-${{ matrix.target }}-debug - path: oci-image-${{ matrix.target }}-debug.tar.gz - if-no-files-found: error - compression-level: 0 - - - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz to webserver - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz - fi - - - name: Upload OCI image ${{ matrix.target }}-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}.tar.gz - fi - - - name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz - fi - - build_mac_binaries: - name: Build MacOS Binaries - strategy: - matrix: - os: [macos-latest, macos-13] - runs-on: ${{ matrix.os }} - steps: - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup SSH web publish - env: - web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - 
- - name: Tag comparison check - if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} - run: | - # Tag mismatch with latest repo tag check to prevent potential downgrades - LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - - if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - # Nix can't do portable macOS builds yet - - name: Build macOS x86_64 binary - if: ${{ matrix.os == 'macos-13' }} - run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls - cp -v -f target/release/conduwuit conduwuit-macos-x86_64 - otool -L conduwuit-macos-x86_64 - - # quick smoke test of the x86_64 macOS binary - - name: Run x86_64 macOS release binary - if: ${{ matrix.os == 'macos-13' }} - run: | - ./conduwuit-macos-x86_64 --help - ./conduwuit-macos-x86_64 --version - - - name: Build macOS arm64 binary - if: ${{ matrix.os == 'macos-latest' }} - run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls - cp -v -f target/release/conduwuit conduwuit-macos-arm64 - otool -L conduwuit-macos-arm64 - - # quick smoke test of the arm64 macOS binary - 
- name: Run arm64 macOS release binary - if: ${{ matrix.os == 'macos-latest' }} - run: | - ./conduwuit-macos-arm64 --help - ./conduwuit-macos-arm64 --version - - - name: Upload macOS x86_64 binary to webserver - if: ${{ matrix.os == 'macos-13' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x conduwuit-macos-x86_64 - scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-x86_64 - fi - - - name: Upload macOS arm64 binary to webserver - if: ${{ matrix.os == 'macos-latest' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x conduwuit-macos-arm64 - scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-arm64 - fi - - - name: Upload macOS x86_64 binary - if: ${{ matrix.os == 'macos-13' }} - uses: actions/upload-artifact@v4 - with: - name: conduwuit-macos-x86_64 - path: conduwuit-macos-x86_64 - if-no-files-found: error - - - name: Upload macOS arm64 binary - if: ${{ matrix.os == 'macos-latest' }} - uses: actions/upload-artifact@v4 - with: - name: conduwuit-macos-arm64 - path: conduwuit-macos-arm64 - if-no-files-found: error - variables: - outputs: - github_repository: ${{ steps.var.outputs.github_repository }} - runs-on: "ubuntu-latest" - steps: - - name: Setting global variables - uses: actions/github-script@v7 - id: var - with: - script: | - core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase()) - docker: - name: Docker publish - runs-on: ubuntu-24.04 - needs: [build, variables, tests] - permissions: - packages: write - contents: read - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' - env: - DOCKER_HUB_REPO: docker.io/${{ needs.variables.outputs.github_repository }} - GHCR_REPO: ghcr.io/${{ needs.variables.outputs.github_repository }} - GLCR_REPO: 
registry.gitlab.com/conduwuit/conduwuit - UNIQUE_TAG: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - BRANCH_TAG: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} - - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} - GHCR_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}" - steps: - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Login to Docker Hub - if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} - uses: docker/login-action@v3 - with: - registry: docker.io - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to GitLab Container Registry - if: ${{ (vars.GITLAB_USERNAME != '') && (env.GITLAB_TOKEN != '') }} - uses: docker/login-action@v3 - with: - registry: registry.gitlab.com - username: ${{ vars.GITLAB_USERNAME }} - password: ${{ secrets.GITLAB_TOKEN }} - - - name: Download artifacts - uses: actions/download-artifact@v4 - - - name: Move OCI images into position - run: | - mv -v oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised/*.tar.gz oci-image-amd64-haswell-optimised.tar.gz - mv -v oci-image-x86_64-linux-musl/*.tar.gz oci-image-amd64.tar.gz - mv -v oci-image-aarch64-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz - mv -v oci-image-x86_64-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz - mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz - - - name: Load and push amd64 haswell image - run: | - docker load -i 
oci-image-amd64-haswell-optimised.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-haswell - docker push ${GHCR_REPO}:${UNIQUE_TAG}-haswell - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-haswell - docker push ${GLCR_REPO}:${UNIQUE_TAG}-haswell - fi - - - name: Load and push amd64 image - run: | - docker load -i oci-image-amd64.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - fi - - - name: Load and push arm64 image - run: | - docker load -i oci-image-arm64v8.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 - docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 - docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 - fi - - - name: Load and push amd64 debug image - run: | - docker load -i oci-image-amd64-debug.tar.gz - if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - - - name: Load and push arm64 debug image - run: | - docker load -i oci-image-arm64v8-debug.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - fi - - - name: Create Docker haswell manifests - run: | - # Dockerhub Container Registry - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - fi - # GitHub Container Registry - if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell - fi - # GitLab Container Registry - if [ ! 
-z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell - fi - - - name: Create Docker combined manifests - run: | - # Dockerhub Container Registry - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - fi - # GitHub Container Registry - if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - docker manifest create ${GHCR_REPO}:${BRANCH_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - fi - # GitLab Container Registry - if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - docker manifest create ${GLCR_REPO}:${BRANCH_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - fi - - - name: Create Docker combined debug manifests - run: | - # Dockerhub Container Registry - if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - fi - # GitHub Container Registry - if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - # GitLab Container Registry - if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - - - name: Push manifests to Docker registries - run: | - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG} - docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG} - docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug - docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug - docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell - fi - if [ $GHCR_ENABLED = "true" ]; then - docker manifest push ${GHCR_REPO}:${UNIQUE_TAG} - docker manifest push ${GHCR_REPO}:${BRANCH_TAG} - docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-debug - docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-debug - docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-haswell - fi - if [ ! 
-z $GITLAB_TOKEN ]; then - docker manifest push ${GLCR_REPO}:${UNIQUE_TAG} - docker manifest push ${GLCR_REPO}:${BRANCH_TAG} - docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-debug - docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-debug - docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-haswell - fi - - - name: Add Image Links to Job Summary - run: | - if [ ! -z $DOCKERHUB_TOKEN ]; then - echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY - fi - if [ $GHCR_ENABLED = "true" ]; then - echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY - fi - if [ ! 
-z $GITLAB_TOKEN ]; then - echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY - fi diff --git a/.github/workflows/docker-hub-description.yml b/.github/workflows/docker-hub-description.yml deleted file mode 100644 index b4f142db..00000000 --- a/.github/workflows/docker-hub-description.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Update Docker Hub Description - -on: - push: - branches: - - main - paths: - - README.md - - .github/workflows/docker-hub-description.yml - - workflow_dispatch: - -jobs: - dockerHubDescription: - runs-on: ubuntu-latest - if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' && (vars.DOCKER_USERNAME != '') }} - steps: - - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setting variables - uses: actions/github-script@v7 - id: var - with: - script: | - const githubRepo = '${{ github.repository }}'.toLowerCase() - const repoId = githubRepo.split('/')[1] - - core.setOutput('github_repository', githubRepo) - const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId - core.setOutput('docker_repo', dockerRepo) - - - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v4 - with: - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - repository: ${{ steps.var.outputs.docker_repo }} - short-description: ${{ github.event.repository.description }} - enable-url-completion: true diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml deleted file mode 100644 index 0eefe0a4..00000000 --- a/.github/workflows/documentation.yml +++ /dev/null @@ -1,163 +0,0 @@ -name: Documentation and GitHub Pages - -on: - 
pull_request: - push: - branches: - - main - tags: - - '*' - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -env: - # Required to make some things output color - TERM: ansi - # Publishing to my nix binary cache - ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} - # conduwuit.cachix.org - CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }} - # Custom nix binary cache if fork is being used - ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} - ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} - # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps - NIX_CONFIG: | - show-trace = true - extra-substituters = extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= - experimental-features = nix-command flakes - extra-experimental-features = nix-command flakes - accept-flake-config = true - -# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. -# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
-concurrency: - group: "pages" - cancel-in-progress: false - -permissions: {} - -jobs: - docs: - name: Documentation and GitHub Pages - runs-on: ubuntu-24.04 - - permissions: - pages: write - id-token: write - - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - - steps: - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku - set -o pipefail - - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup GitHub Pages - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') - uses: actions/configure-pages@v5 - - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more 
than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - # always save the cache - save-always: true - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv - direnv allow - nix develop --command true - - - name: Cache CI dependencies - run: | - bin/nix-build-and-cache ci - - - name: Run lychee and markdownlint - run: | - direnv exec . engage just lints lychee - direnv exec . engage just lints markdownlint - - - name: Build documentation (book) - run: | - bin/nix-build-and-cache just .#book - - cp -r --dereference result public - - - name: Upload generated documentation (book) as normal artifact - uses: actions/upload-artifact@v4 - with: - name: public - path: public - if-no-files-found: error - # don't compress again - compression-level: 0 - - - name: Upload generated documentation (book) as GitHub Pages artifact - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') - uses: actions/upload-pages-artifact@v3 - with: - path: public - - - name: Deploy to GitHub Pages - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') - id: deployment - uses: actions/deploy-pages@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index cfe72d2a..00000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,118 +0,0 @@ -name: Upload Release Assets - -on: - release: - types: [published] - workflow_dispatch: - inputs: - tag: - description: 'Tag to 
release' - required: true - type: string - action_id: - description: 'Action ID of the CI run' - required: true - type: string - -permissions: {} - -jobs: - publish: - runs-on: ubuntu-latest - permissions: - contents: write - env: - GH_EVENT_NAME: ${{ github.event_name }} - GH_EVENT_INPUTS_ACTION_ID: ${{ github.event.inputs.action_id }} - GH_EVENT_INPUTS_TAG: ${{ github.event.inputs.tag }} - GH_REPOSITORY: ${{ github.repository }} - GH_SHA: ${{ github.sha }} - GH_TAG: ${{ github.event.release.tag_name }} - - steps: - - name: get latest ci id - id: get_ci_id - env: - GH_TOKEN: ${{ github.token }} - run: | - if [ "${GH_EVENT_NAME}" == "workflow_dispatch" ]; then - id="${GH_EVENT_INPUTS_ACTION_ID}" - tag="${GH_EVENT_INPUTS_TAG}" - else - # get all runs of the ci workflow - json=$(gh api "repos/${GH_REPOSITORY}/actions/workflows/ci.yml/runs") - - # find first run that is github sha and status is completed - id=$(echo "$json" | jq ".workflow_runs[] | select(.head_sha == \"${GH_SHA}\" and .status == \"completed\") | .id" | head -n 1) - - if [ ! "$id" ]; then - echo "No completed runs found" - echo "ci_id=0" >> "$GITHUB_OUTPUT" - exit 0 - fi - - tag="${GH_TAG}" - fi - - echo "ci_id=$id" >> "$GITHUB_OUTPUT" - echo "tag=$tag" >> "$GITHUB_OUTPUT" - - - name: get latest ci artifacts - if: steps.get_ci_id.outputs.ci_id != 0 - uses: actions/download-artifact@v4 - env: - GH_TOKEN: ${{ github.token }} - with: - merge-multiple: true - run-id: ${{ steps.get_ci_id.outputs.ci_id }} - github-token: ${{ github.token }} - - - run: | - ls - - - name: upload release assets - if: steps.get_ci_id.outputs.ci_id != 0 - env: - GH_TOKEN: ${{ github.token }} - TAG: ${{ steps.get_ci_id.outputs.tag }} - run: | - for file in $(find . 
-type f); do - case "$file" in - *json*) echo "Skipping $file...";; - *) echo "Uploading $file..."; gh release upload $TAG "$file" --clobber --repo="${GH_REPOSITORY}" || echo "Something went wrong, skipping.";; - esac - done - - - name: upload release assets to website - if: steps.get_ci_id.outputs.ci_id != 0 - env: - TAG: ${{ steps.get_ci_id.outputs.tag }} - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config < /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi - # Accept flake config from "untrusted" users - - if command -v nix > /dev/null; then echo "accept-flake-config = true" >> /etc/nix/nix.conf; fi - - # Add conduwuit binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduwuit" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE=" >> /etc/nix/nix.conf; fi - - - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduit" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk=" >> /etc/nix/nix.conf; fi - - # Add alternate binary cache - - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi - - # Add crane binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = 
https://crane.cachix.org" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi - - # Add nix-community binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi - - - if command -v nix > /dev/null; then echo "extra-substituters = https://aseipp-nix-cache.freetls.fastly.net" >> /etc/nix/nix.conf; fi - - # Install direnv and nix-direnv - - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi - - # Allow .envrc - - if command -v nix > /dev/null; then direnv allow; fi - - # Set CARGO_HOME to a cacheable path - - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" - -ci: - stage: ci - image: nixos/nix:2.24.9 - script: - # Cache CI dependencies - - ./bin/nix-build-and-cache ci - - - direnv exec . engage - cache: - key: nix - paths: - - target - - .gitlab-ci.d - rules: - # CI on upstream runners (only available for maintainers) - - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true" - # Manual CI on unprotected branches that are not MRs - - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false" - when: manual - # Manual CI on forks - - if: $IS_UPSTREAM_CI != "true" - when: manual - - if: $CI - interruptible: true - -artifacts: - stage: artifacts - image: nixos/nix:2.24.9 - script: - - ./bin/nix-build-and-cache just .#static-x86_64-linux-musl - - cp result/bin/conduit x86_64-linux-musl - - - mkdir -p target/release - - cp result/bin/conduit target/release - - direnv exec . 
cargo deb --no-build --no-strip - - mv target/debian/*.deb x86_64-linux-musl.deb - - # Since the OCI image package is based on the binary package, this has the - # fun side effect of uploading the normal binary too. Conduit users who are - # deploying with Nix can leverage this fact by adding our binary cache to - # their systems. - # - # Note that although we have an `oci-image-x86_64-linux-musl` - # output, we don't build it because it would be largely redundant to this - # one since it's all containerized anyway. - - ./bin/nix-build-and-cache just .#oci-image - - cp result oci-image-amd64.tar.gz - - - ./bin/nix-build-and-cache just .#static-aarch64-linux-musl - - cp result/bin/conduit aarch64-linux-musl - - - ./bin/nix-build-and-cache just .#oci-image-aarch64-linux-musl - - cp result oci-image-arm64v8.tar.gz - - - ./bin/nix-build-and-cache just .#book - # We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746 - - cp -r --dereference result public - artifacts: - paths: - - x86_64-linux-musl - - aarch64-linux-musl - - x86_64-linux-musl.deb - - oci-image-amd64.tar.gz - - oci-image-arm64v8.tar.gz - - public - rules: - # CI required for all MRs - - if: $CI_PIPELINE_SOURCE == "merge_request_event" - # Optional CI on forks - - if: $IS_UPSTREAM_CI != "true" - when: manual - allow_failure: true - - if: $CI - interruptible: true - -pages: - stage: publish - dependencies: - - artifacts - only: - - next - script: - - "true" - artifacts: - paths: - - public diff --git a/.gitlab/merge_request_templates/MR.md b/.gitlab/merge_request_templates/MR.md deleted file mode 100644 index 4210554b..00000000 --- a/.gitlab/merge_request_templates/MR.md +++ /dev/null @@ -1,8 +0,0 @@ - - - ------------------------------------------------------------------------------ - -- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test` -- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license - diff --git 
a/.gitlab/route-map.yml b/.gitlab/route-map.yml deleted file mode 100644 index cf31bd18..00000000 --- a/.gitlab/route-map.yml +++ /dev/null @@ -1,3 +0,0 @@ -# Docs: Map markdown to html files -- source: /docs/(.+)\.md/ - public: '\1.html' diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..fa267e13 --- /dev/null +++ b/.mailmap @@ -0,0 +1,15 @@ +AlexPewMaster <68469103+AlexPewMaster@users.noreply.github.com> +Daniel Wiesenberg +Devin Ragotzy +Devin Ragotzy +Jonas Platte +Jonas Zohren +Jonathan de Jong +June Clementine Strawberry +June Clementine Strawberry +June Clementine Strawberry +Olivia Lee +Rudi Floren +Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> +Timo Kösters +x4u <14617923-x4u@users.noreply.gitlab.com> diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..a4fad964 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,11 @@ +{ + "cSpell.words": [ + "Forgejo", + "appservice", + "appservices", + "conduwuit", + "continuwuity", + "homeserver", + "homeservers" + ] +} diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index e77154e7..476e68fb 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,3 @@ - # Contributor Covenant Code of Conduct ## Our Pledge @@ -60,8 +59,7 @@ representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement over email at - or over Matrix at @strawberry:puppygock.gay. +reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or email at , and respectively. All complaints will be reviewed and investigated promptly and fairly. 
All community leaders are obligated to respect the privacy and security of the diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fb540011..ecff7173 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ This page is for about contributing to conduwuit. The [development](./development.md) page may be of interest for you as well. If you would like to work on an [issue][issues] that is not assigned, preferably -ask in the Matrix room first at [#conduwuit:puppygock.gay][conduwuit-matrix], +ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix], and comment on it. ### Linting and Formatting @@ -23,9 +23,9 @@ suggestion, allow the lint and mention that in a comment. ### Running CI tests locally -conduwuit's CI for tests, linting, formatting, audit, etc use +continuwuity's CI for tests, linting, formatting, audit, etc use [`engage`][engage]. engage can be installed from nixpkgs or `cargo install -engage`. conduwuit's Nix flake devshell has the nixpkgs engage with `direnv`. +engage`. continuwuity's Nix flake devshell has the nixpkgs engage with `direnv`. Use `engage --help` for more usage details. To test, format, lint, etc that CI would do, install engage, allow the `.envrc` @@ -111,33 +111,28 @@ applies here. ### Creating pull requests -Please try to keep contributions to the GitHub. While the mirrors of conduwuit -allow for pull/merge requests, there is no guarantee I will see them in a timely +Please try to keep contributions to the Forgejo Instance. While the mirrors of continuwuity +allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely manner. Additionally, please mark WIP or unfinished or incomplete PRs as drafts. -This prevents me from having to ping once in a while to double check the status +This prevents us from having to ping once in a while to double check the status of it, especially when the CI completed successfully and everything so it *looks* done. 
-If you open a pull request on one of the mirrors, it is your responsibility to -inform me about its existence. In the future I may try to solve this with more -repo bots in the conduwuit Matrix room. There is no mailing list or email-patch -support on the sr.ht mirror, but if you'd like to email me a git patch you can -do so at `strawberry@puppygock.gay`. Direct all PRs/MRs to the `main` branch. By sending a pull request or patch, you are agreeing that your changes are allowed to be licenced under the Apache-2.0 licence and all of your conduct is -in line with the Contributor's Covenant, and conduwuit's Code of Conduct. +in line with the Contributor's Covenant, and continuwuity's Code of Conduct. Contribution by users who violate either of these code of conducts will not have their contributions accepted. This includes users who have been banned from -conduwuit Matrix rooms for Code of Conduct violations. +continuwuityMatrix rooms for Code of Conduct violations. -[issues]: https://github.com/girlbossceo/conduwuit/issues -[conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay +[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues +[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org [complement]: https://github.com/matrix-org/complement/ -[engage.toml]: https://github.com/girlbossceo/conduwuit/blob/main/engage.toml +[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml [engage]: https://charles.page.computer.surgery/engage/ [sytest]: https://github.com/matrix-org/sytest/ [cargo-deb]: https://github.com/kornelski/cargo-deb @@ -146,4 +141,4 @@ conduwuit Matrix rooms for Code of Conduct violations. 
[cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit [direnv]: https://direnv.net/ [mdbook]: https://rust-lang.github.io/mdBook/ -[documentation.yml]: https://github.com/girlbossceo/conduwuit/blob/main/.github/workflows/documentation.yml +[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml diff --git a/Cargo.lock b/Cargo.lock index e379aebb..2d8a2d0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,6 +26,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aa90d7ce82d4be67b64039a3d588d38dbcc6736577de4a847025ce5b0c468d1" + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -49,9 +55,15 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" + +[[package]] +name = "arbitrary" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" [[package]] name = "arc-swap" @@ -59,6 +71,17 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "arg_enum_proc_macro" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "argon2" version = "0.5.3" @@ -82,9 +105,9 @@ dependencies = [ [[package]] name = "as_variant" -version = "1.2.0" +version 
= "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38fa22307249f86fb7fad906fcae77f2564caeb56d7209103c551cd1cf4798f" +checksum = "9dbc3a507a82b17ba0d98f6ce8fd6954ea0c8152e98009d36a40d8dcc8ce078a" [[package]] name = "assign" @@ -95,7 +118,7 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-channel" version = "2.3.1" -source = "git+https://github.com/girlbossceo/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280" +source = "git+https://forgejo.ellis.link/continuwuation/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -105,9 +128,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.18" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" +checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" dependencies = [ "brotli", "flate2", @@ -119,6 +142,17 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -138,18 +172,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -174,28 +208,49 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] -name = "aws-lc-rs" -version = "1.12.1" +name = "av1-grain" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ea835662a0af02443aa1396d39be523bbf8f11ee6fad20329607c480bea48c3" +checksum = "6678909d8c5d46a42abcf571271e15fdbc0a225e3646cf23762cd415046c78bf" +dependencies = [ + "anyhow", + "arrayvec", + "log", + "nom", + "num-rational", + "v_frame", +] + +[[package]] +name = "avif-serialize" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98922d6a4cfbcb08820c69d8eeccc05bb1f29bfa06b4f5b1dbfe9a868bd7608e" +dependencies = [ + "arrayvec", +] + +[[package]] +name = "aws-lc-rs" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b756939cb2f8dc900aa6dcd505e6e2428e9cae7ff7b028c49e3946efa70878" dependencies = [ "aws-lc-sys", - "paste", "zeroize", ] [[package]] name = "aws-lc-sys" -version = "0.25.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b2ddd3ada61a305e1d8bb6c005d1eaa7d14d903681edfc400406d523a9b491" +checksum = "b9f7720b74ed28ca77f90769a71fd8c637a0137f6fae4ae947e1050229cff57f" dependencies = [ - "bindgen", + "bindgen 0.69.5", "cc", "cmake", "dunce", "fs_extra", - "paste", ] [[package]] @@ -288,16 +343,15 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" +checksum = "495c05f60d6df0093e8fb6e74aa5846a0ad06abaf96d76166283720bf740f8ab" dependencies = [ "arc-swap", "bytes", - "futures-util", + "fs-err", "http", "http-body", - "http-body-util", 
"hyper", "hyper-util", "pin-project-lite", @@ -306,7 +360,6 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls", - "tower 0.4.13", "tower-service", ] @@ -358,9 +411,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.6.0" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" [[package]] name = "bindgen" @@ -368,7 +421,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -381,10 +434,34 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.96", + "syn", "which", ] +[[package]] +name = "bindgen" +version = "0.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +dependencies = [ + "bitflags 2.9.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash 2.1.1", + "shlex", + "syn", +] + +[[package]] +name = "bit_field" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" + [[package]] name = "bitflags" version = "1.3.2" @@ -393,9 +470,15 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" + +[[package]] 
+name = "bitstream-io" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6099cdc01846bc367c4e7dd630dc5966dccf36b652fae7a74e17b640411a91b2" [[package]] name = "blake2" @@ -415,6 +498,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blurhash" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79769241dcd44edf79a732545e8b5cec84c247ac060f5252cd51885d093a8fc" +dependencies = [ + "image", +] + [[package]] name = "brotli" version = "7.0.0" @@ -428,25 +520,31 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.1" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", ] [[package]] -name = "bumpalo" -version = "3.16.0" +name = "built" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b" + +[[package]] +name = "bumpalo" +version = "3.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytemuck" -version = "1.21.0" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" [[package]] name = "byteorder" @@ -462,24 +560,23 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.9.0" +version = "1.10.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bytesize" -version = "1.3.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" +checksum = "a3c8f83209414aacf0eeae3cf730b18d6981697fba62f200fcfb92b9f082acba" [[package]] name = "bzip2-sys" -version = "0.1.11+1.0.8" +version = "0.1.13+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" dependencies = [ "cc", - "libc", "pkg-config", ] @@ -495,9 +592,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.10" +version = "1.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" dependencies = [ "jobserver", "libc", @@ -513,6 +610,16 @@ dependencies = [ "nom", ] +[[package]] +name = "cfg-expr" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" +dependencies = [ + "smallvec", + "target-lexicon", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -536,9 +643,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "num-traits", ] @@ -556,9 +663,9 @@ dependencies = [ [[package]] name = "clap" 
-version = "4.5.26" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" +checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" dependencies = [ "clap_builder", "clap_derive", @@ -566,9 +673,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.26" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" +checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" dependencies = [ "anstyle", "clap_lex", @@ -576,14 +683,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.24" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -594,9 +701,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.52" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" dependencies = [ "cc", ] @@ -618,7 +725,7 @@ dependencies = [ [[package]] name = "conduwuit" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "clap", "conduwuit_admin", @@ -647,7 +754,7 @@ dependencies = [ [[package]] name = "conduwuit_admin" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "clap", "conduwuit_api", @@ -668,15 +775,15 @@ dependencies = [ [[package]] name = "conduwuit_api" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ + "async-trait", "axum", 
"axum-client-ip", "axum-extra", "base64 0.22.1", "bytes", "conduwuit_core", - "conduwuit_database", "conduwuit_service", "const-str", "futures", @@ -685,9 +792,9 @@ dependencies = [ "http-body-util", "hyper", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "log", - "rand", + "rand 0.8.5", "reqwest", "ruma", "serde", @@ -700,11 +807,12 @@ dependencies = [ [[package]] name = "conduwuit_core" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "argon2", "arrayvec", "axum", + "axum-extra", "bytes", "bytesize", "cargo_toml", @@ -723,13 +831,14 @@ dependencies = [ "http", "http-body-util", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "libc", "libloading", "log", + "maplit", "nix", "num-traits", - "rand", + "rand 0.8.5", "regex", "reqwest", "ring", @@ -739,7 +848,9 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", - "thiserror 2.0.11", + "smallstr", + "smallvec", + "thiserror 2.0.12", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -754,9 +865,8 @@ dependencies = [ [[package]] name = "conduwuit_database" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ - "arrayvec", "async-channel", "conduwuit_core", "const-str", @@ -764,27 +874,26 @@ dependencies = [ "log", "minicbor", "minicbor-serde", - "rust-rocksdb-uwu", + "rust-rocksdb", "serde", "serde_json", - "smallvec", "tokio", "tracing", ] [[package]] name = "conduwuit_macros" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] name = "conduwuit_router" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "axum", "axum-client-ip", @@ -817,26 +926,26 @@ dependencies = [ [[package]] name = "conduwuit_service" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ - "arrayvec", "async-trait", "base64 0.22.1", + "blurhash", "bytes", "conduwuit_core", "conduwuit_database", "const-str", "either", "futures", - "hickory-resolver", + 
"hickory-resolver 0.25.1", "http", "image", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "log", "loole", "lru-cache", - "rand", + "rand 0.8.5", "regex", "reqwest", "ruma", @@ -845,7 +954,6 @@ dependencies = [ "serde_json", "serde_yaml", "sha2", - "smallvec", "termimad", "tokio", "tracing", @@ -900,9 +1008,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-str" -version = "0.5.7" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3618cccc083bb987a415d85c02ca6c9994ea5b44731ec28b9ecf09658655fba9" +checksum = "9e991226a70654b49d34de5ed064885f0bef0348a8e70018b8ff1ac80aa984a2" [[package]] name = "const_panic" @@ -938,7 +1046,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_affinity" version = "0.8.1" -source = "git+https://github.com/girlbossceo/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" +source = "git+https://forgejo.ellis.link/continuwuation/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" dependencies = [ "libc", "num_cpus", @@ -947,9 +1055,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -964,10 +1072,16 @@ dependencies = [ ] [[package]] -name = "crokey" -version = "1.1.0" +name = "critical-section" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520e83558f4c008ac06fa6a86e5c1d4357be6f994cce7434463ebcdaadf47bb1" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + +[[package]] +name = "crokey" +version = "1.1.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5ff945e42bb93d29b10ba509970066a269903a932f0ea07d99d8621f97e90d7" dependencies = [ "crokey-proc_macros", "crossterm", @@ -978,15 +1092,15 @@ dependencies = [ [[package]] name = "crokey-proc_macros" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370956e708a1ce65fe4ac5bb7185791e0ece7485087f17736d54a23a0895049f" +checksum = "665f2180fd82d0ba2bf3deb45fafabb18f23451024ff71ee47f6bfdfb4bbe09e" dependencies = [ "crossterm", "proc-macro2", "quote", "strict", - "syn 1.0.109", + "syn", ] [[package]] @@ -1004,9 +1118,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] @@ -1051,7 +1165,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "crossterm_winapi", "futures-core", "mio", @@ -1071,6 +1185,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + [[package]] name = "crypto-common" version = "0.1.6" @@ -1088,7 +1208,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -1115,7 +1235,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + 
"syn", ] [[package]] @@ -1126,9 +1246,9 @@ checksum = "817fa642fb0ee7fe42e95783e00e0969927b96091bdd4b9b1af082acd943913b" [[package]] name = "data-encoding" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" +checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" [[package]] name = "date_header" @@ -1158,9 +1278,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", ] @@ -1184,7 +1304,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -1211,7 +1331,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2", "subtle", @@ -1220,9 +1340,9 @@ dependencies = [ [[package]] name = "either" -version = "1.13.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" dependencies = [ "serde", ] @@ -1236,14 +1356,14 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" @@ -1252,13 
+1372,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "event-listener" version = "5.3.1" -source = "git+https://github.com/girlbossceo/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d" +source = "git+https://forgejo.ellis.link/continuwuation/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d" dependencies = [ "concurrent-queue", "parking", @@ -1267,14 +1387,29 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ "event-listener", "pin-project-lite", ] +[[package]] +name = "exr" +version = "1.73.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83197f59927b46c04a183a619b7c29df34e63e63c7869320862268c0ef687e0" +dependencies = [ + "bit_field", + "half", + "lebe", + "miniz_oxide", + "rayon-core", + "smallvec", + "zune-inflate", +] + [[package]] name = "fdeflate" version = "0.3.7" @@ -1318,9 +1453,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.35" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ "crc32fast", "miniz_oxide", @@ -1351,6 +1486,16 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "fs-err" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" +dependencies = [ + "autocfg", + "tokio", +] + [[package]] name = "fs_extra" version = "1.3.0" @@ -1422,7 +1567,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -1455,6 +1600,19 @@ dependencies = [ "slab", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -1474,7 +1632,21 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", "wasm-bindgen", ] @@ -1502,9 +1674,9 @@ checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "h2" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" dependencies = [ "atomic-waker", "bytes", @@ -1512,13 +1684,23 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.0", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", "tracing", ] +[[package]] +name = "half" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" +dependencies = [ + "cfg-if", + 
"crunchy", +] + [[package]] name = "hardened_malloc-rs" version = "0.1.2+12" @@ -1594,9 +1776,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.2" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" +checksum = "92652067c9ce6f66ce53cc38d1169daa36e6e7eb7dd3b63b5103bd9d97117248" dependencies = [ "async-trait", "cfg-if", @@ -1608,7 +1790,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tinyvec", "tokio", @@ -1617,19 +1799,47 @@ dependencies = [ ] [[package]] -name = "hickory-resolver" -version = "0.24.2" +name = "hickory-proto" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" +checksum = "6d844af74f7b799e41c78221be863bade11c430d46042c3b49ca8ae0c6d27287" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "critical-section", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.0", + "ring", + "serde", + "thiserror 2.0.12", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" dependencies = [ "cfg-if", "futures-util", - "hickory-proto", + "hickory-proto 0.24.4", "ipconfig", "lru-cache", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "resolv-conf", "smallvec", "thiserror 1.0.69", @@ -1637,6 +1847,28 @@ dependencies = [ "tracing", ] +[[package]] +name = "hickory-resolver" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a128410b38d6f931fcc6ca5c107a3b02cabd6c05967841269a4ad65d23c44331" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto 0.25.1", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.0", + "resolv-conf", + "serde", + "smallvec", + "thiserror 2.0.12", + "tokio", + "tracing", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1655,17 +1887,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - [[package]] name = "hostname" version = "0.4.0" @@ -1674,7 +1895,7 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ "cfg-if", "libc", - "windows", + "windows 0.52.0", ] [[package]] @@ -1688,14 +1909,14 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -1723,12 +1944,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "pin-project-lite", @@ -1736,9 +1957,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -1748,15 +1969,15 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" [[package]] name = "hyper" -version = "1.5.2" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", @@ -1794,9 +2015,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", "hyper-util", @@ -1807,9 +2028,8 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +version = "0.1.11" +source = "git+https://forgejo.ellis.link/continuwuation/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941" dependencies = [ "bytes", "futures-channel", @@ -1817,10 +2037,10 @@ dependencies = [ "http", "http-body", "hyper", + "libc", "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -1866,9 +2086,9 @@ dependencies = [ [[package]] name = "icu_locid_transform_data" -version = 
"1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" [[package]] name = "icu_normalizer" @@ -1890,9 +2110,9 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] name = "icu_properties" @@ -1911,9 +2131,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] name = "icu_provider" @@ -1940,7 +2160,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -1966,17 +2186,23 @@ dependencies = [ [[package]] name = "image" -version = "0.25.5" +version = "0.25.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6f44aed642f18953a158afeb30206f4d50da59fbc66ecb53c66488de73563b" +checksum = "db35664ce6b9810857a38a906215e75a9c879f0696556a39f59c62829710251a" dependencies = [ "bytemuck", "byteorder-lite", "color_quant", + "exr", "gif", "image-webp", "num-traits", "png", + "qoi", + "ravif", + "rayon", + "rgb", + "tiff", "zune-core", "zune-jpeg", ] @@ -1988,9 +2214,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f" dependencies = [ "byteorder-lite", - "quick-error 2.0.1", + "quick-error", ] +[[package]] 
+name = "imgref" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0263a3d970d5c054ed9312c0057b4f3bde9c0b33836d3637361d4a9e6e7a408" + [[package]] name = "indexmap" version = "1.9.3" @@ -2003,9 +2235,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2024,6 +2256,17 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" +[[package]] +name = "interpolate_name" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "ipaddress" version = "0.1.3" @@ -2052,9 +2295,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "itertools" @@ -2075,20 +2318,36 @@ dependencies = [ ] [[package]] -name = "itoa" -version = "1.0.14" +name = "itertools" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.2", "libc", ] +[[package]] +name = "jpeg-decoder" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" + [[package]] name = "js-sys" version = "0.3.77" @@ -2157,7 +2416,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.96", + "syn", ] [[package]] @@ -2173,10 +2432,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] -name = "libc" -version = "0.2.169" +name = "lebe" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" + +[[package]] +name = "libc" +version = "0.2.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" + +[[package]] +name = "libfuzzer-sys" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf78f52d400cf2d84a3a973a78a592b4adc535739e0a5597a0da6f0c357adc75" +dependencies = [ + "arbitrary", + "cc", +] [[package]] name = "libloading" @@ -2185,14 +2460,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "libz-sys" 
-version = "1.1.21" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "pkg-config", @@ -2213,9 +2488,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "lock_api" @@ -2229,9 +2504,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "loole" @@ -2243,6 +2518,28 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "loop9" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fae87c125b03c1d2c0150c90365d7d6bcc53fb73a9acaef207d2d065860f062" +dependencies = [ + "imgref", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -2300,12 +2597,6 @@ dependencies = [ "xml5ever", ] -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - [[package]] name = "matchers" version = 
"0.1.0" @@ -2321,6 +2612,16 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "maybe-rayon" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" +dependencies = [ + "cfg-if", + "rayon", +] + [[package]] name = "memchr" version = "2.7.4" @@ -2335,29 +2636,29 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minicbor" -version = "0.25.1" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0452a60c1863c1f50b5f77cd295e8d2786849f35883f0b9e18e7e6e1b5691b0" +checksum = "1936e27fffe7d8557c060eb82cb71668608cd1a5fb56b63e66d22ae8d7564321" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.15.3" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" +checksum = "a9882ef5c56df184b8ffc107fc6c61e33ee3a654b021961d790a78571bb9d67a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] name = "minicbor-serde" -version = "0.3.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "becf18ac384ecf6f53b2db3b1549eebff664c67ecf259ae99be5912193291686" +checksum = "54e45e8beeefea1b8b6f52fa188a5b6ea3746c2885606af8d4d8bf31cee633fb" dependencies = [ "minicbor", "serde", @@ -2380,9 +2681,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +checksum = 
"8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ "adler2", "simd-adler32", @@ -2396,10 +2697,29 @@ checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "log", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid", +] + [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -2412,7 +2732,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if", "cfg_aliases", "libc", @@ -2434,6 +2754,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +[[package]] +name = "noop_proc_macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8" + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2483,6 +2809,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "num-integer" version = "0.1.46" @@ 
-2544,15 +2881,19 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" @@ -2562,7 +2903,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.7.0", + "indexmap 2.8.0", "js-sys", "once_cell", "pin-project-lite", @@ -2611,7 +2952,7 @@ dependencies = [ "opentelemetry", "ordered-float 4.6.0", "percent-encoding", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tokio", "tokio-stream", @@ -2637,9 +2978,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.9.2" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" +checksum = "2a604e53c24761286860eba4e2c8b23a0161526476b1de520139d69cdb85a6b5" dependencies = [ "log", "serde", @@ -2688,7 +3029,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2718,7 +3059,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -2733,7 +3074,7 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_shared 0.11.3", + "phf_shared", ] [[package]] @@ -2742,18 +3083,8 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" dependencies = [ - "phf_generator 0.11.3", - "phf_shared 0.11.3", -] - -[[package]] -name = "phf_generator" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" -dependencies = [ - "phf_shared 0.10.0", - "rand", + "phf_generator", + "phf_shared", ] [[package]] @@ -2762,17 +3093,8 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared 0.11.3", - "rand", -] - -[[package]] -name = "phf_shared" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" -dependencies = [ - "siphasher 0.3.11", + "phf_shared", + "rand 0.8.5", ] [[package]] @@ -2781,27 +3103,27 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ - "siphasher 1.0.1", + "siphasher", ] [[package]] name = "pin-project" -version = "1.1.8" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -2828,9 +3150,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "png" @@ -2845,6 +3167,12 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + [[package]] name = "powerfmt" version = "0.2.0" @@ -2853,9 +3181,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ "zerocopy", ] @@ -2868,28 +3196,28 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "prettyplease" -version = "0.2.29" +version = "0.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" +checksum = "5316f57387668042f561aae71480de936257848f9c43ce528e311d89a07cadeb" dependencies = [ "proc-macro2", - "syn 2.0.96", + "syn", ] [[package]] name = "proc-macro-crate" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" 
+checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -2902,16 +3230,35 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", "version_check", "yansi", ] [[package]] -name = "prost" -version = "0.13.4" +name = "profiling" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" +checksum = "afbdc74edc00b6f6a218ca6a5364d6226a259d4b8ea1af4a0ea063f27e179f4d" +dependencies = [ + "profiling-procmacros", +] + +[[package]] +name = "profiling-procmacros" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", "prost-derive", @@ -2919,33 +3266,33 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] name = "prost-types" -version = "0.13.4" 
+version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" dependencies = [ "prost", ] [[package]] name = "pulldown-cmark" -version = "0.12.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "memchr", "pulldown-cmark-escape", "unicase", @@ -2958,10 +3305,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "007d8adb5ddab6f8e3f491ac63566a7d5002cc7ed73901f72057943fa71ae1ae" [[package]] -name = "quick-error" -version = "1.2.3" +name = "qoi" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001" +dependencies = [ + "bytemuck", +] [[package]] name = "quick-error" @@ -2971,37 +3321,39 @@ checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" dependencies = [ "bytes", + "cfg_aliases", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tracing", + "web-time 1.1.0", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" dependencies = [ "bytes", - "getrandom", - "rand", + "getrandom 0.3.2", + "rand 0.9.0", "ring", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tracing", "web-time 1.1.0", @@ -3009,27 +3361,33 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" +checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" dependencies = [ "cfg_aliases", "libc", "once_cell", "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "quote" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "rand" version = "0.8.5" @@ -3037,8 +3395,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "zerocopy", ] [[package]] @@ -3048,7 
+3417,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -3057,16 +3436,95 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", +] + +[[package]] +name = "rav1e" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd87ce80a7665b1cce111f8a16c1f3929f6547ce91ade6addf4ec86a8dda5ce9" +dependencies = [ + "arbitrary", + "arg_enum_proc_macro", + "arrayvec", + "av1-grain", + "bitstream-io", + "built", + "cfg-if", + "interpolate_name", + "itertools 0.12.1", + "libc", + "libfuzzer-sys", + "log", + "maybe-rayon", + "new_debug_unreachable", + "noop_proc_macro", + "num-derive", + "num-traits", + "once_cell", + "paste", + "profiling", + "rand 0.8.5", + "rand_chacha 0.3.1", + "simd_helpers", + "system-deps", + "thiserror 1.0.69", + "v_frame", + "wasm-bindgen", +] + +[[package]] +name = "ravif" +version = "0.11.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2413fd96bd0ea5cdeeb37eaf446a22e6ed7b981d792828721e74ded1980a45c6" +dependencies = [ + "avif-serialize", + "imgref", + "loop9", + "quick-error", + "rav1e", + "rayon", + "rgb", +] + +[[package]] +name = "rayon" +version 
= "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", ] [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -3115,9 +3573,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" dependencies = [ "async-compression", "base64 0.22.1", @@ -3126,7 +3584,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "hickory-resolver", + "hickory-resolver 0.24.4", "http", "http-body", "http-body-util", @@ -3153,6 +3611,7 @@ dependencies = [ "tokio-rustls", "tokio-socks", "tokio-util", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", @@ -3164,25 +3623,28 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +version = "0.7.1" +source = 
"git+https://forgejo.ellis.link/continuwuation/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" dependencies = [ - "hostname 0.3.1", - "quick-error 1.2.3", + "hostname", ] [[package]] -name = "ring" -version = "0.17.8" +name = "rgb" +version = "0.8.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] @@ -3190,7 +3652,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "assign", "js_int", @@ -3203,16 +3665,14 @@ dependencies = [ "ruma-identifiers-validation", "ruma-identity-service-api", "ruma-push-gateway-api", - "ruma-server-util", "ruma-signatures", - "ruma-state-res", "web-time 1.1.0", ] [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3224,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "assign", @@ -3239,7 +3699,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.12", "url", "web-time 1.1.0", ] @@ -3247,18 +3707,19 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", + "getrandom 0.2.15", "http", - "indexmap 2.7.0", + "indexmap 2.8.0", "js_int", "konst", "percent-encoding", - "rand", + "rand 0.8.5", "regex", "ruma-identifiers-validation", "ruma-macros", @@ -3266,7 +3727,7 @@ dependencies = [ "serde_html_form", "serde_json", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "time", "tracing", "url", @@ -3278,10 +3739,10 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", - "indexmap 2.7.0", + "indexmap 2.8.0", "js_int", "js_option", "percent-encoding", @@ -3293,7 +3754,7 @@ dependencies = [ "serde", "serde_json", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", "url", "web-time 1.1.0", @@ -3303,34 +3764,38 @@ dependencies = [ [[package]] name = 
"ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "bytes", + "headers", "http", + "http-auth", "httparse", "js_int", "memchr", "mime", - "rand", + "rand 0.8.5", "ruma-common", "ruma-events", "serde", "serde_json", + "thiserror 2.0.12", + "tracing", ] [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3340,7 +3805,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3348,14 +3813,14 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.96", + "syn", "toml", ] [[package]] name = "ruma-push-gateway-api" version = "0.9.0" 
-source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3364,56 +3829,28 @@ dependencies = [ "serde_json", ] -[[package]] -name = "ruma-server-util" -version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" -dependencies = [ - "headers", - "http", - "http-auth", - "ruma-common", - "thiserror 2.0.11", - "tracing", -] - [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "base64 0.22.1", "ed25519-dalek", "pkcs8", - "rand", + "rand 0.8.5", "ruma-common", "serde_json", "sha2", "subslice", - "thiserror 2.0.11", -] - -[[package]] -name = "ruma-state-res" -version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" -dependencies = [ - "futures-util", - "js_int", - "ruma-common", - "ruma-events", - "serde", - "serde_json", - "thiserror 2.0.11", - "tracing", + "thiserror 2.0.12", ] [[package]] name = "rust-librocksdb-sys" -version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" +version = "0.33.0+9.11.1" +source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" 
dependencies = [ - "bindgen", + "bindgen 0.71.1", "bzip2-sys", "cc", "glob", @@ -3427,19 +3864,11 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" +version = "0.37.0" +source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" dependencies = [ "libc", "rust-librocksdb-sys", - "serde", -] - -[[package]] -name = "rust-rocksdb-uwu" -version = "0.0.1" -dependencies = [ - "rust-rocksdb", ] [[package]] @@ -3456,9 +3885,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -3471,22 +3900,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.43" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.21" +version = "0.23.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" +checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" dependencies = [ "aws-lc-rs", "log", @@ -3521,18 +3950,18 @@ dependencies = [ [[package]] name = "rustls-pki-types" 
-version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" dependencies = [ "web-time 1.1.0", ] [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" dependencies = [ "aws-lc-rs", "ring", @@ -3542,30 +3971,30 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "rustyline-async" version = "0.4.3" -source = "git+https://github.com/girlbossceo/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" +source = "git+https://forgejo.ellis.link/continuwuation/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" dependencies = [ "crossterm", "futures-channel", "futures-util", "pin-project", "thingbuf", - "thiserror 2.0.11", + "thiserror 2.0.12", "unicode-segmentation", "unicode-width 0.2.0", ] [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "sanitize-filename" @@ -3585,6 +4014,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -3593,9 +4028,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sd-notify" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561e6b346a5e59e0b8a07894004897d7160567e3352d2ebd6c3741d4e086b6f5" +checksum = "b943eadf71d8b69e661330cb0e2656e31040acf21ee7708e2c238a0ec6af2bf4" dependencies = [ "libc", ] @@ -3606,7 +4041,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation", "core-foundation-sys", "libc", @@ -3625,15 +4060,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016958f51b96861dead7c1e02290f138411d05e94fad175c8636a835dee6e51e" +checksum = "255914a8e53822abd946e2ce8baa41d4cded6b8e938913b7f7b9da5b7ab44335" dependencies = [ "httpdate", "reqwest", @@ -3653,9 +4088,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57712c24e99252ef175b4b06c485294f10ad6bc5b5e1567ff3803ee7a0b7d3f" +checksum = "00293cd332a859961f24fd69258f7e92af736feaeb91020cff84dac4188a4302" dependencies = [ "backtrace", "once_cell", @@ -3665,11 +4100,11 @@ dependencies = [ [[package]] name 
= "sentry-contexts" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" +checksum = "961990f9caa76476c481de130ada05614cd7f5aa70fb57c2142f0e09ad3fb2aa" dependencies = [ - "hostname 0.4.0", + "hostname", "libc", "os_info", "rustc_version", @@ -3679,12 +4114,12 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" +checksum = "1a6409d845707d82415c800290a5d63be5e3df3c2e417b0997c60531dfbd35ef" dependencies = [ "once_cell", - "rand", + "rand 0.8.5", "sentry-types", "serde", "serde_json", @@ -3692,9 +4127,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8982a69133d3f5e4efdbfa0776937fca43c3a2e275a8fe184f50b1b0aa92e07c" +checksum = "71ab5df4f3b64760508edfe0ba4290feab5acbbda7566a79d72673065888e5cc" dependencies = [ "findshlibs", "once_cell", @@ -3703,9 +4138,9 @@ dependencies = [ [[package]] name = "sentry-log" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efcbfbb74628eaef033c1154d4bb082437c7592ce2282c7c5ccb455c4c97a06d" +checksum = "693841da8dfb693af29105edfbea1d91348a13d23dd0a5d03761eedb9e450c46" dependencies = [ "log", "sentry-core", @@ -3713,9 +4148,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de296dae6f01e931b65071ee5fe28d66a27909857f744018f107ed15fd1f6b25" +checksum = "609b1a12340495ce17baeec9e08ff8ed423c337c1a84dffae36a178c783623f3" dependencies = [ "sentry-backtrace", "sentry-core", @@ -3723,9 +4158,9 @@ dependencies = [ 
[[package]] name = "sentry-tower" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdaf9b1939589476bd57751d12a9653bbfe356610fc476d03d7683189183ab7" +checksum = "4b98005537e38ee3bc10e7d36e7febe9b8e573d03f2ddd85fcdf05d21f9abd6d" dependencies = [ "http", "pin-project", @@ -3737,9 +4172,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" +checksum = "49f4e86402d5c50239dc7d8fd3f6d5e048221d5fcb4e026d8d50ab57fe4644cb" dependencies = [ "sentry-backtrace", "sentry-core", @@ -3749,13 +4184,13 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" +checksum = "3d3f117b8755dbede8260952de2aeb029e20f432e72634e8969af34324591631" dependencies = [ "debugid", "hex", - "rand", + "rand 0.8.5", "serde", "serde_json", "thiserror 1.0.69", @@ -3766,22 +4201,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.217" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -3791,7 +4226,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.7.0", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -3799,9 +4234,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.135" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", @@ -3811,9 +4246,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" dependencies = [ "itoa", "serde", @@ -3856,7 +4291,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -3936,7 +4371,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -3946,10 +4381,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] -name = "siphasher" -version = "0.3.11" +name = "simd_helpers" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +checksum = "95890f873bec569a0362c235787f3aca6e1e887302ba4840839bcc6459c42da6" 
+dependencies = [ + "quote", +] [[package]] name = "siphasher" @@ -3967,30 +4405,34 @@ dependencies = [ ] [[package]] -name = "smallvec" -version = "1.13.2" +name = "smallstr" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "63b1aefdf380735ff8ded0b15f31aab05daf1f70216c01c02a12926badd1df9d" +dependencies = [ + "serde", + "smallvec", +] + +[[package]] +name = "smallvec" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" dependencies = [ "serde", ] [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" dependencies = [ "libc", "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - [[package]] name = "spki" version = "0.7.3" @@ -4015,26 +4457,25 @@ checksum = "f42444fea5b87a39db4218d9422087e66a85d0e7a0963a439b07bcdf91804006" [[package]] name = "string_cache" -version = "0.8.7" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" dependencies = [ "new_debug_unreachable", - "once_cell", "parking_lot", - "phf_shared 0.10.0", + "phf_shared", "precomputed-hash", "serde", ] [[package]] name = "string_cache_codegen" -version = "0.5.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6bb30289b722be4ff74a408c3cc27edeaad656e06cb1fe8fa9231fa59c728988" +checksum = "c711928715f1fe0fe509c53b43e993a9a557babc2d0a3567d0a3006f1ac931a0" dependencies = [ - "phf_generator 0.10.0", - "phf_shared 0.10.0", + "phf_generator", + "phf_shared", "proc-macro2", "quote", ] @@ -4056,20 +4497,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "1.0.109" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.96" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -4093,9 +4523,34 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] +[[package]] +name = "system-deps" +version = "6.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e535eb8dded36d55ec13eddacd30dec501792ff23a0b1682c38601b8cf2349" +dependencies = [ + "cfg-expr", + "heck", + "pkg-config", + "toml", + "version-compare", +] + +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + +[[package]] +name = "target-lexicon" +version = "0.12.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" + [[package]] name = "tendril" version = "0.4.3" @@ -4109,9 +4564,9 @@ dependencies = [ [[package]] name = "termimad" -version = "0.31.1" +version = "0.31.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a5d4cf55d9f1cb04fcda48f725772d0733ae34e030dfc4dd36e738a5965f4" +checksum = "a8e19c6dbf107bec01d0e216bb8219485795b7d75328e4fa5ef2756c1be4f8dc" dependencies = [ "coolor", "crokey", @@ -4144,11 +4599,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.12", ] [[package]] @@ -4159,18 +4614,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -4205,10 +4660,21 @@ dependencies = [ "threadpool", ] +[[package]] +name = "tiff" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba1310fcea54c6a9a4fd1aad794ecc02c31682f6bfbecdf460bf19533eed1e3e" +dependencies = [ + "flate2", + "jpeg-decoder", + "weezl", +] + [[package]] name = "tikv-jemalloc-ctl" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "paste", @@ -4218,7 +4684,7 @@ dependencies = [ [[package]] name = 
"tikv-jemalloc-sys" version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" -source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "cc", "libc", @@ -4227,7 +4693,7 @@ dependencies = [ [[package]] name = "tikv-jemallocator" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -4235,9 +4701,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", @@ -4250,15 +4716,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", @@ -4276,9 +4742,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.1" +version 
= "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -4291,9 +4757,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", @@ -4315,7 +4781,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] @@ -4332,9 +4798,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", @@ -4365,9 +4831,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" dependencies = [ "bytes", "futures-core", @@ -4378,9 +4844,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", 
@@ -4399,11 +4865,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.8.0", "serde", "serde_spanned", "toml_datetime", @@ -4451,7 +4917,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -4482,7 +4948,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags 2.8.0", + "bitflags 2.9.0", "bytes", "futures-core", "futures-util", @@ -4513,9 +4979,8 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.41" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -4524,17 +4989,17 @@ dependencies = [ [[package]] name = "tracing-attributes" version = "0.1.28" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] name = "tracing-core" version = "0.1.33" -source = 
"git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "once_cell", "valuable", @@ -4554,7 +5019,7 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "log", "once_cell", @@ -4581,8 +5046,8 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +version = "0.3.19" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "matchers", "nu-ansi-term", @@ -4604,9 +5069,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "typewit" @@ -4649,9 +5114,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.14" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +checksum = 
"5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-segmentation" @@ -4736,14 +5201,25 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.12.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom", + "getrandom 0.3.2", "serde", ] +[[package]] +name = "v_frame" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6f32aaa24bacd11e488aa9ba66369c7cd514885742c9fe08cfe85884db3e92b" +dependencies = [ + "aligned-vec", + "num-traits", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -4756,6 +5232,12 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "version-compare" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" + [[package]] name = "version_check" version = "0.9.5" @@ -4777,6 +5259,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.100" @@ -4799,7 +5290,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.96", + "syn", "wasm-bindgen-shared", ] @@ -4834,7 +5325,7 @@ checksum = 
"8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4892,9 +5383,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.7" +version = "0.26.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" +checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" dependencies = [ "rustls-pki-types", ] @@ -4919,9 +5410,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" [[package]] name = "wildmatch" @@ -4957,7 +5448,17 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core", + "windows-core 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", "windows-targets 0.52.6", ] @@ -4971,16 +5472,57 @@ dependencies = [ ] [[package]] -name = "windows-registry" -version = "0.2.0" +name = "windows-core" +version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ - "windows-result", - "windows-strings", + "windows-implement", + "windows-interface", + "windows-result 0.2.0", + "windows-strings 0.1.0", 
"windows-targets 0.52.6", ] +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-registry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +dependencies = [ + "windows-result 0.3.2", + "windows-strings 0.3.1", + "windows-targets 0.53.0", +] + [[package]] name = "windows-result" version = "0.2.0" @@ -4990,16 +5532,34 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-strings" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result", + "windows-result 0.2.0", "windows-targets 0.52.6", ] +[[package]] +name = "windows-strings" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +dependencies = [ + "windows-link", +] + [[package]] name = 
"windows-sys" version = "0.48.0" @@ -5051,13 +5611,29 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -5070,6 +5646,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -5082,6 +5664,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -5094,12 +5682,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" 
+[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -5112,6 +5712,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -5124,6 +5730,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -5136,6 +5748,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] 
name = "windows_x86_64_msvc" version = "0.48.5" @@ -5149,10 +5767,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] -name = "winnow" -version = "0.6.24" +name = "windows_x86_64_msvc" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" dependencies = [ "memchr", ] @@ -5167,6 +5791,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.0", +] + [[package]] name = "write16" version = "1.0.0" @@ -5216,49 +5849,48 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" dependencies = [ - "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.96", + "syn", ] [[package]] name = "zerofrom" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", "synstructure", ] @@ -5287,33 +5919,34 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn", ] [[package]] name = "zstd" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.2.1" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" +version = "2.0.15+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ + "bindgen 0.71.1", "cc", "pkg-config", ] @@ -5324,6 +5957,15 @@ version = "0.4.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" +[[package]] +name = "zune-inflate" +version = "0.2.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02" +dependencies = [ + "simd-adler32", +] + [[package]] name = "zune-jpeg" version = "0.4.14" diff --git a/Cargo.toml b/Cargo.toml index c4af4a7c..1ce5c1db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,25 +13,25 @@ authors = [ ] categories = ["network-programming"] description = "a very cool Matrix chat homeserver written in Rust" -edition = "2021" -homepage = "https://conduwuit.puppyirl.gay/" +edition = "2024" +homepage = "https://continuwuity.org/" keywords = ["chat", "matrix", "networking", "server", "uwu"] license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" -repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.84.0" -version = "0.5.0" +repository = "https://forgejo.ellis.link/continuwuation/continuwuity" +rust-version = "1.86.0" +version = "0.5.0-rc.5" [workspace.metadata.crane] name = "conduwuit" [workspace.dependencies.arrayvec] -version = "0.7.4" +version = "0.7.6" features = ["serde"] [workspace.dependencies.smallvec] -version = "1.13.2" +version = "1.14.0" features = [ "const_generics", "const_new", @@ -40,8 +40,12 @@ features = [ "write", ] +[workspace.dependencies.smallstr] +version = "0.3" +features = ["ffi", "std", "union"] + [workspace.dependencies.const-str] -version = "0.5.7" +version = "0.6.2" [workspace.dependencies.ctor] version = "0.2.9" @@ -77,13 +81,13 @@ version = "0.8.5" # Used for the http request / response body type for Ruma endpoints used with reqwest [workspace.dependencies.bytes] -version = "1.9.0" +version = "1.10.1" [workspace.dependencies.http-body-util] -version = "0.1.2" +version = "0.1.3" [workspace.dependencies.http] -version = "1.2.0" +version = "1.3.1" 
[workspace.dependencies.regex] version = "1.11.1" @@ -107,7 +111,7 @@ default-features = false features = ["typed-header", "tracing"] [workspace.dependencies.axum-server] -version = "0.7.1" +version = "0.7.2" default-features = false # to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for complement or sytest @@ -118,7 +122,7 @@ version = "0.7" version = "0.6.1" [workspace.dependencies.tower] -version = "0.5.1" +version = "0.5.2" default-features = false features = ["util"] @@ -127,21 +131,22 @@ version = "0.6.2" default-features = false features = [ "add-extension", + "catch-panic", "cors", "sensitive-headers", "set-header", + "timeout", "trace", "util", - "catch-panic", ] [workspace.dependencies.rustls] -version = "0.23.19" +version = "0.23.25" default-features = false features = ["aws_lc_rs"] [workspace.dependencies.reqwest] -version = "0.12.9" +version = "0.12.15" default-features = false features = [ "rustls-tls-native-roots", @@ -151,12 +156,12 @@ features = [ ] [workspace.dependencies.serde] -version = "1.0.216" +version = "1.0.219" default-features = false features = ["rc"] [workspace.dependencies.serde_json] -version = "1.0.133" +version = "1.0.140" default-features = false features = ["raw_value"] @@ -178,7 +183,7 @@ version = "0.5.3" features = ["alloc", "rand"] default-features = false -# Used to generate thumbnails for images +# Used to generate thumbnails for images & blurhashes [workspace.dependencies.image] version = "0.25.5" default-features = false @@ -189,15 +194,23 @@ features = [ "webp", ] +[workspace.dependencies.blurhash] +version = "0.2.3" +default-features = false +features = [ + "fast-linear-to-srgb", + "image", +] + # logging [workspace.dependencies.log] -version = "0.4.22" +version = "0.4.27" default-features = false [workspace.dependencies.tracing] version = "0.1.41" default-features = false [workspace.dependencies.tracing-subscriber] -version = "=0.3.18" +version = "0.3.19" default-features = false features = 
["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"] [workspace.dependencies.tracing-core] @@ -211,7 +224,7 @@ default-features = false # used for conduwuit's CLI and admin room command parsing [workspace.dependencies.clap] -version = "4.5.23" +version = "4.5.35" default-features = false features = [ "derive", @@ -224,12 +237,12 @@ features = [ ] [workspace.dependencies.futures] -version = "0.3.30" +version = "0.3.31" default-features = false features = ["std", "async-await"] [workspace.dependencies.tokio] -version = "1.42.0" +version = "1.44.2" default-features = false features = [ "fs", @@ -262,7 +275,7 @@ features = ["alloc", "std"] default-features = false [workspace.dependencies.hyper] -version = "1.5.1" +version = "1.6.0" default-features = false features = [ "server", @@ -271,8 +284,7 @@ features = [ ] [workspace.dependencies.hyper-util] -# hyper-util >=0.1.9 seems to have DNS issues -version = "=0.1.8" +version = "0.1.11" default-features = false features = [ "server-auto", @@ -282,7 +294,7 @@ features = [ # to support multiple variations of setting a config option [workspace.dependencies.either] -version = "1.13.0" +version = "1.15.0" default-features = false features = ["serde"] @@ -293,22 +305,27 @@ default-features = false features = ["env", "toml"] [workspace.dependencies.hickory-resolver] -version = "0.24.2" +version = "0.25.1" default-features = false +features = [ + "serde", + "system-config", + "tokio", +] # Used for conduwuit::Error type [workspace.dependencies.thiserror] -version = "2.0.7" +version = "2.0.12" default-features = false # Used when hashing the state [workspace.dependencies.ring] -version = "0.17.8" +version = "0.17.14" default-features = false # Used to make working with iterators easier, was already a transitive depdendency [workspace.dependencies.itertools] -version = "0.13.0" +version = "0.14.0" # to parse user-friendly time durations in admin commands #TODO: overlaps chrono? 
@@ -324,16 +341,16 @@ version = "0.4.0" version = "2.3.1" [workspace.dependencies.async-trait] -version = "0.1.83" +version = "0.1.88" [workspace.dependencies.lru-cache] version = "0.1.2" # Used for matrix spec type definitions and helpers [workspace.dependencies.ruma] -git = "https://github.com/girlbossceo/ruwuma" +git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "b560338b2a50dbf61ecfe80808b9b095ad4cec00" +rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" features = [ "compat", "rand", @@ -342,8 +359,6 @@ features = [ "federation-api", "markdown", "push-gateway-api-c", - "state-res", - "server-util", "unstable-exhaustive-types", "ring-compat", "compat-upload-signatures", @@ -360,24 +375,27 @@ features = [ "unstable-msc3381", # polls "unstable-msc3489", # beacon / live location "unstable-msc3575", + "unstable-msc3930", # polls push rules "unstable-msc4075", + "unstable-msc4095", "unstable-msc4121", "unstable-msc4125", "unstable-msc4186", "unstable-msc4203", # sending to-device events to appservices "unstable-msc4210", # remove legacy mentions "unstable-extensible-events", + "unstable-pdu", ] [workspace.dependencies.rust-rocksdb] -path = "deps/rust-rocksdb" -package = "rust-rocksdb-uwu" +git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1" +rev = "fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" +default-features = false features = [ "multi-threaded-cf", "mt_static", "lz4", "zstd", - "zlib", "bzip2", ] @@ -409,7 +427,7 @@ features = ["rt-tokio"] # optional sentry metrics for crash/panic reporting [workspace.dependencies.sentry] -version = "0.35.0" +version = "0.37.0" default-features = false features = [ "backtrace", @@ -425,13 +443,13 @@ features = [ ] [workspace.dependencies.sentry-tracing] -version = "0.35.0" +version = "0.37.0" [workspace.dependencies.sentry-tower] -version = "0.35.0" +version = "0.37.0" # jemalloc usage [workspace.dependencies.tikv-jemalloc-sys] -git = 
"https://github.com/girlbossceo/jemallocator" +git = "https://forgejo.ellis.link/continuwuation/jemallocator" rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = [ @@ -439,7 +457,7 @@ features = [ "unprefixed_malloc_on_supported_platforms", ] [workspace.dependencies.tikv-jemallocator] -git = "https://github.com/girlbossceo/jemallocator" +git = "https://forgejo.ellis.link/continuwuation/jemallocator" rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = [ @@ -447,7 +465,7 @@ features = [ "unprefixed_malloc_on_supported_platforms", ] [workspace.dependencies.tikv-jemalloc-ctl] -git = "https://github.com/girlbossceo/jemallocator" +git = "https://forgejo.ellis.link/continuwuation/jemallocator" rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = ["use_std"] @@ -461,7 +479,7 @@ default-features = false features = ["resource"] [workspace.dependencies.sd-notify] -version = "0.4.3" +version = "0.4.5" default-features = false [workspace.dependencies.hardened_malloc-rs] @@ -478,25 +496,25 @@ version = "0.4.3" default-features = false [workspace.dependencies.termimad] -version = "0.31.1" +version = "0.31.2" default-features = false [workspace.dependencies.checked_ops] version = "0.1" [workspace.dependencies.syn] -version = "2.0.90" +version = "2.0" default-features = false features = ["full", "extra-traits"] [workspace.dependencies.quote] -version = "1.0.37" +version = "1.0" [workspace.dependencies.proc-macro2] -version = "1.0.89" +version = "1.0" [workspace.dependencies.bytesize] -version = "1.3.0" +version = "2.0" [workspace.dependencies.core_affinity] version = "0.8.1" @@ -508,51 +526,67 @@ version = "0.2" version = "0.2" [workspace.dependencies.minicbor] -version = "0.25.1" +version = "0.26.3" features = ["std"] [workspace.dependencies.minicbor-serde] -version = "0.3.2" +version = "0.4.1" features = ["std"] +[workspace.dependencies.maplit] +version = "1.0.2" + # # Patches # # 
backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing. # we can switch back to upstream if #2956 is merged and backported in the upstream repo. -# https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c +# https://forgejo.ellis.link/continuwuation/tracing/commit/b348dca742af641c47bc390261f60711c2af573c [patch.crates-io.tracing-subscriber] -git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +git = "https://forgejo.ellis.link/continuwuation/tracing" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing] -git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +git = "https://forgejo.ellis.link/continuwuation/tracing" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-core] -git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +git = "https://forgejo.ellis.link/continuwuation/tracing" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-log] -git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +git = "https://forgejo.ellis.link/continuwuation/tracing" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" -# adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 -# adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b +# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 +# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b [patch.crates-io.rustyline-async] -git = "https://github.com/girlbossceo/rustyline-async" +git = 
"https://forgejo.ellis.link/continuwuation/rustyline-async" rev = "deaeb0694e2083f53d363b648da06e10fc13900c" # adds LIFO queue scheduling; this should be updated with PR progress. [patch.crates-io.event-listener] -git = "https://github.com/girlbossceo/event-listener" +git = "https://forgejo.ellis.link/continuwuation/event-listener" rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d" [patch.crates-io.async-channel] -git = "https://github.com/girlbossceo/async-channel" +git = "https://forgejo.ellis.link/continuwuation/async-channel" rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280" +# adds affinity masks for selecting more than one core at a time [patch.crates-io.core_affinity] -git = "https://github.com/girlbossceo/core_affinity_rs" +git = "https://forgejo.ellis.link/continuwuation/core_affinity_rs" rev = "9c8e51510c35077df888ee72a36b4b05637147da" +# reverts hyperium#148 conflicting with our delicate federation resolver hooks +[patch.crates-io.hyper-util] +git = "https://forgejo.ellis.link/continuwuation/hyper-util" +rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" + +# allows no-aaaa option in resolv.conf +# bumps rust edition and toolchain to 1.86.0 and 2024 +# use sat_add on line number errors +[patch.crates-io.resolv-conf] +git = "https://forgejo.ellis.link/continuwuation/resolv-conf" +rev = "200e958941d522a70c5877e3d846f55b5586c68d" + # # Our crates # @@ -669,7 +703,7 @@ inherits = "release" # To enable hot-reloading: # 1. Uncomment all of the rustflags here. -# 2. Uncomment crate-type=dylib in src/*/Cargo.toml and deps/rust-rocksdb/Cargo.toml +# 2. Uncomment crate-type=dylib in src/*/Cargo.toml # # opt-level, mir-opt-level, validate-mir are not known to interfere with reloading # and can be raised if build times are tolerable. 
@@ -737,27 +771,6 @@ inherits = "dev" # '-Clink-arg=-Wl,-z,lazy', #] -[profile.dev.package.rust-rocksdb-uwu] -inherits = "dev" -debug = 'limited' -incremental = false -codegen-units = 1 -opt-level = 'z' -#rustflags = [ -# '--cfg', 'conduwuit_mods', -# '-Ztls-model=initial-exec', -# '-Cprefer-dynamic=true', -# '-Zstaticlib-prefer-dynamic=true', -# '-Zstaticlib-allow-rdylib-deps=true', -# '-Zpacked-bundled-libs=true', -# '-Zplt=true', -# '-Clink-arg=-Wl,--no-as-needed', -# '-Clink-arg=-Wl,--allow-shlib-undefined', -# '-Clink-arg=-Wl,-z,lazy', -# '-Clink-arg=-Wl,-z,nodlopen', -# '-Clink-arg=-Wl,-z,nodelete', -#] - [profile.dev.package.'*'] inherits = "dev" debug = 'limited' @@ -845,6 +858,9 @@ unused_crate_dependencies = "allow" unsafe_code = "allow" variant_size_differences = "allow" +# we check nightly clippy lints +unknown_lints = "allow" + ####################################### # # Clippy lints @@ -889,9 +905,11 @@ missing_docs_in_private_items = { level = "allow", priority = 1 } missing_errors_doc = { level = "allow", priority = 1 } missing_panics_doc = { level = "allow", priority = 1 } module_name_repetitions = { level = "allow", priority = 1 } +needless_continue = { level = "allow", priority = 1 } no_effect_underscore_binding = { level = "allow", priority = 1 } similar_names = { level = "allow", priority = 1 } single_match_else = { level = "allow", priority = 1 } +struct_excessive_bools = { level = "allow", priority = 1 } struct_field_names = { level = "allow", priority = 1 } unnecessary_wraps = { level = "allow", priority = 1 } unused_async = { level = "allow", priority = 1 } @@ -953,9 +971,13 @@ style = { level = "warn", priority = -1 } # trivial assertions are quite alright assertions_on_constants = { level = "allow", priority = 1 } module_inception = { level = "allow", priority = 1 } +obfuscated_if_else = { level = "allow", priority = 1 } ################### suspicious = { level = "warn", priority = -1 } ## some sadness let_underscore_future = { level = 
"allow", priority = 1 } + +# rust doesnt understand conduwuit's custom log macros +literal_string_with_formatting_args = { level = "allow", priority = 1 } diff --git a/README.md b/README.md index 13a1c67f..bf4f5613 100644 --- a/README.md +++ b/README.md @@ -1,146 +1,115 @@ -# conduwuit - -[![conduwuit main room](https://img.shields.io/matrix/conduwuit%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit:puppygock.gay) [![conduwuit space](https://img.shields.io/matrix/conduwuit-space%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit-space%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit-space:puppygock.gay) [![CI and Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) +# continuwuity -### a very cool [Matrix](https://matrix.org/) chat homeserver written in Rust +## A community-driven [Matrix](https://matrix.org/) homeserver in Rust -Visit the [conduwuit documentation](https://conduwuit.puppyirl.gay/) for more -information and how to deploy/setup conduwuit. +[continuwuity] is a Matrix homeserver written in Rust. +It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver. -#### What is Matrix? + +### Why does this exist? + +The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features. + +We aim to provide a stable, well-maintained alternative for current Conduit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver. + +### Who are we? 
+ +We are a group of Matrix enthusiasts, developers and system administrators who have used conduwuit and believe in its potential. Our team includes both previous +contributors to the original project and new developers who want to help maintain and improve this important piece of Matrix infrastructure. + +We operate as an open community project, welcoming contributions from anyone interested in improving continuwuity. + +### What is Matrix? [Matrix](https://matrix.org) is an open, federated, and extensible network for -decentralised communication. Users from any Matrix homeserver can chat with users from all +decentralized communication. Users from any Matrix homeserver can chat with users from all other homeservers over federation. Matrix is designed to be extensible and built on top of. You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord. -#### What is the goal? +### What are the project's goals? -A high-performance, efficient, low-cost, and featureful Matrix homeserver that's -easy to set up and just works with minimal configuration needed. +Continuwuity aims to: -#### Can I try it out? +- Maintain a stable, reliable Matrix homeserver implementation in Rust +- Improve compatibility and specification compliance with the Matrix protocol +- Fix bugs and performance issues from the original conduwuit +- Add missing features needed by homeserver administrators +- Provide comprehensive documentation and easy deployment options +- Create a sustainable development model for long-term maintenance +- Keep a lightweight, efficient codebase that can run on modest hardware -An official conduwuit server ran by me is available at transfem.dev -([element.transfem.dev](https://element.transfem.dev) / -[cinny.transfem.dev](https://cinny.transfem.dev)) +### Can I try it out? -transfem.dev is a public homeserver that can be used, it is not a "test only -homeserver". 
This means there are rules, so please read the rules: -[https://transfem.dev/homeserver_rules.txt](https://transfem.dev/homeserver_rules.txt) +Check out the [documentation](introduction) for installation instructions. -transfem.dev is also listed at -[servers.joinmatrix.org](https://servers.joinmatrix.org/), which is a list of -popular public Matrix homeservers, including some others that run conduwuit. +There are currently no open registration Continuwuity instances available. -#### What is the current status? +### What are we working on? -conduwuit is technically a hard fork of [Conduit](https://conduit.rs/), which is in beta. -The beta status initially was inherited from Conduit, however the huge amount of -codebase divergance, changes, fixes, and improvements have effectively made this -beta status not entirely applicable to us anymore. +We're working our way through all of the issues in the [Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues). -conduwuit is very stable based on our rapidly growing userbase, has lots of features that users -expect, and very usable as a daily driver for small, medium, and upper-end medium sized homeservers. 
+- [Replacing old conduwuit links with working continuwuity links](https://forgejo.ellis.link/continuwuation/continuwuity/issues/742) +- [Getting CI and docs deployment working on the new Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues/740) +- [Packaging & availability in more places](https://forgejo.ellis.link/continuwuation/continuwuity/issues/747) +- [Appservices bugs & features](https://forgejo.ellis.link/continuwuation/continuwuity/issues?q=&type=all&state=open&labels=178&milestone=0&assignee=0&poster=0) +- [Improving compatibility and spec compliance](https://forgejo.ellis.link/continuwuation/continuwuity/issues?labels=119) +- Automated testing +- [Admin API](https://forgejo.ellis.link/continuwuation/continuwuity/issues/748) +- [Policy-list controlled moderation](https://forgejo.ellis.link/continuwuation/continuwuity/issues/750) -A lot of critical stability and performance issues have been fixed, and a lot of -necessary groundwork has finished; making this project way better than it was -back in the start at ~early 2024. +### Can I migrate my data from x? -#### How is conduwuit funded? Is conduwuit sustainable? - -conduwuit has no external funding. This is made possible purely in my freetime with -contributors, also in their free time, and only by user-curated donations. - -conduwuit has existed since around November 2023, but [only became more publicly known -in March/April 2024](https://matrix.org/blog/2024/04/26/this-week-in-matrix-2024-04-26/#conduwuit-website) -and we have no plans in stopping or slowing down any time soon! - -#### Can I migrate or switch from Conduit? - -conduwuit is a complete drop-in replacement for Conduit. As long as you are using RocksDB, -the only "migration" you need to do is replace the binary or container image. There -is no harm or additional steps required for using conduwuit. 
See the -[Migrating from Conduit](https://conduwuit.puppyirl.gay/deploying/generic.html#migrating-from-conduit) section -on the generic deploying guide. - -Note that as of conduwuit version 0.5.0, backwards compatibility with Conduit is -no longer supported. We only support migrating *from* Conduit, not back to -Conduit like before. If you are truly finding yourself wanting to migrate back -to Conduit, we would appreciate all your feedback and if we can assist with -any issues or concerns. - -#### Can I migrate from Synapse or Dendrite? - -Currently there is no known way to seamlessly migrate all user data from the old -homeserver to conduwuit. However it is perfectly acceptable to replace the old -homeserver software with conduwuit using the same server name and there will not -be any issues with federation. - -There is an interest in developing a built-in seamless user data migration -method into conduwuit, however there is no concrete ETA or timeline for this. +- Conduwuit: Yes +- Conduit: No, database is now incompatible +- Grapevine: No, database is now incompatible +- Dendrite: No +- Synapse: No +We haven't written up a guide on migrating from incompatible homeservers yet. Reach out to us if you need to do this! +## Contribution + +### Development flow + +- Features / changes must developed in a separate branch +- For each change, create a descriptive PR +- Your code will be reviewed by one or more of the continuwuity developers +- The branch will be deployed live on multiple tester's matrix servers to shake out bugs +- Once all testers and reviewers have agreed, the PR will be merged to the main branch +- The main branch will have nightly builds deployed to users on the cutting edge +- Every week or two, a new release is cut. + +The main branch is always green! + + +### Policy on pulling from other forks + +We welcome contributions from other forks of conduwuit, subject to our review process. 
+When incorporating code from other forks: + +- All external contributions must go through our standard PR process +- Code must meet our quality standards and pass tests +- Code changes will require testing on multiple test servers before merging +- Attribution will be given to original authors and forks +- We prioritize stability and compatibility when evaluating external contributions +- Features that align with our project goals will be given priority consideration + #### Contact -[`#conduwuit:puppygock.gay`](https://matrix.to/#/#conduwuit:puppygock.gay) -is the official project Matrix room. You can get support here, ask questions or -concerns, get assistance setting up conduwuit, etc. - -This room should stay relevant and focused on conduwuit. An offtopic general -chatter room can be found there as well. - -Please keep the issue trackers focused on bug reports and enhancement requests. -General support is extremely difficult to be offered over an issue tracker, and -simple questions should be asked directly in an interactive platform like our -Matrix room above as they can turn into a relevant discussion and/or may not be -simple to answer. If you're not sure, just ask in the Matrix room. - -If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) - -#### Donate - -conduwuit development is purely made possible by myself and contributors. I do -not get paid to work on this, and I work on it in my free time. Donations are -heavily appreciated! 💜🥺 - -- Liberapay (preferred): -- GitHub Sponsors (preferred): -- Ko-fi: - -I do not and will not accept cryptocurrency donations, including things related. - -#### Logo - -Original repo and Matrix room picture was from bran (<3). Current banner image -and logo is directly from [this cohost -post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for). - -#### Is it conduwuit or Conduwuit? 
- -Both, but I prefer conduwuit. - -#### Mirrors of conduwuit - -If GitHub is unavailable in your country, or has poor connectivity, conduwuit's -source code is mirrored onto the following additional platforms I maintain: - -- GitHub: -- GitLab: -- git.girlcock.ceo: -- git.gay: -- mau.dev: -- Codeberg: -- sourcehut: +Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [space](https://matrix.to/#/#space:continuwuity.org) to chat with us about the project! + + +[continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity + diff --git a/alpine/APKBUILD b/alpine/APKBUILD new file mode 100644 index 00000000..97f84f65 --- /dev/null +++ b/alpine/APKBUILD @@ -0,0 +1,63 @@ +# Contributor: magmaus3 +# Maintainer: magmaus3 +pkgname=continuwuity + +# abuild doesn't like the format of v0.5.0-rc.5, so i had to change it +# see https://wiki.alpinelinux.org/wiki/Package_policies +pkgver=0.5.0_rc5 +pkgrel=0 +pkgdesc="a continuwuation of a very cool, featureful fork of conduit" +url="https://continuwuity.org/" +arch="all" +license="Apache-2.0" +depends="liburing" + +# cargo version on alpine v3.21 is too old to use the 2024 edition +# i recommend either building everything on edge, or adding +# the edge repo as a tag +makedepends="cargo liburing-dev clang-dev linux-headers" +checkdepends="" +install="$pkgname.pre-install" +subpackages="$pkgname-openrc" +source="https://forgejo.ellis.link/continuwuation/continuwuity/archive/v0.5.0-rc.5.tar.gz +continuwuity.initd +continuwuity.confd +" +builddir="$srcdir/continuwuity" +options="net !check" + +prepare() { + default_prepare + cd $srcdir/continuwuity + + # add the default database path to the config (commented out) + cat conduwuit-example.toml \ + | sed '/#database_path/ s:$: "/var/lib/continuwuity":' \ + > "$srcdir"/continuwuity.toml + + cargo fetch --target="$CTARGET" --locked +} + +build() { + cargo build --frozen --release --all-features +} + +check() { + # TODO: make sure the tests work + #cargo 
test --frozen + return +} + +package() { + cd $srcdir + install -Dm755 continuwuity/target/release/conduwuit "$pkgdir"/usr/bin/continuwuity + install -Dm644 "$srcdir"/continuwuity.toml -t "$pkgdir"/etc/continuwuity + install -Dm755 "$srcdir"/continuwuity.initd "$pkgdir"/etc/init.d/continuwuity + install -Dm644 "$srcdir"/continuwuity.confd "$pkgdir"/etc/conf.d/continuwuity +} + +sha512sums=" +66f6da5e98b6f7bb8c1082500101d5c87b1b79955c139b44c6ef5123919fb05feb0dffc669a3af1bc8d571ddb9f3576660f08dc10a6b19eab6db9e391175436a v0.5.0-rc.5.tar.gz +0482674be24740496d70da256d4121c5a5e3b749f2445d2bbe0e8991f1449de052724f8427da21a6f55574bc53eac9ca1e47e5012b4c13049b2b39044734d80d continuwuity.initd +38e2576278b450d16ba804dd8f4a128f18cd793e6c3ce55aedee1e186905755b31ee23baaa6586b1ab0e25a1f29bf1ea86bfaae4185b0cb1a29203726a199426 continuwuity.confd +" diff --git a/alpine/README.md b/alpine/README.md new file mode 100644 index 00000000..5f26d772 --- /dev/null +++ b/alpine/README.md @@ -0,0 +1,7 @@ +# building + +1. [set up your build + environment](https://wiki.alpinelinux.org/wiki/Include:Setup_your_system_and_account_for_building_packages) + +2. 
run `abuild` (or `abuild -K` if you want to keep the source directory to make + rebuilding faster) diff --git a/alpine/continuwuity.confd b/alpine/continuwuity.confd new file mode 100644 index 00000000..03d7b0a0 --- /dev/null +++ b/alpine/continuwuity.confd @@ -0,0 +1,3 @@ +supervisor=supervise-daemon +export CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml + diff --git a/alpine/continuwuity.initd b/alpine/continuwuity.initd new file mode 100644 index 00000000..1354f4bd --- /dev/null +++ b/alpine/continuwuity.initd @@ -0,0 +1,19 @@ +#!/sbin/openrc-run + +command="/usr/bin/continuwuity" +command_user="continuwuity:continuwuity" +command_args="--config ${CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml}" +command_background=true +pidfile="/run/$RC_SVCNAME.pid" + +output_log="/var/log/continuwuity.log" +error_log="/var/log/continuwuity.log" + +depend() { + need net +} + +start_pre() { + checkpath -d -m 0755 -o "$command_user" /var/lib/continuwuity + checkpath -f -m 0644 -o "$command_user" "$output_log" +} diff --git a/alpine/continuwuity.pre-install b/alpine/continuwuity.pre-install new file mode 100644 index 00000000..edac789f --- /dev/null +++ b/alpine/continuwuity.pre-install @@ -0,0 +1,4 @@ +#!/bin/sh +addgroup -S continuwuity 2>/dev/null +adduser -S -D -H -h /var/lib/continuwuity -s /sbin/nologin -G continuwuity -g continuwuity continuwuity 2>/dev/null +exit 0 diff --git a/arch/conduwuit.service b/arch/conduwuit.service index 7c05c259..4f45ddc0 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -4,10 +4,24 @@ Wants=network-online.target After=network-online.target Documentation=https://conduwuit.puppyirl.gay/ RequiresMountsFor=/var/lib/private/conduwuit +Alias=matrix-conduwuit.service [Service] DynamicUser=yes -Type=notify +Type=notify-reload +ReloadSignal=SIGUSR1 + +TTYPath=/dev/tty25 +DeviceAllow=char-tty +StandardInput=tty-force +StandardOutput=tty +StandardError=journal+console +TTYReset=yes +# uncomment to allow buffer to be 
cleared every restart +TTYVTDisallocate=no + +TTYColumns=120 +TTYRows=40 AmbientCapabilities= CapabilityBoundingSet= diff --git a/bin/complement b/bin/complement index a1db4b32..c437503e 100755 --- a/bin/complement +++ b/bin/complement @@ -10,15 +10,15 @@ set -euo pipefail COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}" # A `.jsonl` file to write test logs to -LOG_FILE="$2" +LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to -RESULTS_FILE="$3" +RESULTS_FILE="${3:-complement_test_results.jsonl}" -OCI_IMAGE="complement-conduwuit:main" +COMPLEMENT_BASE_IMAGE="${COMPLEMENT_BASE_IMAGE:-complement-conduwuit:main}" -# Complement tests that are skipped due to flakiness/reliability issues -SKIPPED_COMPLEMENT_TESTS='-skip=TestClientSpacesSummary.*|TestJoinFederatedRoomFromApplicationServiceBridgeUser.*|TestJumpToDateEndpoint.*|TestUnbanViaInvite.*' +# Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time +SKIPPED_COMPLEMENT_TESTS='TestPartialStateJoin.*|TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.*|TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias|TestUnbanViaInvite.*|TestRoomState/Parallel/GET_/publicRooms_lists.*"|TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then @@ -34,17 +34,41 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null -bin/nix-build-and-cache just .#linux-complement +if [ ! 
-f "complement_oci_image.tar.gz" ]; then + echo "building complement conduwuit image" -docker load < result -popd > /dev/null + # if using macOS, use linux-complement + #bin/nix-build-and-cache just .#linux-complement + bin/nix-build-and-cache just .#complement + #nix build -L .#complement + + echo "complement conduwuit image tar.gz built at \"result\"" + + echo "loading into docker" + docker load < result + popd > /dev/null +else + echo "skipping building a complement conduwuit image as complement_oci_image.tar.gz was already found, loading this" + + docker load < complement_oci_image.tar.gz + popd > /dev/null +fi + +echo "" +echo "running go test with:" +echo "\$COMPLEMENT_SRC: $COMPLEMENT_SRC" +echo "\$COMPLEMENT_BASE_IMAGE: $COMPLEMENT_BASE_IMAGE" +echo "\$RESULTS_FILE: $RESULTS_FILE" +echo "\$LOG_FILE: $LOG_FILE" +echo "" # It's okay (likely, even) that `go test` exits nonzero +# `COMPLEMENT_ENABLE_DIRTY_RUNS=1` reuses the same complement container for faster complement, at the possible expense of test environment pollution set +o pipefail env \ -C "$COMPLEMENT_SRC" \ - COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests | tee "$LOG_FILE" + COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ + go test -tags="conduwuit_blacklist" -skip="$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results @@ -54,3 +78,18 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' and .Test != null ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" + +#if command -v gotestfmt &> /dev/null; then +# echo "using gotestfmt on $LOG_FILE" +# grep '{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" +#fi + +echo "" +echo "" +echo "complement logs saved at $LOG_FILE" +echo "complement results saved at $RESULTS_FILE" +#if command -v gotestfmt &> /dev/null; then +# echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" +#fi +echo "" +echo "" diff --git a/book.toml b/book.toml index 1d32c766..46d3a7b0 100644 --- a/book.toml +++ b/book.toml @@ -1,8 +1,8 @@ [book] -title = "conduwuit 🏳️‍⚧️ 💜 🦴" -description = "conduwuit, which is a well-maintained fork of Conduit, is a simple, fast and reliable chat server for the Matrix protocol" +title = "continuwuity" +description = "continuwuity is a community continuation of the conduwuit Matrix homeserver, written in Rust." 
language = "en" -authors = ["strawberry (June)"] +authors = ["The continuwuity Community"] text-direction = "ltr" multilingual = false src = "docs" @@ -13,12 +13,12 @@ create-missing = true extra-watch-dirs = ["debian", "docs"] [rust] -edition = "2021" +edition = "2024" [output.html] -git-repository-url = "https://github.com/girlbossceo/conduwuit" -edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}" -git-repository-icon = "fa-github-square" +edit-url-template = "https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/{path}" +git-repository-url = "https://forgejo.ellis.link/continuwuation/continuwuity" +git-repository-icon = "fa-git-alt" [output.html.search] limit-results = 15 diff --git a/clippy.toml b/clippy.toml index 42427101..863759aa 100644 --- a/clippy.toml +++ b/clippy.toml @@ -2,9 +2,10 @@ array-size-threshold = 4096 cognitive-complexity-threshold = 94 # TODO reduce me ALARA excessive-nesting-threshold = 11 # TODO reduce me to 4 or 5 future-size-threshold = 7745 # TODO reduce me ALARA -stack-size-threshold = 196608 # reduce me ALARA +stack-size-threshold = 196608 # TODO reduce me ALARA too-many-lines-threshold = 780 # TODO reduce me to <= 100 type-complexity-threshold = 250 # reduce me to ~200 +large-error-threshold = 256 # TODO reduce me ALARA disallowed-macros = [ { path = "log::error", reason = "use conduwuit_core::error" }, diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 4062ba99..3d92ab15 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -113,14 +113,10 @@ #new_user_displayname_suffix = "🏳️‍⚧️" # If enabled, conduwuit will send a simple GET request periodically to -# `https://pupbrain.dev/check-for-updates/stable` for any new -# announcements made. Despite the name, this is not an update check -# endpoint, it is simply an announcement check endpoint. +# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new +# announcements or major updates. 
This is not an update check endpoint. # -# This is disabled by default as this is rarely used except for security -# updates or major updates. -# -#allow_check_for_updates = false +#allow_announcements_check = true # Set this to any float value to multiply conduwuit's in-memory LRU caches # with such as "auth_chain_cache_capacity". @@ -195,14 +191,6 @@ # #servernameevent_data_cache_capacity = varies by system -# This item is undocumented. Please contribute documentation for it. -# -#server_visibility_cache_capacity = varies by system - -# This item is undocumented. Please contribute documentation for it. -# -#user_visibility_cache_capacity = varies by system - # This item is undocumented. Please contribute documentation for it. # #stateinfo_cache_capacity = varies by system @@ -377,6 +365,26 @@ # #pusher_idle_timeout = 15 +# Maximum time to receive a request from a client (seconds). +# +#client_receive_timeout = 75 + +# Maximum time to process a request received from a client (seconds). +# +#client_request_timeout = 180 + +# Maximum time to transmit a response to a client (seconds) +# +#client_response_timeout = 120 + +# Grace period for clean shutdown of client requests (seconds). +# +#client_shutdown_timeout = 10 + +# Grace period for clean shutdown of federation requests (seconds). +# +#sender_shutdown_timeout = 5 + # Enables registration. If set to false, no users can register on this # server. # @@ -406,8 +414,9 @@ # #registration_token = -# Path to a file on the system that gets read for the registration token. -# this config option takes precedence/priority over "registration_token". +# Path to a file on the system that gets read for additional registration +# tokens. Multiple tokens can be added if you separate them with +# whitespace # # conduwuit must be able to access the file, and it must not be empty # @@ -424,10 +433,19 @@ # #allow_federation = true -# This item is undocumented. Please contribute documentation for it. 
+# Allows federation requests to be made to itself +# +# This isn't intended and is very likely a bug if federation requests are +# being sent to yourself. This currently mainly exists for development +# purposes. # #federation_loopback = false +# Always calls /forget on behalf of the user if leaving a room. This is a +# part of MSC4267 "Automatically forgetting rooms on leave" +# +#forget_forced_upon_leave = false + # Set this to true to require authentication on the normally # unauthenticated profile retrieval endpoints (GET) # "/_matrix/client/v3/profile/{userId}". @@ -505,9 +523,9 @@ # Default room version conduwuit will create rooms with. # -# Per spec, room version 10 is the default. +# Per spec, room version 11 is the default. # -#default_room_version = 10 +#default_room_version = 11 # This item is undocumented. Please contribute documentation for it. # @@ -572,7 +590,7 @@ # Currently, conduwuit doesn't support inbound batched key requests, so # this list should only contain other Synapse servers. # -# example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] +# example: ["matrix.org", "tchncs.de"] # #trusted_servers = ["matrix.org"] @@ -800,7 +818,7 @@ # Type of RocksDB database compression to use. # -# Available options are "zstd", "zlib", "bz2", "lz4", or "none". +# Available options are "zstd", "bz2", "lz4", or "none". # # It is best to use ZSTD as an overall good balance between # speed/performance, storage, IO amplification, and CPU usage. For more @@ -904,6 +922,13 @@ # #rocksdb_checksums = true +# Enables the "atomic flush" mode in rocksdb. This option is not intended +# for users. It may be removed or ignored in future versions. Atomic flush +# may be enabled by the paranoid to possibly improve database integrity at +# the cost of performance. +# +#rocksdb_atomic_flush = false + # Database repair mode (for RocksDB SST corruption). 
# # Use this option when the server reports corruption while running or @@ -941,10 +966,10 @@ # #rocksdb_compaction_ioprio_idle = true -# Disables RocksDB compaction. You should never ever have to set this -# option to true. If you for some reason find yourself needing to use this -# option as part of troubleshooting or a bug, please reach out to us in -# the conduwuit Matrix room with information and details. +# Enables RocksDB compaction. You should never ever have to set this +# option to false. If you for some reason find yourself needing to use +# this option as part of troubleshooting or a bug, please reach out to us +# in the conduwuit Matrix room with information and details. # # Disabling compaction will lead to a significantly bloated and # explosively large database, gradually poor performance, unnecessarily @@ -1157,28 +1182,72 @@ # #prune_missing_media = false -# Vector list of servers that conduwuit will refuse to download remote -# media from. +# List of forbidden server names via regex patterns that we will block +# incoming AND outgoing federation with, and block client room joins / +# remote user invites. # -#prevent_media_downloads_from = [] - -# List of forbidden server names that we will block incoming AND outgoing -# federation with, and block client room joins / remote user invites. +# Note that your messages can still make it to forbidden servers through +# backfilling. Events we receive from forbidden servers via backfill +# from servers we *do* federate with will be stored in the database. # # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and # outbound federation handler. # -# Basically "global" ACLs. +# You can set this to ["*"] to block all servers by default, and then +# use `allowed_remote_server_names` to allow only specific servers. 
+# +# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] # #forbidden_remote_server_names = [] -# List of forbidden server names that we will block all outgoing federated -# room directory requests for. Useful for preventing our users from -# wandering into bad servers or spaces. +# List of allowed server names via regex patterns that we will allow, +# regardless of if they match `forbidden_remote_server_names`. +# +# This option has no effect if `forbidden_remote_server_names` is empty. +# +# example: ["goodserver\\.tld$", "goodphrase"] +# +#allowed_remote_server_names = [] + +# Vector list of regex patterns of server names that conduwuit will refuse +# to download remote media from. +# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] +# +#prevent_media_downloads_from = [] + +# List of forbidden server names via regex patterns that we will block all +# outgoing federated room directory requests for. Useful for preventing +# our users from wandering into bad servers or spaces. +# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # #forbidden_remote_room_directory_server_names = [] +# Vector list of regex patterns of server names that conduwuit will not +# send messages to the client from. +# +# Note that there is no way for clients to receive messages once a server +# has become unignored without doing a full sync. This is a protocol +# limitation with the current sync protocols. This means this is somewhat +# of a nuclear option. +# +# example: ["reallybadserver\.tld$", "reallybadphrase", +# "69dollarfortnitecards"] +# +#ignore_messages_from_server_names = [] + +# Send messages from users that the user has ignored to the client. +# +# There is no way for clients to receive messages sent while a user was +# ignored without doing a full sync. This is a protocol limitation with +# the current sync protocols. 
Disabling this option will move +# responsibility of ignoring messages to the client, which can avoid this +# limitation. +# +#send_messages_from_ignored_users_to_client = false + # Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you # do not want conduwuit to send outbound requests to. Defaults to # RFC1918, unroutable, loopback, multicast, and testnet addresses for @@ -1286,7 +1355,7 @@ # used, and startup as warnings if any room aliases in your database have # a forbidden room alias/ID. # -# example: ["19dollarfortnitecards", "b[4a]droom"] +# example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] # #forbidden_alias_names = [] @@ -1299,7 +1368,7 @@ # startup as warnings if any local users in your database have a forbidden # username. # -# example: ["administrator", "b[a4]dusernam[3e]"] +# example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] # #forbidden_usernames = [] @@ -1392,7 +1461,7 @@ # Sentry reporting URL, if a custom one is desired. # -#sentry_endpoint = "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" +#sentry_endpoint = "" # Report your conduwuit server_name in Sentry.io crash reports and # metrics. @@ -1586,3 +1655,21 @@ # This item is undocumented. Please contribute documentation for it. # #support_mxid = + +[global.blurhashing] + +# blurhashing x component, 4 is recommended by https://blurha.sh/ +# +#components_x = 4 + +# blurhashing y component, 3 is recommended by https://blurha.sh/ +# +#components_y = 3 + +# Max raw size that the server will blurhash, this is the size of the +# image after converting it to raw data, it should be higher than the +# upload limit but not too high. The higher it is the higher the +# potential load will be for clients requesting blurhashes. The default +# is 33.55MB. Setting it to 0 disables blurhashing. 
+# +#blurhash_max_raw_size = 33554432 diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 3c2ec49d..3d2fbc9b 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -2,7 +2,8 @@ Description=conduwuit Matrix homeserver Wants=network-online.target After=network-online.target -Documentation=https://conduwuit.puppyirl.gay/ +Alias=matrix-conduwuit.service +Documentation=https://continuwuity.org/ [Service] DynamicUser=yes diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml deleted file mode 100644 index ba8259a3..00000000 --- a/deps/rust-rocksdb/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "rust-rocksdb-uwu" -categories.workspace = true -description = "dylib wrapper for rust-rocksdb" -edition = "2021" -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version = "0.0.1" - -[features] -default = ["lz4", "zstd", "zlib", "bzip2"] -jemalloc = ["rust-rocksdb/jemalloc"] -io-uring = ["rust-rocksdb/io-uring"] -valgrind = ["rust-rocksdb/valgrind"] -snappy = ["rust-rocksdb/snappy"] -lz4 = ["rust-rocksdb/lz4"] -zstd = ["rust-rocksdb/zstd"] -zlib = ["rust-rocksdb/zlib"] -bzip2 = ["rust-rocksdb/bzip2"] -rtti = ["rust-rocksdb/rtti"] -mt_static = ["rust-rocksdb/mt_static"] -multi-threaded-cf = ["rust-rocksdb/multi-threaded-cf"] -serde1 = ["rust-rocksdb/serde1"] -malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] - -[dependencies.rust-rocksdb] -git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "1f032427d3a0e7b0f13c04b4e34712bd8610291b" -#branch = "master" -default-features = false - -[lib] -path = "lib.rs" -crate-type = [ - "rlib", -# "dylib" -] - -[lints] -workspace = true diff --git a/deps/rust-rocksdb/lib.rs b/deps/rust-rocksdb/lib.rs deleted file mode 100644 index 8dbbda98..00000000 --- a/deps/rust-rocksdb/lib.rs +++ /dev/null @@ -1,62 +0,0 @@ -pub use rust_rocksdb::*; - -#[cfg_attr(not(conduwuit_mods), link(name = "rocksdb"))] 
-#[cfg_attr(conduwuit_mods, link(name = "rocksdb", kind = "static"))] -unsafe extern "C" { - pub unsafe fn rocksdb_list_column_families(); - pub unsafe fn rocksdb_logger_create_stderr_logger(); - pub unsafe fn rocksdb_logger_create_callback_logger(); - pub unsafe fn rocksdb_options_set_info_log(); - pub unsafe fn rocksdb_get_options_from_string(); - pub unsafe fn rocksdb_writebatch_create(); - pub unsafe fn rocksdb_writebatch_destroy(); - pub unsafe fn rocksdb_writebatch_put_cf(); - pub unsafe fn rocksdb_writebatch_delete_cf(); - pub unsafe fn rocksdb_iter_value(); - pub unsafe fn rocksdb_iter_seek_to_last(); - pub unsafe fn rocksdb_iter_seek_for_prev(); - pub unsafe fn rocksdb_iter_seek_to_first(); - pub unsafe fn rocksdb_iter_next(); - pub unsafe fn rocksdb_iter_prev(); - pub unsafe fn rocksdb_iter_seek(); - pub unsafe fn rocksdb_iter_valid(); - pub unsafe fn rocksdb_iter_get_error(); - pub unsafe fn rocksdb_iter_key(); - pub unsafe fn rocksdb_iter_destroy(); - pub unsafe fn rocksdb_livefiles(); - pub unsafe fn rocksdb_livefiles_count(); - pub unsafe fn rocksdb_livefiles_destroy(); - pub unsafe fn rocksdb_livefiles_column_family_name(); - pub unsafe fn rocksdb_livefiles_name(); - pub unsafe fn rocksdb_livefiles_size(); - pub unsafe fn rocksdb_livefiles_level(); - pub unsafe fn rocksdb_livefiles_smallestkey(); - pub unsafe fn rocksdb_livefiles_largestkey(); - pub unsafe fn rocksdb_livefiles_entries(); - pub unsafe fn rocksdb_livefiles_deletions(); - pub unsafe fn rocksdb_put_cf(); - pub unsafe fn rocksdb_delete_cf(); - pub unsafe fn rocksdb_get_pinned_cf(); - pub unsafe fn rocksdb_create_column_family(); - pub unsafe fn rocksdb_get_latest_sequence_number(); - pub unsafe fn rocksdb_batched_multi_get_cf(); - pub unsafe fn rocksdb_cancel_all_background_work(); - pub unsafe fn rocksdb_repair_db(); - pub unsafe fn rocksdb_list_column_families_destroy(); - pub unsafe fn rocksdb_flush(); - pub unsafe fn rocksdb_flush_wal(); - pub unsafe fn rocksdb_open_column_families(); 
- pub unsafe fn rocksdb_open_for_read_only_column_families(); - pub unsafe fn rocksdb_open_as_secondary_column_families(); - pub unsafe fn rocksdb_open_column_families_with_ttl(); - pub unsafe fn rocksdb_open(); - pub unsafe fn rocksdb_open_for_read_only(); - pub unsafe fn rocksdb_open_with_ttl(); - pub unsafe fn rocksdb_open_as_secondary(); - pub unsafe fn rocksdb_write(); - pub unsafe fn rocksdb_create_iterator_cf(); - pub unsafe fn rocksdb_backup_engine_create_new_backup_flush(); - pub unsafe fn rocksdb_backup_engine_options_create(); - pub unsafe fn rocksdb_write_buffer_manager_destroy(); - pub unsafe fn rocksdb_options_set_ttl(); -} diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..536af632 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,208 @@ +ARG RUST_VERSION=1 + +FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx +FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS base +FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS toolchain + +# Prevent deletion of apt cache +RUN rm -f /etc/apt/apt.conf.d/docker-clean + +# Match Rustc version as close as possible +# rustc -vV +ARG LLVM_VERSION=19 +# ENV RUSTUP_TOOLCHAIN=${RUST_VERSION} + +# Install repo tools +# Line one: compiler tools +# Line two: curl, for downloading binaries +# Line three: for xx-verify +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ +apt-get update && apt-get install -y \ + clang-${LLVM_VERSION} lld-${LLVM_VERSION} pkg-config make jq \ + curl git \ + file + +# Create symlinks for LLVM tools +RUN <> /etc/environment + +# Configure pkg-config +RUN <> /etc/environment + echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment + echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment +EOF + +# Configure cc to use clang version +RUN <> /etc/environment + echo "CXX=clang++" >> /etc/environment +EOF + +# Cross-language LTO +RUN <> 
/etc/environment + echo "CXXFLAGS=-flto" >> /etc/environment + # Linker is set to target-compatible clang by xx + echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment +EOF + +# Apply CPU-specific optimizations if TARGET_CPU is provided +ARG TARGET_CPU= +RUN <> /etc/environment + echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment + echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment + fi +EOF + +# Prepare output directories +RUN mkdir /out + +FROM toolchain AS builder + +# Conduwuit version info +ARG COMMIT_SHA= +ARG CONDUWUIT_VERSION_EXTRA= +ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA +RUN <> /etc/environment +fi +EOF + +ARG TARGETPLATFORM + +# Verify environment configuration +RUN cat /etc/environment +RUN xx-cargo --print-target-triple + +# Get source +COPY . . + +# Build the binary +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git/db \ + --mount=type=cache,target=/app/target \ + bash <<'EOF' + set -o allexport + . /etc/environment + TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \ + jq -r ".target_directory")) + mkdir /out/sbin + PACKAGE=conduwuit + xx-cargo build --locked --release \ + -p $PACKAGE; + BINARIES=($(cargo metadata --no-deps --format-version 1 | \ + jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. 
== \"bin\") | any ) | .name")) + for BINARY in "${BINARIES[@]}"; do + echo $BINARY + xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY + cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY + done +EOF + +# Generate Software Bill of Materials (SBOM) +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git/db \ + bash <<'EOF' + mkdir /out/sbom + typeset -A PACKAGES + for BINARY in /out/sbin/*; do + BINARY_BASE=$(basename ${BINARY}) + package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. == \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name") + if [ -z "$package" ]; then + continue + fi + PACKAGES[$package]=1 + done + for PACKAGE in $(echo ${!PACKAGES[@]}); do + echo $PACKAGE + cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json + done +EOF + +# Extract dynamically linked dependencies +RUN < + + + + + diff --git a/docs/assets/gay dog anarchists.png b/docs/assets/gay dog anarchists.png new file mode 100644 index 00000000..871cf302 Binary files /dev/null and b/docs/assets/gay dog anarchists.png differ diff --git a/docs/community.md b/docs/community.md new file mode 100644 index 00000000..a6852c0f --- /dev/null +++ b/docs/community.md @@ -0,0 +1,139 @@ +# Continuwuity Community Guidelines + +Welcome to the Continuwuity commuwunity! We're excited to have you here. Continuwuity is a +continuation of the conduwuit homeserver, which in turn is a hard-fork of the Conduit homeserver, +aimed at making Matrix more accessible and inclusive for everyone. + +This space is dedicated to fostering a positive, supportive, and welcoming environment for everyone. +These guidelines apply to all Continuwuity spaces, including our Matrix rooms and any other +community channels that reference them. We've written these guidelines to help us all create an +environment where everyone feels safe and respected. 
+ +For code and contribution guidelines, please refer to the +[Contributor's Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md). +Below are additional guidelines specific to the Continuwuity community. + +## Our Values and Expected Behaviors + +We strive to create a community based on mutual respect, collaboration, and inclusivity. We expect +all members to: + +1. **Be Respectful and Inclusive**: Treat everyone with respect. We're committed to a community + where everyone feels safe, regardless of background, identity, or experience. Discrimination, + harassment, or hate speech won't be tolerated. Remember that each person experiences the world + differently; share your own perspective and be open to learning about others'. + +2. **Be Positive and Constructive**: Engage in discussions constructively and support each other. + If you feel angry or frustrated, take a break before participating. Approach disagreements with + the goal of understanding, not winning. Focus on the issue, not the person. + +3. **Communicate Clearly and Kindly**: Our community includes neurodivergent individuals and those + who may not appreciate sarcasm or subtlety. Communicate clearly and kindly. Avoid ambiguity and + ensure your messages can be easily understood by all. Avoid placing the burden of education on + marginalized groups; please make an effort to look into your questions before asking others for + detailed explanations. + +4. **Be Open to Improving Inclusivity**: Actively participate in making our community more inclusive. + Report behaviour that contradicts these guidelines (see Reporting and Enforcement below) and be + open to constructive feedback aimed at improving our community. Understand that discussing + negative experiences can be emotionally taxing; focus on the message, not the tone. + +5. **Commit to Our Values**: Building an inclusive community requires ongoing effort from everyone. 
+ Recognise that addressing bias and discrimination is a continuous process that needs commitment + and action from all members. + +## Unacceptable Behaviors + +To ensure everyone feels safe and welcome, the following behaviors are considered unacceptable +within the Continuwuity community: + +* **Harassment and Discrimination**: Avoid offensive comments related to background, family status, + gender, gender identity or expression, marital status, sex, sexual orientation, native language, + age, ability, race and/or ethnicity, caste, national origin, socioeconomic status, religion, + geographic location, or any other dimension of diversity. Don't deliberately misgender someone or + question the legitimacy of their gender identity. + +* **Violence and Threats**: Do not engage in any form of violence or threats, including inciting + violence towards anyone or encouraging self-harm. Posting or threatening to post someone else's + personally identifying information ("doxxing") is also forbidden. + +* **Personal Attacks**: Disagreements happen, but they should never turn into personal attacks. + Don't insult, demean, or belittle others. + +* **Unwelcome Attention or Contact**: Avoid unwelcome sexual attention, inappropriate physical + contact (or simulation thereof), sexualized comments, jokes, or imagery. + +* **Disruption**: Do not engage in sustained disruption of discussions, events, or other + community activities. + +* **Bad Faith Actions**: Do not intentionally make false reports or otherwise abuse the reporting + process. + +This is not an exhaustive list. Any behaviour that makes others feel unsafe or unwelcome may be +subject to enforcement action. 
+ +## Matrix Community + +These Community Guidelines apply to the entire +[Continuwuity Matrix Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, including: + +### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) + +This room is for support and discussions about Continuwuity. Ask questions, share insights, and help +each other out while adhering to these guidelines. + +We ask that this room remain focused on the Continuwuity software specifically: the team are +typically happy to engage in conversations about related subjects in the off-topic room. + +### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org) + +For off-topic community conversations about any subject. While this room allows for a wide range of +topics, the same guidelines apply. Please keep discussions respectful and inclusive, and avoid +divisive or stressful subjects like specific country/world politics unless handled with exceptional +care and respect for diverse viewpoints. + +General topics, such as world events, are welcome as long as they follow the guidelines. If a member +of the team asks for the conversation to end, please respect their decision. + +### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org) + +This room is dedicated to discussing active development of Continuwuity, including ongoing issues or +code development. Collaboration here must follow these guidelines, and please consider raising +[an issue](https://forgejo.ellis.link/continuwuation/continuwuity/issues) on the repository to help +track progress. + +## Reporting and Enforcement + +We take these Community Guidelines seriously to protect our community members. If you witness or +experience unacceptable behaviour, or have any other concerns, please report it. 
+ +**How to Report:** + +* **Alert Moderators in the Room:** If you feel comfortable doing so, you can address the issue + publicly in the relevant room by mentioning the moderation bot, `@rock:continuwuity.org`, which + will immediately alert all available moderators. +* **Direct Message:** If you're not comfortable raising the issue publicly, please send a direct + message (DM) to one of the room moderators. + +Reports will be handled with discretion. We will investigate promptly and thoroughly. + +**Enforcement Actions:** + +Anyone asked to stop unacceptable behaviour is expected to comply immediately. Failure to do so, or +engaging in prohibited behaviour, may result in enforcement action. Moderators may take actions they +deem appropriate, including but not limited to: + +1. **Warning**: A direct message or public warning identifying the violation and requesting + corrective action. +2. **Temporary Mute**: Temporary restriction from participating in discussions for a specified + period. +3. **Kick or Ban**: Removal from a room (kick) or the entire community space (ban). Egregious or + repeated violations may result in an immediate ban. Bans are typically permanent and reviewed + only in exceptional circumstances. + +Retaliation against those who report concerns in good faith will not be tolerated and will be +subject to the same enforcement actions. + +Together, let's build and maintain a community where everyone feels valued, safe, and respected. + +— The Continuwuity Moderation Team diff --git a/docs/conduwuit_coc.md b/docs/conduwuit_coc.md deleted file mode 100644 index 0fce2fe3..00000000 --- a/docs/conduwuit_coc.md +++ /dev/null @@ -1,93 +0,0 @@ -# conduwuit Community Code of Conduct - -Welcome to the conduwuit community! We’re excited to have you here. conduwuit is -a hard-fork of the Conduit homeserver, aimed at making Matrix more accessible -and inclusive for everyone. 
- -This space is dedicated to fostering a positive, supportive, and inclusive -environment for everyone. This Code of Conduct applies to all conduwuit spaces, -including any further community rooms that reference this CoC. Here are our -guidelines to help maintain the welcoming atmosphere that sets conduwuit apart. - -For the general foundational rules, please refer to the [Contributor's -Covenant](https://github.com/girlbossceo/conduwuit/blob/main/CODE_OF_CONDUCT.md). -Below are additional guidelines specific to the conduwuit community. - -## Our Values and Guidelines - -1. **Respect and Inclusivity**: We are committed to maintaining a community - where everyone feels safe and respected. Discrimination, harassment, or hate -speech of any kind will not be tolerated. Recognise that each community member -experiences the world differently based on their past experiences, background, -and identity. Share your own experiences and be open to learning about others' -diverse perspectives. - -2. **Positivity and Constructiveness**: Engage in constructive discussions and - support each other. If you feel angry, negative, or aggressive, take a break -until you can participate in a positive and constructive manner. Process intense -feelings with a friend or in a private setting before engaging in community -conversations to help maintain a supportive and focused environment. - -3. **Clarity and Understanding**: Our community includes neurodivergent - individuals and those who may not appreciate sarcasm or subtlety. Communicate -clearly and kindly, avoiding sarcasm and ensuring your messages are easily -understood by all. Additionally, avoid putting the burden of education on -marginalized groups by doing your own research before asking for explanations. - -4. **Be Open to Inclusivity**: Actively engage in conversations about making our - community more inclusive. 
Report discriminatory behavior to the moderators -and be open to constructive feedback that aims to improve our community. -Understand that discussing discrimination and negative experiences can be -emotionally taxing, so focus on the message rather than critiquing the tone -used. - -5. **Commit to Inclusivity**: Building an inclusive community requires time, - energy, and resources. Recognise that addressing discrimination and bias is -an ongoing process that necessitates commitment and action from all community -members. - -## Matrix Community - -This Code of Conduct applies to the entire [conduwuit Matrix -Space](https://matrix.to/#/#conduwuit-space:puppygock.gay) and its rooms, -including: - -### [#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay) - -This room is for support and discussions about conduwuit. Ask questions, share -insights, and help each other out. - -### [#conduwuit-offtopic:girlboss.ceo](https://matrix.to/#/#conduwuit-offtopic:girlboss.ceo) - -For off-topic community conversations about any subject. While this room allows -for a wide range of topics, the same CoC applies. Keep discussions respectful -and inclusive, and avoid divisive subjects like country/world politics. General -topics, such as world events, are welcome as long as they follow the CoC. - -### [#conduwuit-dev:puppygock.gay](https://matrix.to/#/#conduwuit-dev:puppygock.gay) - -This room is dedicated to discussing active development of conduwuit. Posting -requires an elevated power level, which can be requested in one of the other -rooms. Use this space to collaborate and innovate. - -## Enforcement - -We have a zero-tolerance policy for violations of this Code of Conduct. If -someone’s behavior makes you uncomfortable, please report it to the moderators. -Actions we may take include: - -1. **Warning**: A warning given directly in the room or via a private message - from the moderators, identifying the violation and requesting corrective -action. -2. 
**Temporary Mute**: Temporary restriction from participating in discussions - for a specified period to allow for reflection and cooling off. -3. **Kick or Ban**: Egregious behavior may result in an immediate kick or ban to - protect other community members. Bans are considered permanent and will only -be reversed in exceptional circumstances after proven good behavior. - -Please highlight issues directly in rooms when possible, but if you don't feel -comfortable doing that, then please send a DM to one of the moderators directly. - -Together, let’s build a community where everyone feels valued and respected. - -— The conduwuit Moderation Team diff --git a/docs/configuration.md b/docs/configuration.md index 0c670210..778e5c56 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,10 +1,10 @@ # Configuration -This chapter describes various ways to configure conduwuit. +This chapter describes various ways to configure Continuwuity. ## Basics -conduwuit uses a config file for the majority of the settings, but also supports +Continuwuity uses a config file for the majority of the settings, but also supports setting individual config options via commandline. Please refer to the [example config @@ -12,13 +12,13 @@ file](./configuration/examples.md#example-configuration) for all of those settings. The config file to use can be specified on the commandline when running -conduwuit by specifying the `-c`, `--config` flag. Alternatively, you can use +Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use the environment variable `CONDUWUIT_CONFIG` to specify the config file to used. Conduit's environment variables are supported for backwards compatibility. ## Option commandline flag -conduwuit supports setting individual config options in TOML format from the +Continuwuity supports setting individual config options in TOML format from the `-O` / `--option` flag. 
For example, you can set your server name via `-O server_name=\"example.com\"`. @@ -33,7 +33,7 @@ string. This does not apply to options that take booleans or numbers: ## Execute commandline flag -conduwuit supports running admin commands on startup using the commandline +Continuwuity supports running admin commands on startup using the commandline argument `--execute`. The most notable use for this is to create an admin user on first startup. diff --git a/docs/deploying.md b/docs/deploying.md index 86277aba..be1bf736 100644 --- a/docs/deploying.md +++ b/docs/deploying.md @@ -1,3 +1,3 @@ # Deploying -This chapter describes various ways to deploy conduwuit. +This chapter describes various ways to deploy Continuwuity. diff --git a/docs/deploying/arch-linux.md b/docs/deploying/arch-linux.md index 7436e5bf..a14201e3 100644 --- a/docs/deploying/arch-linux.md +++ b/docs/deploying/arch-linux.md @@ -1,15 +1,3 @@ -# conduwuit for Arch Linux +# Continuwuity for Arch Linux -Currently conduwuit is only on the Arch User Repository (AUR). - -The conduwuit AUR packages are community maintained and are not maintained by -conduwuit development team, but the AUR package maintainers are in the Matrix -room. Please attempt to verify your AUR package's PKGBUILD file looks fine -before asking for support. - -- [conduwuit](https://aur.archlinux.org/packages/conduwuit) - latest tagged -conduwuit -- [conduwuit-git](https://aur.archlinux.org/packages/conduwuit-git) - latest git -conduwuit from `main` branch -- [conduwuit-bin](https://aur.archlinux.org/packages/conduwuit-bin) - latest -tagged conduwuit static binary +Continuwuity does not have any Arch Linux packages at this time. 
diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 366f6999..04142e0c 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -1,13 +1,14 @@ -# conduwuit - Behind Traefik Reverse Proxy +# Continuwuity - Behind Traefik Reverse Proxy services: homeserver: ### If you already built the conduduwit image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - db:/var/lib/conduwuit + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. #- ./conduwuit.toml:/etc/conduwuit.toml networks: - proxy @@ -35,14 +36,14 @@ services: server=your.server.name.example:443 } #cpuset: "0-4" # Uncomment to limit to specific CPU cores - ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it + ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it nofile: soft: 1048567 hard: 1048567 ### Uncomment if you want to use your own Element-Web App. 
### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker-compose.override.yml b/docs/deploying/docker-compose.override.yml index a343eeee..ec82fac3 100644 --- a/docs/deploying/docker-compose.override.yml +++ b/docs/deploying/docker-compose.override.yml @@ -1,4 +1,4 @@ -# conduwuit - Traefik Reverse Proxy Labels +# Continuwuity - Traefik Reverse Proxy Labels services: homeserver: @@ -6,7 +6,7 @@ services: - "traefik.enable=true" - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - - "traefik.http.routers.to-conduwuit.rule=Host(`.`)" # Change to the address on which conduwuit is hosted + - "traefik.http.routers.to-conduwuit.rule=Host(`.`)" # Change to the address on which Continuwuity is hosted - "traefik.http.routers.to-conduwuit.tls=true" - "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt" - "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker" @@ -16,7 +16,7 @@ services: - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" - # If you want to have your account on , but host conduwuit on a subdomain, + # If you want to have your account on , but host Continuwuity on a subdomain, # you can let it only handle the well known file on that domain instead #- "traefik.http.routers.to-matrix-wellknown.rule=Host(``) && PathPrefix(`/.well-known/matrix`)" #- "traefik.http.routers.to-matrix-wellknown.tls=true" diff --git 
a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 431cf2d4..9ee98428 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -1,6 +1,6 @@ services: caddy: - # This compose file uses caddy-docker-proxy as the reverse proxy for conduwuit! + # This compose file uses caddy-docker-proxy as the reverse proxy for Continuwuity! # For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy image: lucaslorentz/caddy-docker-proxy:ci-alpine ports: @@ -20,12 +20,13 @@ services: caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}} homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use a registry image, + ### If you already built the Continuwuity image with 'docker build' or want to use a registry image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - db:/var/lib/conduwuit + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. 
 #- ./conduwuit.toml:/etc/conduwuit.toml environment: CONDUWUIT_SERVER_NAME: example.com # EDIT THIS diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 89118c74..9083b796 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -1,13 +1,14 @@ -# conduwuit - Behind Traefik Reverse Proxy +# Continuwuity - Behind Traefik Reverse Proxy services: homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image, + ### If you already built the Continuwuity image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - db:/var/lib/conduwuit + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. #- ./conduwuit.toml:/etc/conduwuit.toml networks: - proxy @@ -21,7 +22,7 @@ services: CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit #CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above - ### Uncomment and change values as desired, note that conduwuit has plenty of config options, so you should check out the example example config too + ### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example config too # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUWUIT_LOG: info # default is: "warn,state_res=warn" # CONDUWUIT_ALLOW_ENCRYPTION: 'true' @@ -43,14 +44,14 @@ services: server=your.server.name.example:443 } #cpuset: "0-4" # Uncomment to limit to specific CPU cores - ulimits: # conduwuit uses quite a few file
descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it + ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it nofile: soft: 1048567 hard: 1048567 ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index ca33b5f5..1a3ab811 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -1,10 +1,10 @@ -# conduwuit +# Continuwuity services: homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use a registry image, + ### If you already built the Continuwuity image with 'docker build' or want to use a registry image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped ports: - 8448:6167 @@ -28,7 +28,7 @@ services: # ### Uncomment if you want to use your own Element-Web App. 
### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index bdbfb59c..08a0dc4f 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -1,31 +1,20 @@ -# conduwuit for Docker +# Continuwuity for Docker ## Docker -To run conduwuit with Docker you can either build the image yourself or pull it +To run Continuwuity with Docker you can either build the image yourself or pull it from a registry. ### Use a registry -OCI images for conduwuit are available in the registries listed below. +OCI images for Continuwuity are available in the registries listed below. -| Registry | Image | Size | Notes | -| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- | -| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable latest tagged image. | -| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable latest tagged image. | -| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable latest tagged image. | -| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh] | ![Image Size][shield-main] | Stable main branch. | -| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:main][gl] | ![Image Size][shield-main] | Stable main branch. | -| Docker Hub | [docker.io/girlbossceo/conduwuit:main][dh] | ![Image Size][shield-main] | Stable main branch. 
| +| Registry | Image | Notes | +| --------------- | --------------------------------------------------------------- | -----------------------| +| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest][fj] | Latest tagged image. | +| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main][fj] | Main branch image. | -[dh]: https://hub.docker.com/r/girlbossceo/conduwuit -[gh]: https://github.com/girlbossceo/conduwuit/pkgs/container/conduwuit -[gl]: https://gitlab.com/conduwuit/conduwuit/container_registry/6369729 -[shield-latest]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest -[shield-main]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main - -OCI image `.tar.gz` files are also hosted directly at when uploaded by CI with a -commit hash/revision or a tagged release: +[fj]: https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity Use @@ -52,11 +41,11 @@ or you can use [docker compose](#docker-compose). The `-d` flag lets the container run in detached mode. You may supply an optional `conduwuit.toml` config file, the example config can be found [here](../configuration/examples.md). You can pass in different env vars to -change config values on the fly. You can even configure conduwuit completely by +change config values on the fly. You can even configure Continuwuity completely by using env vars. For an overview of possible values, please take a look at the [`docker-compose.yml`](docker-compose.yml) file. -If you just want to test conduwuit for a short time, you can use the `--rm` +If you just want to test Continuwuity for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. @@ -91,32 +80,32 @@ docker network create caddy After that, you can rename it so it matches `docker-compose.yml` and spin up the containers! -Additional info about deploying conduwuit can be found [here](generic.md). 
+Additional info about deploying Continuwuity can be found [here](generic.md). ### Build -Official conduwuit images are built using Nix's -[`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are -repeatable and reproducible by anyone, keeps the images lightweight, and can be -built offline. +Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables multi-platform builds efficiently. -This also ensures portability of our images because `buildLayeredImage` builds -OCI images, not Docker images, and works with other container software. +The resulting images are broadly compatible with Docker and other container runtimes like Podman or containerd. -The OCI images are OS-less with only a very minimal environment of the `tini` -init system, CA certificates, and the conduwuit binary. This does mean there is -not a shell, but in theory you can get a shell by adding the necessary layers -to the layered image. However it's very unlikely you will need a shell for any -real troubleshooting. +The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition. -The flake file for the OCI image definition is at [`nix/pkgs/oci-image/default.nix`][oci-image-def]. 
+To build an image locally using Docker Buildx, you can typically run a command like: -To build an OCI image using Nix, the following outputs can be built: -- `nix build -L .#oci-image` (default features, x86_64 glibc) -- `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl) -- `nix build -L .#oci-image-aarch64-linux-musl` (default features, aarch64 musl) -- `nix build -L .#oci-image-x86_64-linux-musl-all-features` (all features, x86_64 musl) -- `nix build -L .#oci-image-aarch64-linux-musl-all-features` (all features, aarch64 musl) +```bash +# Build for the current platform and load into the local Docker daemon +docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile . + +# Example: Build for specific platforms and push to a registry. +# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push + +# Example: Build binary optimized for the current CPU +# docker buildx build --load --tag continuwuity:latest --build-arg TARGET_CPU=native -f docker/Dockerfile . +``` + +Refer to the Docker Buildx documentation for more advanced build options. + +[dockerfile-path]: ../../docker/Dockerfile ### Run @@ -138,10 +127,10 @@ web. With the two provided files, [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy -to deploy and use conduwuit, with a little caveat. If you already took a look at +to deploy and use Continuwuity, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is the little caveat. 
Traefik is simply a proxy and loadbalancer and is not able to -serve any kind of content, but for conduwuit to federate, we need to either +serve any kind of content, but for Continuwuity to federate, we need to either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`. @@ -153,4 +142,3 @@ those two files. See the [TURN](../turn.md) page. [nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage -[oci-image-def]: https://github.com/girlbossceo/conduwuit/blob/main/nix/pkgs/oci-image/default.nix diff --git a/docs/deploying/freebsd.md b/docs/deploying/freebsd.md index 65b40204..3764ffa8 100644 --- a/docs/deploying/freebsd.md +++ b/docs/deploying/freebsd.md @@ -1,5 +1,5 @@ -# conduwuit for FreeBSD +# Continuwuity for FreeBSD -conduwuit at the moment does not provide FreeBSD builds or have FreeBSD packaging, however conduwuit does build and work on FreeBSD using the system-provided RocksDB. +Continuwuity at the moment does not provide FreeBSD builds or have FreeBSD packaging, however Continuwuity does build and work on FreeBSD using the system-provided RocksDB. -Contributions for getting conduwuit packaged are welcome. +Contributions for getting Continuwuity packaged are welcome. diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index cc50544e..46b9b439 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -2,11 +2,11 @@ > ### Getting help > -> If you run into any problems while setting up conduwuit, ask us in -> `#conduwuit:puppygock.gay` or [open an issue on -> GitHub](https://github.com/girlbossceo/conduwuit/issues/new). +> If you run into any problems while setting up Continuwuity, ask us in +> `#continuwuity:continuwuity.org` or [open an issue on +> Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 
-## Installing conduwuit +## Installing Continuwuity ### Static prebuilt binary @@ -14,12 +14,10 @@ You may simply download the binary that fits your machine architecture (x86_64 or aarch64). Run `uname -m` to see what you need. Prebuilt fully static musl binaries can be downloaded from the latest tagged -release [here](https://github.com/girlbossceo/conduwuit/releases/latest) or +release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or `main` CI branch workflow artifact output. These also include Debian/Ubuntu packages. -Binaries are also available on my website directly at: - These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit hash/revision, and `releases` are tagged releases. Sort by descending last modified for the latest. @@ -37,7 +35,7 @@ for performance. ### Compiling Alternatively, you may compile the binary yourself. We recommend using -Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most +Nix (or [Lix](https://lix.systems)) to build Continuwuity as this has the most guaranteed reproducibiltiy and easiest to get a build environment and output going. This also allows easy cross-compilation. @@ -51,57 +49,35 @@ If wanting to build using standard Rust toolchains, make sure you install: - `liburing-dev` on the compiling machine, and `liburing` on the target host - LLVM and libclang for RocksDB -You can build conduwuit using `cargo build --release --all-features` +You can build Continuwuity using `cargo build --release --all-features` -## Migrating from Conduit +## Adding a Continuwuity user -As mentioned in the README, there is little to no steps needed to migrate -from Conduit. As long as you are using the RocksDB database backend, just -replace the binary / container image / etc. - -**WARNING**: As of conduwuit 0.5.0, all database and backwards compatibility -with Conduit is no longer supported. We only support migrating *from* Conduit, -not back to Conduit like before. 
If you are truly finding yourself wanting to -migrate back to Conduit, we would appreciate all your feedback and if we can -assist with any issues or concerns. - -**Note**: If you are relying on Conduit's "automatic delegation" feature, -this will **NOT** work on conduwuit and you must configure delegation manually. -This is not a mistake and no support for this feature will be added. - -If you are using SQLite, you **MUST** migrate to RocksDB. You can use this -tool to migrate from SQLite to RocksDB: - -See the `[global.well_known]` config section, or configure your web server -appropriately to send the delegation responses. - -## Adding a conduwuit user - -While conduwuit can run as any user it is better to use dedicated users for +While Continuwuity can run as any user it is better to use dedicated users for different services. This also allows you to make sure that the file permissions are correctly set up. -In Debian, you can use this command to create a conduwuit user: +In Debian, you can use this command to create a Continuwuity user: ```bash -sudo adduser --system conduwuit --group --disabled-login --no-create-home +sudo adduser --system continuwuity --group --disabled-login --no-create-home ``` For distros without `adduser` (or where it's a symlink to `useradd`): ```bash -sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit +sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity ``` ## Forwarding ports in the firewall or the router Matrix's default federation port is port 8448, and clients must be using port 443. If you would like to use only port 443, or a different port, you will need to setup -delegation. conduwuit has config options for doing delegation, or you can configure +delegation. Continuwuity has config options for doing delegation, or you can configure your reverse proxy to manually serve the necessary JSON files to do delegation (see the `[global.well_known]` config section). 
-If conduwuit runs behind a router or in a container and has a different public +If Continuwuity runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. @@ -116,9 +92,9 @@ on the network level, consider something like NextDNS or Pi-Hole. ## Setting up a systemd service -Two example systemd units for conduwuit can be found +Two example systemd units for Continuwuity can be found [on the configuration page](../configuration/examples.md#debian-systemd-unit-file). -You may need to change the `ExecStart=` path to where you placed the conduwuit +You may need to change the `ExecStart=` path to where you placed the Continuwuity binary if it is not `/usr/bin/conduwuit`. On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros @@ -136,9 +112,9 @@ and entering the following: ReadWritePaths=/path/to/custom/database/path ``` -## Creating the conduwuit configuration file +## Creating the Continuwuity configuration file -Now we need to create the conduwuit's config file in +Now we need to create the Continuwuity's config file in `/etc/conduwuit/conduwuit.toml`. The example config can be found at [conduwuit-example.toml](../configuration/examples.md). @@ -149,7 +125,7 @@ RocksDB is the only supported database backend. ## Setting the correct file permissions -If you are using a dedicated user for conduwuit, you will need to allow it to +If you are using a dedicated user for Continuwuity, you will need to allow it to read the config. 
To do that you can run this: ```bash @@ -161,41 +137,48 @@ If you use the default database path you also need to run this: ```bash sudo mkdir -p /var/lib/conduwuit/ -sudo chown -R conduwuit:conduwuit /var/lib/conduwuit/ +sudo chown -R continuwuity:continuwuity /var/lib/conduwuit/ sudo chmod 700 /var/lib/conduwuit/ ``` ## Setting up the Reverse Proxy -Refer to the documentation or various guides online of your chosen reverse proxy -software. There are many examples of basic Apache/Nginx reverse proxy setups -out there. +We recommend Caddy as a reverse proxy, as it is trivial to use, handling TLS certificates, reverse proxy headers, etc transparently with proper defaults. +For other software, please refer to their respective documentation or online guides. -A [Caddy](https://caddyserver.com/) example will be provided as this -is the recommended reverse proxy for new users and is very trivial to use -(handles TLS, reverse proxy headers, etc transparently with proper defaults). +### Caddy -Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization -header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here. +After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile` +and enter this (substitute for your server name). -If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent this (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). +```caddyfile +your.server.name, your.server.name:8448 { + # TCP reverse_proxy + reverse_proxy 127.0.0.1:6167 + # UNIX socket + #reverse_proxy unix//run/conduwuit/conduwuit.sock +} +``` -If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: -- `proxy_pass http://127.0.0.1:6167$request_uri;` -- `proxy_pass http://127.0.0.1:6167;` +That's it! Just start and enable the service and you're set. 
-Nginx users need to increase `client_max_body_size` (default is 1M) to match -`max_request_size` defined in conduwuit.toml. +```bash +sudo systemctl enable --now caddy +``` + +### Other Reverse Proxies + +As we would prefer our users to use Caddy, we will not provide configuration files for other proxies. You will need to reverse proxy everything under following routes: - `/_matrix/` - core Matrix C-S and S-S APIs -- `/_conduwuit/` - ad-hoc conduwuit routes such as `/local_user_count` and +- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and `/server_version` You can optionally reverse proxy the following individual routes: - `/.well-known/matrix/client` and `/.well-known/matrix/server` if using -conduwuit to perform delegation (see the `[global.well_known]` config section) -- `/.well-known/matrix/support` if using conduwuit to send the homeserver admin +Continuwuity to perform delegation (see the `[global.well_known]` config section) +- `/.well-known/matrix/support` if using Continuwuity to send the homeserver admin contact and support page (formerly known as MSC1929) - `/` if you would like to see `hewwo from conduwuit woof!` at the root @@ -208,29 +191,23 @@ Examples of delegation: - - -### Caddy +For Apache and Nginx there are many examples available online. -Create `/etc/caddy/conf.d/conduwuit_caddyfile` and enter this (substitute for -your server name). +Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization +header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here.
-```caddyfile -your.server.name, your.server.name:8448 { - # TCP reverse_proxy - 127.0.0.1:6167 - # UNIX socket - #reverse_proxy unix//run/conduwuit/conduwuit.sock -} -``` +If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). -That's it! Just start and enable the service and you're set. +If using Nginx, you need to give Continuwuity the request URI using `$request_uri`, or like so: +- `proxy_pass http://127.0.0.1:6167$request_uri;` +- `proxy_pass http://127.0.0.1:6167;` -```bash -sudo systemctl enable --now caddy -``` +Nginx users need to increase `client_max_body_size` (default is 1M) to match +`max_request_size` defined in conduwuit.toml. ## You're done -Now you can start conduwuit with: +Now you can start Continuwuity with: ```bash sudo systemctl start conduwuit diff --git a/docs/deploying/kubernetes.md b/docs/deploying/kubernetes.md index d7721722..0cbfbbc0 100644 --- a/docs/deploying/kubernetes.md +++ b/docs/deploying/kubernetes.md @@ -1,8 +1,9 @@ -# conduwuit for Kubernetes +# Continuwuity for Kubernetes -conduwuit doesn't support horizontal scalability or distributed loading +Continuwuity doesn't support horizontal scalability or distributed loading natively, however a community maintained Helm Chart is available here to run conduwuit on Kubernetes: -Should changes need to be made, please reach out to the maintainer in our -Matrix room as this is not maintained/controlled by the conduwuit maintainers. +This should be compatible with continuwuity, but you will need to change the image reference. + +Should changes need to be made, please reach out to the maintainer as this is not maintained/controlled by the Continuwuity maintainers. 
diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md index 3c5b0e69..cf2c09e4 100644 --- a/docs/deploying/nixos.md +++ b/docs/deploying/nixos.md @@ -1,66 +1,33 @@ -# conduwuit for NixOS +# Continuwuity for NixOS -conduwuit can be acquired by Nix (or [Lix][lix]) from various places: +Continuwuity can be acquired by Nix (or [Lix][lix]) from various places: * The `flake.nix` at the root of the repo * The `default.nix` at the root of the repo -* From conduwuit's binary cache - -A community maintained NixOS package is available at [`conduwuit`](https://search.nixos.org/packages?channel=unstable&show=conduwuit&from=0&size=50&sort=relevance&type=packages&query=conduwuit) - -### Binary cache - -A binary cache for conduwuit that the CI/CD publishes to is available at the -following places (both are the same just different names): - -``` -https://attic.kennel.juneis.dog/conduit -conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= - -https://attic.kennel.juneis.dog/conduwuit -conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= -``` - -The binary caches were recreated some months ago due to attic issues. The old public -keys were: - -``` -conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg= -conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw= -``` - -If needed, we have a binary cache on Cachix but it is only limited to 5GB: - -``` -https://conduwuit.cachix.org -conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= -``` - -If specifying a Git remote URL in your flake, you can use any remotes that -are specified on the README (the mirrors), such as the GitHub: `github:girlbossceo/conduwuit` +* From Continuwuity's binary cache ### NixOS module The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure -conduwuit. +Continuwuity. ### Conduit NixOS Config Module and SQLite Beware! 
The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend. -Conduwuit dropped SQLite support in favor of exclusively supporting the much faster RocksDB. +Continuwuity dropped SQLite support in favor of exclusively supporting the much faster RocksDB. Make sure that you are using the RocksDB backend before migrating! There is a [tool to migrate a Conduit SQLite database to RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/). -If you want to run the latest code, you should get conduwuit from the `flake.nix` +If you want to run the latest code, you should get Continuwuity from the `flake.nix` or `default.nix` and set [`services.matrix-conduit.package`][package] -appropriately to use conduwuit instead of Conduit. +appropriately to use Continuwuity instead of Conduit. ### UNIX sockets -Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module +Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX socket option does not exist in Conduit, and the module forcibly sets the `address` and `port` config options. @@ -84,13 +51,13 @@ disallows the namespace from accessing or creating UNIX sockets and has to be en systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ]; ``` -Even though those workarounds are feasible a conduwuit NixOS configuration module, developed and +Even though those workarounds are feasible a Continuwuity NixOS configuration module, developed and published by the community, would be appreciated. ### jemalloc and hardened profile -conduwuit uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] -due to them using `scudo` by default. You must either disable/hide `scudo` from conduwuit, or +Continuwuity uses jemalloc by default. 
This may interfere with the [`hardened.nix` profile][hardened.nix] +due to them using `scudo` by default. You must either disable/hide `scudo` from Continuwuity, or disable jemalloc like so: ```nix diff --git a/docs/development.md b/docs/development.md index fa7519c0..1e344f41 100644 --- a/docs/development.md +++ b/docs/development.md @@ -4,9 +4,9 @@ Information about developing the project. If you are only interested in using it, you can safely ignore this page. If you plan on contributing, see the [contributor's guide](./contributing.md). -## conduwuit project layout +## Continuwuity project layout -conduwuit uses a collection of sub-crates, packages, or workspace members +Continuwuity uses a collection of sub-crates, packages, or workspace members that indicate what each general area of code is for. All of the workspace members are under `src/`. The workspace definition is at the top level / root `Cargo.toml`. @@ -14,11 +14,11 @@ members are under `src/`. The workspace definition is at the top level / root The crate names are generally self-explanatory: - `admin` is the admin room - `api` is the HTTP API, Matrix C-S and S-S endpoints, etc -- `core` is core conduwuit functionality like config loading, error definitions, +- `core` is core Continuwuity functionality like config loading, error definitions, global utilities, logging infrastructure, etc - `database` is RocksDB methods, helpers, RocksDB config, and general database definitions, utilities, or functions -- `macros` are conduwuit Rust [macros][macros] like general helper macros, logging +- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging and error handling macros, and [syn][syn] and [procedural macros][proc-macro] used for admin room commands and others - `main` is the "primary" sub-crate. 
This is where the `main()` function lives, @@ -35,7 +35,7 @@ if you truly find yourself needing to, we recommend reaching out to us in the Matrix room for discussions about it beforehand. The primary inspiration for this design was apart of hot reloadable development, -to support "conduwuit as a library" where specific parts can simply be swapped out. +to support "Continuwuity as a library" where specific parts can simply be swapped out. There is evidence Conduit wanted to go this route too as `axum` is technically an optional feature in Conduit, and can be compiled without the binary or axum library for handling inbound web requests; but it was never completed or worked. @@ -68,10 +68,10 @@ do this if Rust supported workspace-level features to begin with. ## List of forked dependencies -During conduwuit development, we have had to fork +During Continuwuity development, we have had to fork some dependencies to support our use-cases in some areas. This ranges from things said upstream project won't accept for any reason, faster-paced -development (unresponsive or slow upstream), conduwuit-specific usecases, or +development (unresponsive or slow upstream), Continuwuity-specific usecases, or lack of time to upstream some things. - [ruma/ruma][1]: - various performance @@ -84,7 +84,7 @@ builds seem to be broken on upstream, fixes some broken/suspicious code in places, additional safety measures, and support redzones for Valgrind - [zyansheep/rustyline-async][4]: - tab completion callback and -`CTRL+\` signal quit event for conduwuit console CLI +`CTRL+\` signal quit event for Continuwuity console CLI - [rust-rocksdb/rust-rocksdb][5]: - [`@zaidoon1`][8]'s fork has quicker updates, more up to date dependencies, etc. Our fork fixes musl build @@ -97,7 +97,7 @@ alongside other logging/metrics things ## Debugging with `tokio-console` [`tokio-console`][7] can be a useful tool for debugging and profiling. 
To make a -`tokio-console`-enabled build of conduwuit, enable the `tokio_console` feature, +`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature, disable the default `release_max_log_level` feature, and set the `--cfg tokio_unstable` flag to enable experimental tokio APIs. A build might look like this: @@ -109,7 +109,7 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \ --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console ``` -You will also need to enable the `tokio_console` config option in conduwuit when +You will also need to enable the `tokio_console` config option in Continuwuity when starting it. This was due to tokio-console causing gradual memory leak/usage if left enabled. diff --git a/docs/development/hot_reload.md b/docs/development/hot_reload.md index 018eb4b3..ecfb6396 100644 --- a/docs/development/hot_reload.md +++ b/docs/development/hot_reload.md @@ -1,8 +1,11 @@ # Hot Reloading ("Live" Development) +Note that hot reloading has not been refactored in quite a while and is not +guaranteed to work at this time. + ### Summary -When developing in debug-builds with the nightly toolchain, conduwuit is modular +When developing in debug-builds with the nightly toolchain, Continuwuity is modular using dynamic libraries and various parts of the application are hot-reloadable while the server is running: http api handlers, admin commands, services, database, etc. These are all split up into individual workspace crates as seen @@ -39,7 +42,7 @@ library, macOS, and likely other host architectures are not supported (if other architectures work, feel free to let us know and/or make a PR updating this). 
This should work on GNU ld and lld (rust-lld) and gcc/clang, however if you happen to have linker issues it's recommended to try using `mold` or `gold` -linkers, and please let us know in the [conduwuit Matrix room][7] the linker +linkers, and please let us know in the [Continuwuity Matrix room][7] the linker error and what linker solved this issue so we can figure out a solution. Ideally there should be minimal friction to using this, and in the future a build script (`build.rs`) may be suitable to making this easier to use if the capabilities @@ -49,13 +52,13 @@ allow us. As of 19 May 2024, the instructions for using this are: -0. Have patience. Don't hesitate to join the [conduwuit Matrix room][7] to +0. Have patience. Don't hesitate to join the [Continuwuity Matrix room][7] to receive help using this. As indicated by the various rustflags used and some of the interesting issues linked at the bottom, this is definitely not something the Rust ecosystem or toolchain is used to doing. 1. Install the nightly toolchain using rustup. You may need to use `rustup - override set nightly` in your local conduwuit directory, or use `cargo + override set nightly` in your local Continuwuity directory, or use `cargo +nightly` for all actions. 2. Uncomment `cargo-features` at the top level / root Cargo.toml @@ -82,14 +85,14 @@ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.rustup/toolchains/nightly-x86_64-unknown Cargo should only rebuild what was changed / what's necessary, so it should not be rebuilding all the crates. -9. In your conduwuit server terminal, hit/send `CTRL+C` signal. This will tell - conduwuit to find which libraries need to be reloaded, and reloads them as +9. In your Continuwuity server terminal, hit/send `CTRL+C` signal. This will tell + Continuwuity to find which libraries need to be reloaded, and reloads them as necessary. 10. If there were no errors, it will tell you it successfully reloaded `#` modules, and your changes should now be visible. 
Repeat 7 - 9 as needed. -To shutdown conduwuit in this setup, hit/send `CTRL+\`. Normal builds still +To shutdown Continuwuity in this setup, hit/send `CTRL+\`. Normal builds still shutdown with `CTRL+C` as usual. Steps 1 - 5 are the initial first-time steps for using this. To remove the hot @@ -98,7 +101,7 @@ reload setup, revert/comment all the Cargo.toml changes. As mentioned in the requirements section, if you happen to have some linker issues, try using the `-fuse-ld=` rustflag and specify mold or gold in all the `rustflags` definitions in the top level Cargo.toml, and please let us know in -the [conduwuit Matrix room][7] the problem. mold can be installed typically +the [Continuwuity Matrix room][7] the problem. mold can be installed typically through your distro, and gold is provided by the binutils package. It's possible a helper script can be made to do all of this, or most preferably @@ -133,7 +136,7 @@ acyclic graph. The primary rule is simple and illustrated in the figure below: **no crate is allowed to call a function or use a variable from a crate below it.** -![conduwuit's dynamic library setup diagram - created by Jason +![Continuwuity's dynamic library setup diagram - created by Jason Volk](assets/libraries.png) When a symbol is referenced between crates they become bound: **crates cannot be @@ -144,7 +147,7 @@ by using an `RTLD_LOCAL` binding for just one link between the main executable and the first crate, freeing the executable from all modules as no global binding ever occurs between them. -![conduwuit's reload and load order diagram - created by Jason +![Continuwuity's reload and load order diagram - created by Jason Volk](assets/reload_order.png) Proper resource management is essential for reliable reloading to occur. This is @@ -193,5 +196,5 @@ The initial implementation PR is available [here][1]. 
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049 [5]: https://github.com/rust-lang/cargo/issues/12746 [6]: https://crates.io/crates/hot-lib-reloader/ -[7]: https://matrix.to/#/#conduwuit:puppygock.gay +[7]: https://matrix.to/#/#continuwuity:continuwuity.org [8]: https://crates.io/crates/libloading diff --git a/docs/development/testing.md b/docs/development/testing.md index 2d421767..a577698a 100644 --- a/docs/development/testing.md +++ b/docs/development/testing.md @@ -5,12 +5,11 @@ Have a look at [Complement's repository][complement] for an explanation of what it is. -To test against Complement, with Nix (or [Lix](https://lix.systems) and direnv -installed and set up, you can: +To test against Complement, with Nix (or [Lix](https://lix.systems) and +[direnv installed and set up][direnv] (run `direnv allow` after setting up the hook), you can: -* Run `./bin/complement "$COMPLEMENT_SRC" ./path/to/logs.jsonl -./path/to/results.jsonl` to build a Complement image, run the tests, and output -the logs and results to the specified paths. This will also output the OCI image +* Run `./bin/complement "$COMPLEMENT_SRC"` to build a Complement image, run +the tests, and output the logs and results to the specified paths. This will also output the OCI image at `result` * Run `nix build .#complement` from the root of the repository to just build a Complement OCI image outputted to `result` (it's a `.tar.gz` file) @@ -18,5 +17,15 @@ Complement OCI image outputted to `result` (it's a `.tar.gz` file) output from the commit/revision you want to test (e.g. from main) [here][ci-workflows] +If you want to use your own prebuilt OCI image (such as from our CI) without needing +Nix installed, put the image at `complement_oci_image.tar.gz` in the root of the repo +and run the script. + +If you're on macOS and need to build an image, run `nix build .#linux-complement`. + +We have a Complement fork as some tests have needed to be fixed. 
This can be found +at: + [ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo [complement]: https://github.com/matrix-org/complement +[direnv]: https://direnv.net/docs/hook.html diff --git a/docs/differences.md b/docs/differences.md deleted file mode 100644 index 18ea7a1f..00000000 --- a/docs/differences.md +++ /dev/null @@ -1,379 +0,0 @@ -#### **Note: This list may not up to date. There are rapidly more and more -improvements, fixes, changes, etc being made that it is becoming more difficult -to maintain this list. I recommend that you give conduwuit a try and see the -differences for yourself. If you have any concerns, feel free to join the -conduwuit Matrix room and ask any pre-usage questions.** - -### list of features, bug fixes, etc that conduwuit does that Conduit does not - -Outgoing typing indicators, outgoing read receipts, **and** outgoing presence! - -## Performance - -- Concurrency support for individual homeserver key fetching for faster remote -room joins and room joins that will error less frequently -- Send `Cache-Control` response header with `immutable` and 1 year cache length -for all media requests (download and thumbnail) to instruct clients to cache -media, and reduce server load from media requests that could be otherwise cached -- Add feature flags and config options to enable/build with zstd, brotli, and/or -gzip HTTP body compression (response and request) -- Eliminate all usage of the thread-blocking `getaddrinfo(3)` call upon DNS -queries, significantly improving federation latency/ping and cache DNS results -(NXDOMAINs, successful queries, etc) using hickory-dns / hickory-resolver -- Enable HTTP/2 support on all requests -- Vastly improve RocksDB default settings to use new features that help with -performance significantly, uses settings tailored to SSDs, various ways to tweak -RocksDB, and a conduwuit setting to tell RocksDB to use settings that are 
-tailored to HDDs or slow spinning rust storage or buggy filesystems. -- Implement database flush and cleanup conduwuit operations when using RocksDB -- Implement RocksDB write buffer corking and coalescing in database write-heavy -areas -- Perform connection pooling and keepalives where necessary to significantly -improve federation performance and latency -- Various config options to tweak connection pooling, request timeouts, -connection timeouts, DNS timeouts and settings, etc with good defaults which -also help huge with performance via reusing connections and retrying where -needed -- Properly get and use the amount of parallelism / tokio workers -- Implement building conduwuit with jemalloc (which extends to the RocksDB -jemalloc feature for maximum gains) or hardened_malloc light variant, and -io_uring support, and produce CI builds with jemalloc and io_uring by default -for performance (Nix doesn't seem to build -[hardened_malloc-rs](https://github.com/girlbossceo/hardened_malloc-rs) -properly) -- Add support for caching DNS results with hickory-dns / hickory-resolver in -conduwuit (not a replacement for a proper resolver cache, but still far better -than nothing), also properly falls back on TCP for UDP errors or if a SRV -response is too large -- Add config option for using DNS over TCP, and config option for controlling -A/AAAA record lookup strategy (e.g. 
don't query AAAA records if you don't have -IPv6 connectivity) -- Overall significant database, Client-Server, and federation performance and -latency improvements (check out the ping room leaderboards if you don't believe -me :>) -- Add config options for RocksDB compression and bottommost compression, -including choosing the algorithm and compression level -- Use [loole](https://github.com/mahdi-shojaee/loole) MPSC channels instead of -tokio MPSC channels for huge performance boosts in sending channels (mainly -relevant for federation) and presence channels -- Use `tracing`/`log`'s `release_max_level_info` feature to improve performance, -build speeds, binary size, and CPU usage in release builds by avoid compiling -debug/trace log level macros that users will generally never use (can be -disabled with a build-time feature flag) -- Remove some unnecessary checks on EDU handling for incoming transactions, -effectively speeding them up -- Simplify, dedupe, etc huge chunks of the codebase, including some that were -unnecessary overhead, binary bloats, or preventing compiler/linker optimisations -- Implement zero-copy RocksDB database accessors, substantially improving -performance caused by unnecessary memory allocations - -## General Fixes/Features - -- Add legacy Element client hack fixing password changes and deactivations on -legacy Element Android/iOS due to usage of an unspecced `user` field for UIAA -- Raise and improve all the various request timeouts making some things like -room joins and client bugs error less or none at all than they should, and make -them all user configurable -- Add missing `reason` field to user ban events (`/ban`) -- Safer and cleaner shutdowns across incoming/outgoing requests (graceful -shutdown) and the database -- Stop sending `make_join` requests on room joins if 15 servers respond with -`M_UNSUPPORTED_ROOM_VERSION` or `M_INVALID_ROOM_VERSION` -- Stop sending `make_join` requests if 50 servers cannot provide `make_join` for -us 
-- Respect *most* client parameters for `/media/` requests (`allow_redirect` -still needs work) -- Return joined member count of rooms for push rules/conditions instead of a -hardcoded value of 10 -- Make `CONDUIT_CONFIG` optional, relevant for container users that configure -only by environment variables and no longer need to set `CONDUIT_CONFIG` to an -empty string. -- Allow HEAD and PATCH (MSC4138) HTTP requests in CORS for clients (despite not -being explicity mentioned in Matrix spec, HTTP spec says all HEAD requests need -to behave the same as GET requests, Synapse supports HEAD requests) -- Fix using conduwuit with flake-compat on NixOS -- Resolve and remove some "features" from upstream that result in concurrency -hazards, exponential backoff issues, or arbitrary performance limiters -- Find more servers for outbound federation `/hierarchy` requests instead of -just the room ID server name -- Support for suggesting servers to join through at -`/_matrix/client/v3/directory/room/{roomAlias}` -- Support for suggesting servers to join through us at -`/_matrix/federation/v1/query/directory` -- Misc edge-case search fixes (e.g. potentially missing some events) -- Misc `/sync` fixes (e.g. returning unnecessary data or incorrect/invalid -responses) -- Add `replaces_state` and `prev_sender` in `unsigned` for state event changes -which primarily makes Element's "See history" button on a state event functional -- Fix Conduit not allowing incoming federation requests for various world -readable rooms -- Fix Conduit not respecting the client-requested file name on media requests -- Prevent sending junk / non-membership events to `/send_join` and `/send_leave` -endpoints -- Only allow the requested membership type on `/send_join` and `/send_leave` -endpoints (e.g. 
don't allow leave memberships on join endpoints) -- Prevent state key impersonation on `/send_join` and `/send_leave` endpoints -- Validate `X-Matrix` origin and request body `"origin"` field on incoming -transactions -- Add `GET /_matrix/client/v1/register/m.login.registration_token/validity` -endpoint -- Explicitly define support for sliding sync at `/_matrix/client/versions` -(`org.matrix.msc3575`) -- Fix seeing empty status messages on user presences - -## Moderation - -- (Also see [Admin Room](#admin-room) for all the admin commands pertaining to -moderation, there's a lot!) -- Add support for room banning/blocking by ID using admin command -- Add support for serving `support` well-known from `[global.well_known]` -(MSC1929) (`/.well-known/matrix/support`) -- Config option to forbid publishing rooms to the room directory -(`lockdown_public_room_directory`) except for admins -- Admin commands to delete room aliases and unpublish rooms from our room -directory -- For all -[`/report`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3roomsroomidreporteventid) -requests: check if the reported event ID belongs to the reported room ID, raise -report reasoning character limit to 750, fix broken formatting, make a small -delayed random response per spec suggestion on privacy, and check if the sender -user is in the reported room. 
-- Support blocking servers from downloading remote media from, returning a 404 -- Don't allow `m.call.invite` events to be sent in public rooms (prevents -calling the entire room) -- On new public room creations, only allow moderators to send `m.call.invite`, -`org.matrix.msc3401.call`, and `org.matrix.msc3401.call.member` events to -prevent unprivileged users from calling the entire room -- Add support for a "global ACLs" feature (`forbidden_remote_server_names`) that -blocks inbound remote room invites, room joins by room ID on server name, room -joins by room alias on server name, incoming federated joins, and incoming -federated room directory requests. This is very helpful for blocking servers -that are purely toxic/bad and serve no value in allowing our users to suffer -from things like room invite spam or such. Please note that this is not a -substitute for room ACLs. -- Add support for a config option to forbid our local users from sending -federated room directory requests for -(`forbidden_remote_room_directory_server_names`). Similar to above, useful for -blocking servers that help prevent our users from wandering into bad areas of -Matrix via room directories of those malicious servers. 
-- Add config option for auto remediating/deactivating local non-admin users who -attempt to join bad/forbidden rooms (`auto_deactivate_banned_room_attempts`) -- Deactivating users will remove their profile picture, blurhash, display name, -and leave all rooms by default just like Synapse and for additional privacy -- Reject some EDUs from ACL'd users such as read receipts and typing indicators - -## Privacy/Security - -- Add config option for device name federation with a privacy-friendly default -(disabled) -- Add config option for requiring authentication to the `/publicRooms` endpoint -(room directory) with a default enabled for privacy -- Add config option for federating `/publicRooms` endpoint (room directory) to -other servers with a default disabled for privacy -- Uses proper `argon2` crate by RustCrypto instead of questionable `rust-argon2` -crate -- Generate passwords with 25 characters instead of 15 -- Config option `ip_range_denylist` to support refusing to send requests -(typically federation) to specific IP ranges, typically RFC 1918, non-routable, -testnet, etc addresses like Synapse for security (note: this is not a guaranteed -protection, and you should be using a firewall with zones if you want guaranteed -protection as doing this on the application level is prone to bypasses). -- Config option to block non-admin users from sending room invites or receiving -remote room invites. Admin users are still allowed. 
-- Config option to disable incoming and/or outgoing remote read receipts -- Config option to disable incoming and/or outgoing remote typing indicators -- Config option to disable incoming, outgoing, and/or local presence and for -timing out remote users -- Sanitise file names for the `Content-Disposition` header for all media -requests (thumbnails, downloads, uploads) -- Media repository on handling `Content-Disposition` and `Content-Type` is fully -spec compliant and secured -- Send secure default HTTP headers such as a strong restrictive CSP (see -MSC4149), deny iframes, disable `X-XSS-Protection`, disable interest cohort in -`Permission-Policy`, etc to mitigate any potential attack surface such as from -untrusted media - -## Administration/Logging - -- Commandline argument to specify the path to a config file instead of relying -on `CONDUIT_CONFIG` -- Revamped admin room infrastructure and commands -- Substantially clean up, improve, and fix logging (less noisy dead server -logging, registration attempts, more useful troubleshooting logging, proper -error propagation, etc) -- Configurable RocksDB logging (`LOG` files) with proper defaults (rotate, max -size, verbosity, etc) to stop LOG files from accumulating so much -- Explicit startup error if your configuration allows open registration without -a token or such like Synapse with a way to bypass it if needed -- Replace the lightning bolt emoji option with support for setting any arbitrary -text (e.g. 
another emoji) to suffix to all new user registrations, with a -conduwuit default of "🏳️‍⚧️" -- Implement config option to auto join rooms upon registration -- Warn on unknown config options specified -- Add `/_conduwuit/server_version` route to return the version of conduwuit -without relying on the federation API `/_matrix/federation/v1/version` -- Add `/_conduwuit/local_user_count` route to return the amount of registered -active local users on your homeserver *if federation is enabled* -- Add configurable RocksDB recovery modes to aid in recovering corrupted RocksDB -databases -- Support config options via `CONDUWUIT_` prefix and accessing non-global struct -config options with the `__` split (e.g. `CONDUWUIT_WELL_KNOWN__SERVER`) -- Add support for listening on multiple TCP ports and multiple addresses -- **Opt-in** Sentry.io telemetry and metrics, mainly used for crash reporting -- Log the client IP on various requests such as registrations, banned room join -attempts, logins, deactivations, federation transactions, etc -- Fix Conduit dropping some remote server federation response errors - -## Maintenance/Stability - -- GitLab CI ported to GitHub Actions -- Add support for the Matrix spec compliance test suite -[Complement](https://github.com/matrix-org/complement/) via the Nix flake and -various other fixes for it -- Implement running and diff'ing Complement results in CI and error if any -mismatch occurs to prevent large cases of conduwuit regressions -- Repo is (officially) mirrored to GitHub, GitLab, git.gay, git.girlcock.ceo, -sourcehut, and Codeberg (see README.md for their links) -- Docker container images published to GitLab Container Registry, GitHub -Container Registry, and Dockerhub -- Extensively revamp the example config to be extremely helpful and useful to -both new users and power users -- Fixed every single clippy (default lints) and rustc warnings, including some -that were performance related or potential safety issues / unsoundness -- Add 
a **lot** of other clippy and rustc lints and a rustfmt.toml file -- Repo uses [Renovate](https://docs.renovatebot.com/) and keeps ALL -dependencies as up to date as possible -- Purge unmaintained/irrelevant/broken database backends (heed, sled, persy) and -other unnecessary code or overhead -- webp support for images -- Add cargo audit support to CI -- Add documentation lints via lychee and markdownlint-cli to CI -- CI tests for all sorts of feature matrixes (jemalloc, non-defaullt, all -features, etc) -- Add static and dynamic linking smoke tests in CI to prevent any potential -linking regressions for Complement, static binaries, Nix devshells, etc -- Add timestamp by commit date when building OCI images for keeping image build -reproducibility and still have a meaningful "last modified date" for OCI image -- Add timestamp by commit date via `SOURCE_DATE_EPOCH` for Debian packages -- Startup check if conduwuit running in a container and is listening on -127.0.0.1 (generally containers are using NAT networking and 0.0.0.0 is the -intended listening address) -- Add a panic catcher layer to return panic messages in HTTP responses if a -panic occurs -- Add full compatibility support for SHA256 media file names instead of base64 -file names to overcome filesystem file name length limitations (OS error file -name too long) while still retaining upstream database compatibility -- Remove SQLite support due to being very poor performance, difficult to -maintain against RocksDB, and is a blocker to significantly improved database -code - -## Admin Room - -- Add support for a console CLI interface that can issue admin commands and -output them in your terminal -- Add support for an admin-user-only commandline admin room interface that can -be issued in any room with the `\\!admin` or `\!admin` prefix and returns the -response as yourself in the same room -- Add admin commands for uptime, server startup, server shutdown, and server -restart -- Fix admin room handler to not 
panic/crash if the admin room command response -fails (e.g. too large message) -- Add command to dynamically change conduwuit's tracing log level filter on the -fly -- Add admin command to fetch a server's `/.well-known/matrix/support` file -- Add debug admin command to force update user device lists (could potentially -resolve some E2EE flukes) -- Implement **RocksDB online backups**, listing RocksDB backups, and listing -database file counts all via admin commands -- Add various database visibility commands such as being able to query the -getters and iterators used in conduwuit, a very helpful online debugging utility -- Forbid the admin room from being made public or world readable history -- Add `!admin` as a way to call the admin bot -- Extend clear cache admin command to support clearing more caches such as DNS -and TLS name overrides -- Admin debug command to send a federation request/ping to a server's -`/_matrix/federation/v1/version` endpoint and measures the latency it took -- Add admin command to bulk delete media via a codeblock list of MXC URLs. -- Add admin command to delete both the thumbnail and media MXC URLs from an -event ID (e.g. from an abuse report) -- Add admin command to list all the rooms a local user is joined in -- Add admin command to list joined members in a room -- Add admin command to view the room topic of a room -- Add admin command to delete all remote media in the past X minutes as a form -of deleting media that you don't want on your server that a remote user posted -in a room, a `--force` flag to ignore errors, and support for reading `last -modified time` instead of `creation time` for filesystems that don't support -file created metadata -- Add admin command to return a room's full/complete state -- Admin debug command to fetch a PDU from a remote server and inserts it into -our database/timeline as backfill -- Add admin command to delete media via a specific MXC. 
This deletes the MXC -from our database, and the file locally. -- Add admin commands for banning (blocking) room IDs from our local users -joining (admins are always allowed) and evicts all our local users from that -room, in addition to bulk room banning support, and blocks room invites (remote -and local) to the banned room, as a moderation feature -- Add admin commands to output jemalloc memory stats and memory usage -- Add admin command to get rooms a *remote* user shares with us -- Add debug admin commands to get the earliest and latest PDU in a room -- Add debug admin command to echo a message -- Add admin command to insert rooms tags for a user, most useful for inserting -the `m.server_notice` tag on your admin room to make it "persistent" in the -"System Alerts" section of Element -- Add experimental admin debug command for Dendrite's `AdminDownloadState` -(`/admin/downloadState/{serverName}/{roomID}`) admin API endpoint to download -and use a remote server's room state in the room -- Disable URL previews by default in the admin room due to various command -outputs having "URLs" in them that clients may needlessly render/request -- Extend memory usage admin server command to support showing memory allocator -stats such as jemalloc's -- Add admin debug command to see memory allocator's full extended debug -statistics such as jemalloc's - -## Misc - -- Add guest support for accessing TURN servers via `turn_allow_guests` like -Synapse -- Support for creating rooms with custom room IDs like Maunium Synapse -(`room_id` request body field to `/createRoom`) -- Query parameter `?format=event|content` for returning either the room state -event's content (default) for the full room state event on -`/_matrix/client/v3/rooms/{roomId}/state/{eventType}[/{stateKey}]` requests (see -) -- Send a User-Agent on all of our requests -- Send `avatar_url` on invite room membership events/changes -- Support sending [`well_known` response to client login 
-responses](https://spec.matrix.org/v1.10/client-server-api/#post_matrixclientv3login) -if using config option `[well_known.client]` -- Implement `include_state` search criteria support for `/search` requests -(response now can include room states) -- Declare various missing Matrix versions and features at -`/_matrix/client/versions` -- Implement legacy Matrix `/v1/` media endpoints that some clients and servers -may still call -- Config option to change Conduit's behaviour of homeserver key fetching -(`query_trusted_key_servers_first`). This option sets whether conduwuit will -query trusted notary key servers first before the individual homeserver(s), or -vice versa which may help in joining certain rooms. -- Implement unstable MSC2666 support for querying mutual rooms with a user -- Implement unstable MSC3266 room summary API support -- Implement unstable MSC4125 support for specifying servers to join via on -federated invites -- Make conduwuit build and be functional under Nix + macOS -- Log out all sessions after unsetting the emergency password -- Assume well-knowns are broken if they exceed past 12288 characters. 
-- Add support for listening on both HTTP and HTTPS if using direct TLS with -conduwuit for usecases such as Complement -- Add config option for disabling RocksDB Direct IO if needed -- Add various documentation on maintaining conduwuit, using RocksDB online -backups, some troubleshooting, using admin commands, moderation documentation, -etc -- (Developers): Add support for [hot reloadable/"live" modular -development](development/hot_reload.md) -- (Developers): Add support for tokio-console -- (Developers): Add support for tracing flame graphs -- No cryptocurrency donations allowed, conduwuit is fully maintained by -independent queer maintainers, and with a strong priority on inclusitivity and -comfort for protected groups 🏳️‍⚧️ -- [Add a community Code of Conduct for all conduwuit community spaces, primarily -the Matrix space](https://conduwuit.puppyirl.gay/conduwuit_coc.html) diff --git a/docs/introduction.md b/docs/introduction.md index 9db76681..d193f7c7 100644 --- a/docs/introduction.md +++ b/docs/introduction.md @@ -1,18 +1,14 @@ -# conduwuit +# Continuwuity {{#include ../README.md:catchphrase}} {{#include ../README.md:body}} -#### What's different about your fork than upstream Conduit? - -See the [differences](differences.md) page - #### How can I deploy my own? - [Deployment options](deploying.md) -If you want to connect an appservice to conduwuit, take a look at the +If you want to connect an appservice to Continuwuity, take a look at the [appservices documentation](appservices.md). #### How can I contribute? diff --git a/docs/maintenance.md b/docs/maintenance.md index 5c8c853a..b85a1971 100644 --- a/docs/maintenance.md +++ b/docs/maintenance.md @@ -1,14 +1,14 @@ -# Maintaining your conduwuit setup +# Maintaining your Continuwuity setup ## Moderation -conduwuit has moderation through admin room commands. "binary commands" (medium +Continuwuity has moderation through admin room commands. 
"binary commands" (medium priority) and an admin API (low priority) is planned. Some moderation-related config options are available in the example config such as "global ACLs" and blocking media requests to certain servers. See the example config for the moderation config options under the "Moderation / Privacy / Security" section. -conduwuit has moderation admin commands for: +Continuwuity has moderation admin commands for: - managing room aliases (`!admin rooms alias`) - managing room directory (`!admin rooms directory`) @@ -36,7 +36,7 @@ each object being newline delimited. An example of doing this is: ## Database (RocksDB) Generally there is very little you need to do. [Compaction][rocksdb-compaction] -is ran automatically based on various defined thresholds tuned for conduwuit to +is run automatically based on various defined thresholds tuned for Continuwuity to be high performance with the least I/O amplifcation or overhead. Manually running compaction is not recommended, or compaction via a timer, due to creating unnecessary I/O amplification. RocksDB is built with io_uring support @@ -50,7 +50,7 @@ Some RocksDB settings can be adjusted such as the compression method chosen. See the RocksDB section in the [example config](configuration/examples.md). btrfs users have reported that database compression does not need to be disabled -on conduwuit as the filesystem already does not attempt to compress. This can be +on Continuwuity as the filesystem already does not attempt to compress. This can be validated by using `filefrag -v` on a `.SST` file in your database, and ensure the `physical_offset` matches (no filesystem compression). It is very important to ensure no additional filesystem compression takes place as this can render @@ -70,7 +70,7 @@ they're server logs or database logs, however they are critical RocksDB files related to WAL tracking. The only safe files that can be deleted are the `LOG` files (all caps). 
These -are the real RocksDB telemetry/log files, however conduwuit has already +are the real RocksDB telemetry/log files, however Continuwuity has already configured to only store up to 3 RocksDB `LOG` files due to generall being useless for average users unless troubleshooting something low-level. If you would like to store nearly none at all, see the `rocksdb_max_log_files` @@ -88,7 +88,7 @@ still be joined together. To restore a backup from an online RocksDB backup: -- shutdown conduwuit +- shutdown Continuwuity - create a new directory for merging together the data - in the online backup created, copy all `.sst` files in `$DATABASE_BACKUP_PATH/shared_checksum` to your new directory @@ -99,9 +99,9 @@ To restore a backup from an online RocksDB backup: if you have multiple) to your new directory - set your `database_path` config option to your new directory, or replace your old one with the new one you crafted -- start up conduwuit again and it should open as normal +- start up Continuwuity again and it should open as normal -If you'd like to do an offline backup, shutdown conduwuit and copy your +If you'd like to do an offline backup, shutdown Continuwuity and copy your `database_path` directory elsewhere. This can be restored with no modifications needed. @@ -110,7 +110,7 @@ directory. ## Media -Media still needs various work, however conduwuit implements media deletion via: +Media still needs various work, however Continuwuity implements media deletion via: - MXC URI or Event ID (unencrypted and attempts to find the MXC URI in the event) @@ -118,17 +118,17 @@ event) - Delete remote media in the past `N` seconds/minutes via filesystem metadata on the file created time (`btime`) or file modified time (`mtime`) -See the `!admin media` command for further information. All media in conduwuit +See the `!admin media` command for further information. All media in Continuwuity is stored at `$DATABASE_DIR/media`. This will be configurable soon. 
If you are finding yourself needing extensive granular control over media, we recommend looking into [Matrix Media -Repo](https://github.com/t2bot/matrix-media-repo). conduwuit intends to +Repo](https://github.com/t2bot/matrix-media-repo). Continuwuity intends to implement various utilities for media, but MMR is dedicated to extensive media management. Built-in S3 support is also planned, but for now using a "S3 filesystem" on -`media/` works. conduwuit also sends a `Cache-Control` header of 1 year and +`media/` works. Continuwuity also sends a `Cache-Control` header of 1 year and immutable for all media requests (download and thumbnail) to reduce unnecessary media requests from browsers, reduce bandwidth usage, and reduce load. diff --git a/docs/static/_headers b/docs/static/_headers new file mode 100644 index 00000000..6e52de9f --- /dev/null +++ b/docs/static/_headers @@ -0,0 +1,6 @@ +/.well-known/matrix/* + Access-Control-Allow-Origin: * + Content-Type: application/json +/.well-known/continuwuity/* + Access-Control-Allow-Origin: * + Content-Type: application/json \ No newline at end of file diff --git a/docs/static/announcements.json b/docs/static/announcements.json new file mode 100644 index 00000000..9b97d091 --- /dev/null +++ b/docs/static/announcements.json @@ -0,0 +1,9 @@ +{ + "$schema": "https://continuwuity.org/schema/announcements.schema.json", + "announcements": [ + { + "id": 1, + "message": "Welcome to Continuwuity! Important announcements about the project will appear here." 
+ } + ] +} \ No newline at end of file diff --git a/docs/static/announcements.schema.json b/docs/static/announcements.schema.json new file mode 100644 index 00000000..95b1d153 --- /dev/null +++ b/docs/static/announcements.schema.json @@ -0,0 +1,31 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "$id": "https://continuwuity.org/schema/announcements.schema.json", + "type": "object", + "properties": { + "announcements": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "integer" + }, + "message": { + "type": "string" + }, + "date": { + "type": "string" + } + }, + "required": [ + "id", + "message" + ] + } + } + }, + "required": [ + "announcements" + ] + } \ No newline at end of file diff --git a/docs/static/client b/docs/static/client new file mode 100644 index 00000000..c2b70a14 --- /dev/null +++ b/docs/static/client @@ -0,0 +1 @@ +{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"}} diff --git a/docs/static/server b/docs/static/server new file mode 100644 index 00000000..a3099f6e --- /dev/null +++ b/docs/static/server @@ -0,0 +1 @@ +{"m.server":"matrix.continuwuity.org:443"} diff --git a/docs/static/support b/docs/static/support new file mode 100644 index 00000000..6b7a9860 --- /dev/null +++ b/docs/static/support @@ -0,0 +1,24 @@ +{ + "contacts": [ + { + "email_address": "security@continuwuity.org", + "role": "m.role.security" + }, + { + "matrix_id": "@tom:continuwuity.org", + "email_address": "tom@tcpip.uk", + "role": "m.role.admin" + }, + { + "matrix_id": "@jade:continuwuity.org", + "email_address": "jade@continuwuity.org", + "role": "m.role.admin" + }, + { + "matrix_id": "@nex:continuwuity.org", + "email_address": "nex@continuwuity.org", + "role": "m.role.admin" + } + ], + "support_page": "https://continuwuity.org/introduction#contact" +} \ No newline at end of file diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 
d25c9762..d84dbc7a 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,47 +1,48 @@ -# Troubleshooting conduwuit +# Troubleshooting Continuwuity -> ## Docker users ⚠️ +> **Docker users ⚠️** > -> Docker is extremely UX unfriendly. Because of this, a ton of issues or support -> is actually Docker support, not conduwuit support. We also cannot document the -> ever-growing list of Docker issues here. -> -> If you intend on asking for support and you are using Docker, **PLEASE** -> triple validate your issues are **NOT** because you have a misconfiguration in -> your Docker setup. -> -> If there are things like Compose file issues or Dockerhub image issues, those -> can still be mentioned as long as they're something we can fix. +> Docker can be difficult to use and debug. It's common for Docker +> misconfigurations to cause issues, particularly with networking and permissions. +> Please check that your issues are not due to problems with your Docker setup. -## conduwuit and Matrix issues +## Continuwuity and Matrix issues -#### Lost access to admin room +### Lost access to admin room You can reinvite yourself to the admin room through the following methods: -- Use the `--execute "users make_user_admin "` conduwuit binary + +- Use the `--execute "users make_user_admin "` Continuwuity binary argument once to invite yourslf to the admin room on startup -- Use the conduwuit console/CLI to run the `users make_user_admin` command +- Use the Continuwuity console/CLI to run the `users make_user_admin` command - Or specify the `emergency_password` config option to allow you to temporarily log into the server account (`@conduit`) from a web client ## General potential issues -#### Potential DNS issues when using Docker +### Potential DNS issues when using Docker -Docker has issues with its default DNS setup that may cause DNS to not be -properly functional when running conduwuit, resulting in federation issues. 
The -symptoms of this have shown in excessively long room joins (30+ minutes) from -very long DNS timeouts, log entries of "mismatching responding nameservers", +Docker's DNS setup for containers in a non-default network intercepts queries to +enable resolving of container hostnames to IP addresses. However, due to +performance issues with Docker's built-in resolver, this can cause DNS queries +to take a long time to resolve, resulting in federation issues. + +This is particularly common with Docker Compose, as custom networks are easily +created and configured. + +Symptoms of this include excessively long room joins (30+ minutes) from very +long DNS timeouts, log entries of "mismatching responding nameservers", and/or partial or non-functional inbound/outbound federation. -This is **not** a conduwuit issue, and is purely a Docker issue. It is not -sustainable for heavy DNS activity which is normal for Matrix federation. The -workarounds for this are: -- Use DNS over TCP via the config option `query_over_tcp_only = true` -- Don't use Docker's default DNS setup and instead allow the container to use -and communicate with your host's DNS servers (host's `/etc/resolv.conf`) +This is not a bug in Continuwuity. Docker's default DNS resolver is not suitable +for heavy DNS activity, which is normal for federated protocols like Matrix. -#### DNS No connections available error message +Workarounds: + +- Use DNS over TCP via the config option `query_over_tcp_only = true` +- Bypass Docker's default DNS setup and instead allow the container to use and communicate with your host's DNS servers. Typically, this can be done by mounting the host's `/etc/resolv.conf`. 
+ +### DNS No connections available error message If you receive spurious amounts of error logs saying "DNS No connections available", this is due to your DNS server (servers from `/etc/resolv.conf`) @@ -64,7 +65,7 @@ very computationally expensive, and is extremely susceptible to denial of service, especially on Matrix. Many servers also strangely have broken DNSSEC setups and will result in non-functional federation. -conduwuit cannot provide a "works-for-everyone" Unbound DNS setup guide, but +Continuwuity cannot provide a "works-for-everyone" Unbound DNS setup guide, but the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch] may be of interest. Disabling DNSSEC on Unbound is commenting out trust-anchors config options and removing the `validator` module. @@ -75,9 +76,9 @@ high load, and we have identified its DNS caching to not be very effective. dnsmasq can possibly work, but it does **not** support TCP fallback which can be problematic when receiving large DNS responses such as from large SRV records. If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback` -in conduwuit config. +in Continuwuity config. -Raising `dns_cache_entries` in conduwuit config from the default can also assist +Raising `dns_cache_entries` in Continuwuity config from the default can also assist in DNS caching, but a full-fledged external caching resolver is better and more reliable. @@ -91,13 +92,13 @@ reliability at a slight performance cost due to TCP overhead. ## RocksDB / database issues -#### Database corruption +### Database corruption If your database is corrupted *and* is failing to start (e.g. checksum mismatch), it may be recoverable but careful steps must be taken, and there is no guarantee it may be recoverable. -The first thing that can be done is launching conduwuit with the +The first thing that can be done is launching Continuwuity with the `rocksdb_repair` config option set to true. 
This will tell RocksDB to attempt to repair itself at launch. If this does not work, disable the option and continue reading. @@ -109,7 +110,7 @@ RocksDB has the following recovery modes: - `PointInTime` - `SkipAnyCorruptedRecord` -By default, conduwuit uses `TolerateCorruptedTailRecords` as generally these may +By default, Continuwuity uses `TolerateCorruptedTailRecords` as generally these may be due to bad federation and we can re-fetch the correct data over federation. The RocksDB default is `PointInTime` which will attempt to restore a "snapshot" of the data when it was last known to be good. This data can be either a few @@ -126,12 +127,12 @@ if `PointInTime` does not work as a last ditch effort. With this in mind: -- First start conduwuit with the `PointInTime` recovery method. See the [example +- First start Continuwuity with the `PointInTime` recovery method. See the [example config](configuration/examples.md) for how to do this using `rocksdb_recovery_mode` - If your database successfully opens, clients are recommended to clear their client cache to account for the rollback -- Leave your conduwuit running in `PointInTime` for at least 30-60 minutes so as +- Leave your Continuwuity running in `PointInTime` for at least 30-60 minutes so as much possible corruption is restored - If all goes will, you should be able to restore back to using `TolerateCorruptedTailRecords` and you have successfully recovered your database @@ -142,16 +143,16 @@ Note that users should not really be debugging things. If you find yourself debugging and find the issue, please let us know and/or how we can fix it. Various debug commands can be found in `!admin debug`. -#### Debug/Trace log level +### Debug/Trace log level -conduwuit builds without debug or trace log levels at compile time by default +Continuwuity builds without debug or trace log levels at compile time by default for substantial performance gains in CPU usage and improved compile times. 
If you need to access debug/trace log levels, you will need to build without the `release_max_log_level` feature or use our provided static debug binaries. -#### Changing log level dynamically +### Changing log level dynamically -conduwuit supports changing the tracing log environment filter on-the-fly using +Continuwuity supports changing the tracing log environment filter on-the-fly using the admin command `!admin debug change-log-level `. This accepts a string **without quotes** the same format as the `log` config option. @@ -166,9 +167,9 @@ load, simply pass the `--reset` flag. `!admin debug change-log-level --reset` -#### Pinging servers +### Pinging servers -conduwuit can ping other servers using `!admin debug ping `. This takes +Continuwuity can ping other servers using `!admin debug ping `. This takes a server name and goes through the server discovery process and queries `/_matrix/federation/v1/version`. Errors are outputted. @@ -177,15 +178,15 @@ server performance on either side as that endpoint is completely unauthenticated and simply fetches a string on a static JSON endpoint. It is very low cost both bandwidth and computationally. -#### Allocator memory stats +### Allocator memory stats When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you -can see conduwuit's high-level allocator stats by using +can see Continuwuity's high-level allocator stats by using `!admin server memory-usage` at the bottom. If you are a developer, you can also view the raw jemalloc statistics with `!admin debug memory-stats`. Please note that this output is extremely large -which may only be visible in the conduwuit console CLI due to PDU size limits, +which may only be visible in the Continuwuity console CLI due to PDU size limits, and is not easy for non-developers to understand. 
[unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html diff --git a/docs/turn.md b/docs/turn.md index 287f2545..5dba823c 100644 --- a/docs/turn.md +++ b/docs/turn.md @@ -1,6 +1,6 @@ # Setting up TURN/STURN -In order to make or receive calls, a TURN server is required. conduwuit suggests +In order to make or receive calls, a TURN server is required. Continuwuity suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. @@ -17,9 +17,9 @@ realm= A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`. -These same values need to be set in conduwuit. See the [example +These same values need to be set in Continuwuity. See the [example config](configuration/examples.md) in the TURN section for configuring these and -restart conduwuit after. +restart Continuwuity after. `turn_secret` or a path to `turn_secret_file` must have a value of your coturn `static-auth-secret`, or use `turn_username` and `turn_password` @@ -34,7 +34,7 @@ If you are using TURN over TLS, you can replace `turn:` with `turns:` in the TURN over TLS. This is highly recommended. 
If you need unauthenticated access to the TURN URIs, or some clients may be -having trouble, you can enable `turn_guest_access` in conduwuit which disables +having trouble, you can enable `turn_guest_access` in Continuwuity which disables authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer` ### Run diff --git a/engage.toml b/engage.toml index 1d6a5475..210bafd5 100644 --- a/engage.toml +++ b/engage.toml @@ -18,12 +18,12 @@ script = "direnv --version" [[task]] name = "rustc" group = "versions" -script = "rustc --version" +script = "rustc --version -v" [[task]] name = "cargo" group = "versions" -script = "cargo --version" +script = "cargo --version -v" [[task]] name = "cargo-fmt" @@ -60,15 +60,10 @@ name = "markdownlint" group = "versions" script = "markdownlint --version" -[[task]] -name = "dpkg" -group = "versions" -script = "dpkg --version" - [[task]] name = "cargo-audit" group = "security" -script = "cargo audit -D warnings -D unmaintained -D unsound -D yanked" +script = "cargo audit --color=always -D warnings -D unmaintained -D unsound -D yanked" [[task]] name = "cargo-fmt" @@ -86,6 +81,7 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo doc \ --workspace \ + --locked \ --profile test \ --all-features \ --no-deps \ @@ -100,8 +96,8 @@ script = """ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ - --all-targets \ --color=always \ -- \ -D warnings @@ -115,8 +111,8 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ - --all-targets \ --all-features \ --color=always \ -- \ @@ -124,33 +120,37 @@ env DIRENV_DEVSHELL=all-features \ """ [[task]] -name = "clippy/jemalloc" +name = "clippy/no-features" +group = "lints" +script = """ +env DIRENV_DEVSHELL=no-features \ + direnv exec . 
\ + cargo clippy \ + --workspace \ + --locked \ + --profile test \ + --no-default-features \ + --color=always \ + -- \ + -D warnings +""" + +[[task]] +name = "clippy/other-features" group = "lints" script = """ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ - --features jemalloc \ - --all-targets \ + --no-default-features \ + --features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression,blurhashing \ --color=always \ -- \ -D warnings """ -#[[task]] -#name = "clippy/hardened_malloc" -#group = "lints" -#script = """ -#cargo clippy \ -# --workspace \ -# --features hardened_malloc \ -# --all-targets \ -# --color=always \ -# -- \ -# -D warnings -#""" - [[task]] name = "lychee" group = "lints" @@ -161,22 +161,6 @@ name = "markdownlint" group = "lints" script = "markdownlint docs *.md || true" # TODO: fix the ton of markdown lints so we can drop `|| true` -[[task]] -name = "cargo/all" -group = "tests" -script = """ -env DIRENV_DEVSHELL=all-features \ - direnv exec . \ - cargo test \ - --workspace \ - --profile test \ - --all-targets \ - --all-features \ - --color=always \ - -- \ - --color=always -""" - [[task]] name = "cargo/default" group = "tests" @@ -185,8 +169,10 @@ env DIRENV_DEVSHELL=default \ direnv exec . \ cargo test \ --workspace \ + --locked \ --profile test \ --all-targets \ + --no-fail-fast \ --color=always \ -- \ --color=always @@ -201,22 +187,3 @@ depends = ["cargo/default"] script = """ git diff --exit-code conduwuit-example.toml """ - -# Ensure that the flake's default output can build and run without crashing -# -# This is a dynamically-linked jemalloc build, which is a case not covered by -# our other tests. We've had linking problems in the past with dynamic -# jemalloc builds that usually show up as an immediate segfault or "invalid free" -[[task]] -name = "nix-default" -group = "tests" -script = """ -env DIRENV_DEVSHELL=dynamic \ - CARGO_PROFILE="test" \ - direnv exec . 
\ - bin/nix-build-and-cache just .#default-test -env DIRENV_DEVSHELL=dynamic \ - CARGO_PROFILE="test" \ - direnv exec . \ - nix run -L .#default-test -- --help && nix run -L .#default-test -- --version -""" diff --git a/flake.lock b/flake.lock index 5af6ec43..1f87b9b6 100644 --- a/flake.lock +++ b/flake.lock @@ -10,11 +10,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1731270564, - "narHash": "sha256-6KMC/NH/VWP5Eb+hA56hz0urel3jP6Y6cF2PX6xaTkk=", + "lastModified": 1738524606, + "narHash": "sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=", "owner": "zhaofengli", "repo": "attic", - "rev": "47752427561f1c34debb16728a210d378f0ece36", + "rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e", "type": "github" }, "original": { @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1734303596, - "narHash": "sha256-HjDRyLR4MBqQ3IjfMM6eE+8ayztXlbz3gXdyDmFla68=", + "lastModified": 1741891349, + "narHash": "sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=", "owner": "girlbossceo", "repo": "complement", - "rev": "14cc5be797b774f1a2b9f826f38181066d4952b8", + "rev": "e587b3df569cba411aeac7c20b6366d03c143745", "type": "github" }, "original": { @@ -117,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1737689766, - "narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=", + "lastModified": 1739936662, + "narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=", "owner": "ipetkov", "repo": "crane", - "rev": "6fe74265bbb6d016d663b1091f015e2976c4a527", + "rev": "19de14aaeb869287647d9461cbd389187d8ecdb7", "type": "github" }, "original": { @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1737786656, - "narHash": "sha256-ubCW9Jy7ZUOF354bWxTgLDpVnTvIpNr6qR4H/j7I0oo=", + "lastModified": 1740724364, + "narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=", "owner": "nix-community", "repo": "fenix", - "rev": "2f721f527886f801403f389a9cabafda8f1e3b7f", + 
"rev": "edf7d9e431cda8782e729253835f178a356d3aab", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1737600516, - "narHash": "sha256-EKyLQ3pbcjoU5jH5atge59F4fzuhTsb6yalUj6Ve2t8=", + "lastModified": 1740613216, + "narHash": "sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=", "owner": "axboe", "repo": "liburing", - "rev": "6c509e2b0c881a13b83b259a221bf15fc9b3f681", + "rev": "e1003e496e66f9b0ae06674869795edf772d5500", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1737717945, - "narHash": "sha256-ET91TMkab3PmOZnqiJQYOtSGvSTvGeHoegAv4zcTefM=", + "lastModified": 1740547748, + "narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "ecd26a469ac56357fd333946a99086e992452b6a", + "rev": "3a05eebede89661660945da1f151959900903b6a", "type": "github" }, "original": { @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1737828695, - "narHash": "sha256-8Ev6zzhNPU798JNvU27a7gj5X+6SDG3jBweUkQ59DbA=", + "lastModified": 1741308171, + "narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "a4d9230dcc9d03be428b9a728133f8f646c0065c", + "rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.9.3", + "ref": "v9.11.1", "repo": "rocksdb", "type": "github" } @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1737728869, - "narHash": "sha256-U4pl3Hi0lT6GP4ecN3q9wdD2sdaKMbmD/5NJ1NdJ9AM=", + "lastModified": 1740691488, + "narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "6e4c29f7ce18cea7d3d31237a4661ab932eab636", + "rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 
920d3d14..49e860ed 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; @@ -26,7 +26,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-lMLAupxng4Fd9F1oDw8gx+qA0RuF7ou7xhNU8wgs0PU="; + sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { @@ -64,8 +64,10 @@ patches = []; cmakeFlags = pkgs.lib.subtractLists [ - # no real reason to have snappy, no one uses this + # no real reason to have snappy or zlib, no one uses this "-DWITH_SNAPPY=1" + "-DZLIB=1" + "-DWITH_ZLIB=1" # we dont need to use ldb or sst_dump (core_tools) "-DWITH_CORE_TOOLS=1" # we dont need to build rocksdb tests @@ -82,6 +84,8 @@ ++ [ # no real reason to have snappy, no one uses this "-DWITH_SNAPPY=0" + "-DZLIB=0" + "-DWITH_ZLIB=0" # we dont need to use ldb or sst_dump (core_tools) "-DWITH_CORE_TOOLS=0" # we dont need trace tools @@ -140,23 +144,26 @@ toolchain ] ++ (with pkgsHost.pkgs; [ - engage - cargo-audit - # Required by hardened-malloc.rs dep binutils + cargo-audit + cargo-auditable + # Needed for producing Debian packages cargo-deb # Needed for CI to check validity of produced Debian packages (dpkg-deb) dpkg + engage + # Needed for Complement go # Needed for our script for Complement jq + gotestfmt # Needed for finding broken markdown links lychee @@ -169,21 +176,10 @@ # used for rust caching in CI to speed it up sccache - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo ] # liburing is Linux-exclusive ++ lib.optional 
stdenv.hostPlatform.isLinux liburing - # needed to build Rust applications on macOS - ++ lib.optionals stdenv.hostPlatform.isDarwin [ - # https://github.com/NixOS/nixpkgs/issues/206242 - # ld: library not found for -liconv - libiconv - # https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell - # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 - pkgsBuildHost.darwin.apple_sdk.frameworks.Security - ]) + ++ lib.optional stdenv.hostPlatform.isLinux numactl) ++ scope.main.buildInputs ++ scope.main.propagatedBuildInputs ++ scope.main.nativeBuildInputs; diff --git a/nix/pkgs/complement/certificate.crt b/nix/pkgs/complement/certificate.crt new file mode 100644 index 00000000..5dd4fdea --- /dev/null +++ b/nix/pkgs/complement/certificate.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL +BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz +IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy +NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m +ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt +/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88 +awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp +L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK +K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl +8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV +HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy +ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw +DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ +irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+ +HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e +VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3 
+y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d +jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4= +-----END CERTIFICATE----- diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index f20abee2..7f4ecef7 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -6,7 +6,7 @@ allow_public_room_directory_over_federation = true allow_public_room_directory_without_auth = true allow_registration = true database_path = "/database" -log = "trace,h2=warn,hyper=warn" +log = "trace,h2=debug,hyper=debug" port = [8008, 8448] trusted_servers = [] only_query_trusted_key_servers = false @@ -17,21 +17,34 @@ ip_range_denylist = [] url_preview_domain_contains_allowlist = ["*"] url_preview_domain_explicit_denylist = ["*"] media_compat_file_link = false -media_startup_check = false -prune_missing_media = false -log_colors = false +media_startup_check = true +prune_missing_media = true +log_colors = true admin_room_notices = false allow_check_for_updates = false -allow_unstable_room_versions = true -rocksdb_log_level = "debug" +intentionally_unknown_config_option_for_testing = true +rocksdb_log_level = "info" rocksdb_max_log_files = 1 rocksdb_recovery_mode = 0 rocksdb_paranoid_file_checks = true log_guest_registrations = false allow_legacy_media = true -startup_netburst = false +startup_netburst = true +startup_netburst_keep = -1 + +allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true + +# valgrind makes things so slow +dns_timeout = 60 +dns_attempts = 20 +request_conn_timeout = 60 +request_timeout = 120 +well_known_conn_timeout = 60 +well_known_timeout = 60 +federation_idle_timeout = 300 +sender_timeout = 300 +sender_idle_timeout = 300 +sender_retry_backoff_limit = 300 [global.tls] -certs = "/certificate.crt" dual_protocol = true -key = "/private_key.key" diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index e35cbf04..9b010e14 
100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -3,10 +3,8 @@ , buildEnv , coreutils , dockerTools -, gawk , lib , main -, openssl , stdenv , tini , writeShellScriptBin @@ -18,18 +16,12 @@ let all_features = true; disable_release_max_log_level = true; disable_features = [ - # no reason to use jemalloc for complement, just has compatibility/build issues - "jemalloc" - "jemalloc_stats" - "jemalloc_prof" # console/CLI stuff isn't used or relevant for complement "console" "tokio_console" # sentry telemetry isn't useful for complement, disabled by default anyways "sentry_telemetry" "perf_measurements" - # the containers don't use or need systemd signal support - "systemd" # this is non-functional on nix for some reason "hardened_malloc" # dont include experimental features @@ -48,28 +40,6 @@ let start = writeShellScriptBin "start" '' set -euxo pipefail - ${lib.getExe openssl} genrsa -out private_key.key 2048 - ${lib.getExe openssl} req \ - -new \ - -sha256 \ - -key private_key.key \ - -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=$SERVER_NAME" \ - -out signing_request.csr - cp ${./v3.ext} v3.ext - echo "DNS.1 = $SERVER_NAME" >> v3.ext - echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \ - >> v3.ext - ${lib.getExe openssl} x509 \ - -req \ - -extfile v3.ext \ - -in signing_request.csr \ - -CA /complement/ca/ca.crt \ - -CAkey /complement/ca/ca.key \ - -CAcreateserial \ - -out certificate.crt \ - -days 1 \ - -sha256 - ${lib.getExe' coreutils "env"} \ CONDUWUIT_SERVER_NAME="$SERVER_NAME" \ ${lib.getExe main'} @@ -105,7 +75,8 @@ dockerTools.buildImage { else []; Env = [ - "SSL_CERT_FILE=/complement/ca/ca.crt" + "CONDUWUIT_TLS__KEY=${./private_key.key}" + "CONDUWUIT_TLS__CERTS=${./certificate.crt}" "CONDUWUIT_CONFIG=${./config.toml}" "RUST_BACKTRACE=full" ]; diff --git a/nix/pkgs/complement/private_key.key b/nix/pkgs/complement/private_key.key new file mode 100644 index 00000000..5b9d4d4f --- /dev/null +++ 
b/nix/pkgs/complement/private_key.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb +iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT +LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a +09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc +ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga +Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO +/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu +WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB +DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb +piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN +D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ +8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+ +3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq +/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90 +FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q +td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M +Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A +91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV +8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh +VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW +UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K +kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz +KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7 +IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh +tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM +9MVtdgSkuh2gwkD/mMoAJXM= +-----END PRIVATE KEY----- diff --git a/nix/pkgs/complement/signing_request.csr b/nix/pkgs/complement/signing_request.csr new file mode 100644 index 00000000..e2aa658e --- /dev/null +++ b/nix/pkgs/complement/signing_request.csr @@ -0,0 
+1,16 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK +DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH +uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR +xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb +o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B +hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe +vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB +CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79 +ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3 +r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb +XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK +MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76 +U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/ +-----END CERTIFICATE REQUEST----- diff --git a/nix/pkgs/complement/v3.ext b/nix/pkgs/complement/v3.ext index 6083d960..0deaa48a 100644 --- a/nix/pkgs/complement/v3.ext +++ b/nix/pkgs/complement/v3.ext @@ -4,3 +4,9 @@ keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment subjectAltName = @alt_names [alt_names] +DNS.1 = *.docker.internal +DNS.2 = hs1 +DNS.3 = hs2 +DNS.4 = hs3 +DNS.5 = hs4 +IP.1 = 127.0.0.1 diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index d7424d11..9c8038a7 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -82,7 +82,7 @@ rust-jemalloc-sys' = (rust-jemalloc-sys.override { buildDepsOnlyEnv = let rocksdb' = (rocksdb.override { - jemalloc = rust-jemalloc-sys'; + jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'; # rocksdb fails to build with prefixed jemalloc, which is required on # darwin due to [1]. In this case, fall back to building rocksdb with # libc malloc. 
This should not cause conflicts, because all of the @@ -103,6 +103,12 @@ buildDepsOnlyEnv = ++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ]) ) ++ old.cmakeFlags; + + # outputs has "tools" which we dont need or use + outputs = [ "out" ]; + + # preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use + preInstall = ""; }); in { @@ -149,13 +155,20 @@ commonAttrs = { # Keep sorted include = [ + ".cargo" "Cargo.lock" "Cargo.toml" - "deps" "src" ]; }; + doCheck = true; + + cargoExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + dontStrip = profile == "dev" || profile == "test"; dontPatchELF = profile == "dev" || profile == "test"; @@ -181,27 +194,7 @@ commonAttrs = { # differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious # rebuilds of bindgen and its depedents. jq - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo - ] - # needed to build Rust applications on macOS - ++ lib.optionals stdenv.hostPlatform.isDarwin [ - # https://github.com/NixOS/nixpkgs/issues/206242 - # ld: library not found for -liconv - libiconv - - # https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell - # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 - pkgsBuildHost.darwin.apple_sdk.frameworks.Security - ]; - - # for some reason gcc and other weird deps are added to OCI images and bloats it up - # - # - postInstall = with pkgsBuildHost; '' - find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${llvm} -t ${rustc.unwrapped} -t ${rustc} '{}' + - ''; + ]; }; in @@ -210,16 +203,13 @@ craneLib.buildPackage ( commonAttrs // { env = buildDepsOnlyEnv; }); - cargoExtraArgs = "--no-default-features " + doCheck = true; + + cargoExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) 
"--features " + (builtins.concatStringsSep "," features''); - # This is redundant with CI - cargoTestCommand = ""; - cargoCheckCommand = ""; - doCheck = false; - env = buildPackageEnv; passthru = { diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 5520c920..1650053d 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -36,6 +36,7 @@ dockerTools.buildLayeredImage { "org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/"; "org.opencontainers.image.licenses" = "Apache-2.0"; "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; + "org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit"; "org.opencontainers.image.title" = main.pname; "org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/"; "org.opencontainers.image.vendor" = "girlbossceo"; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 97e33c91..aadc8f99 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.84.0" +channel = "1.86.0" profile = "minimal" components = [ # For rust-analyzer @@ -24,5 +24,6 @@ targets = [ "x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl", "aarch64-unknown-linux-musl", + "aarch64-unknown-linux-gnu", #"aarch64-apple-darwin", ] diff --git a/rustfmt.toml b/rustfmt.toml index 635ec8f8..89041b04 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -2,7 +2,7 @@ array_width = 80 chain_width = 60 comment_width = 80 condense_wildcard_suffixes = true -edition = "2024" +style_edition = "2024" fn_call_width = 80 fn_single_line = true format_code_in_doc_comments = true diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml index ca865969..7896ef97 100644 --- a/src/admin/Cargo.toml +++ b/src/admin/Cargo.toml @@ -17,12 +17,61 @@ crate-type = [ ] [features] +brotli_compression = [ + "conduwuit-api/brotli_compression", + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", +] +gzip_compression = [ + "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", +] +io_uring = [ + "conduwuit-api/io_uring", + "conduwuit-database/io_uring", + "conduwuit-service/io_uring", +] +jemalloc = [ + "conduwuit-api/jemalloc", + "conduwuit-core/jemalloc", + "conduwuit-database/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-api/jemalloc_conf", + "conduwuit-core/jemalloc_conf", + "conduwuit-database/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-api/jemalloc_prof", + "conduwuit-core/jemalloc_prof", + "conduwuit-database/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-api/jemalloc_stats", + "conduwuit-core/jemalloc_stats", + "conduwuit-database/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] release_max_log_level = [ + "conduwuit-api/release_max_log_level", + "conduwuit-core/release_max_log_level", + "conduwuit-database/release_max_log_level", + 
"conduwuit-service/release_max_log_level", "tracing/max_level_trace", "tracing/release_max_level_info", "log/max_level_trace", "log/release_max_level_info", ] +zstd_compression = [ + "conduwuit-api/zstd_compression", + "conduwuit-core/zstd_compression", + "conduwuit-database/zstd_compression", + "conduwuit-service/zstd_compression", +] [dependencies] clap.workspace = true diff --git a/src/admin/admin.rs b/src/admin/admin.rs index b6de1ec6..0d636c72 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -2,7 +2,7 @@ use clap::Parser; use conduwuit::Result; use crate::{ - appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command, + appservice, appservice::AppserviceCommand, check, check::CheckCommand, context::Context, debug, debug::DebugCommand, federation, federation::FederationCommand, media, media::MediaCommand, query, query::QueryCommand, room, room::RoomCommand, server, server::ServerCommand, user, user::UserCommand, @@ -49,20 +49,18 @@ pub(super) enum AdminCommand { } #[tracing::instrument(skip_all, name = "command")] -pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Result { use AdminCommand::*; match command { - | Appservices(command) => appservice::process(command, context).await?, - | Media(command) => media::process(command, context).await?, - | Users(command) => user::process(command, context).await?, - | Rooms(command) => room::process(command, context).await?, - | Federation(command) => federation::process(command, context).await?, - | Server(command) => server::process(command, context).await?, - | Debug(command) => debug::process(command, context).await?, - | Query(command) => query::process(command, context).await?, - | Check(command) => check::process(command, context).await?, - }; - - Ok(()) + | Appservices(command) => appservice::process(command, context).await, + | Media(command) => 
media::process(command, context).await, + | Users(command) => user::process(command, context).await, + | Rooms(command) => room::process(command, context).await, + | Federation(command) => federation::process(command, context).await, + | Server(command) => server::process(command, context).await, + | Debug(command) => debug::process(command, context).await, + | Query(command) => query::process(command, context).await, + | Check(command) => check::process(command, context).await, + } } diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs index 4f02531a..3575e067 100644 --- a/src/admin/appservice/commands.rs +++ b/src/admin/appservice/commands.rs @@ -1,84 +1,80 @@ -use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; +use conduwuit::{Err, Result, checked}; +use futures::{FutureExt, StreamExt, TryFutureExt}; -use crate::{admin_command, Result}; +use crate::admin_command; #[admin_command] -pub(super) async fn register(&self) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" +pub(super) async fn register(&self) -> Result { + let body = &self.body; + let body_len = self.body.len(); + if body_len < 2 + || !body[0].trim().starts_with("```") + || body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details."); } - let appservice_config_body = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config_body); + let range = 1..checked!(body_len - 1)?; + let appservice_config_body = body[range].join("\n"); + let parsed_config = serde_yaml::from_str(&appservice_config_body); match parsed_config { + | Err(e) => return Err!("Could not parse appservice config as YAML: {e}"), | Ok(registration) => match self .services .appservice .register_appservice(®istration, &appservice_config_body) .await + .map(|()| registration.id) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {}", - registration.id - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {e}" - ))), + | Err(e) => return Err!("Failed to register appservice: {e}"), + | Ok(id) => write!(self, "Appservice registered with ID: {id}"), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config as YAML: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn unregister( - &self, - appservice_identifier: String, -) -> Result { +pub(super) async fn unregister(&self, appservice_identifier: String) -> Result { match self .services .appservice .unregister_appservice(&appservice_identifier) .await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to unregister appservice: {e}" - ))), + | Err(e) => return Err!("Failed to unregister appservice: {e}"), + | Ok(()) => write!(self, "Appservice unregistered."), } + .await } #[admin_command] -pub(super) async fn show_appservice_config( - &self, - appservice_identifier: String, -) -> Result { +pub(super) async fn show_appservice_config(&self, appservice_identifier: String) -> Result { match self .services .appservice 
.get_registration(&appservice_identifier) .await { + | None => return Err!("Appservice does not exist."), | Some(config) => { - let config_str = serde_yaml::to_string(&config) - .expect("config should've been validated on register"); - let output = - format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",); - Ok(RoomMessageEventContent::notice_markdown(output)) + let config_str = serde_yaml::to_string(&config)?; + write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```") }, - | None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")), } + .await } #[admin_command] -pub(super) async fn list_registered(&self) -> Result { - let appservices = self.services.appservice.iter_ids().await; - let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", ")); - Ok(RoomMessageEventContent::text_plain(output)) +pub(super) async fn list_registered(&self) -> Result { + self.services + .appservice + .iter_ids() + .collect() + .map(Ok) + .and_then(|appservices: Vec<_>| { + let len = appservices.len(); + let list = appservices.join(", "); + write!(self, "Appservices ({len}): {list}") + }) + .await } diff --git a/src/admin/check/commands.rs b/src/admin/check/commands.rs index 7e27362f..1ffc3ae5 100644 --- a/src/admin/check/commands.rs +++ b/src/admin/check/commands.rs @@ -1,15 +1,14 @@ use conduwuit::Result; use conduwuit_macros::implement; use futures::StreamExt; -use ruma::events::room::message::RoomMessageEventContent; -use crate::Command; +use crate::Context; /// Uses the iterator in `src/database/key_value/users.rs` to iterator over /// every user in our database (remote and local). 
Reports total count, any /// errors if there were any, etc -#[implement(Command, params = "<'_>")] -pub(super) async fn check_all_users(&self) -> Result { +#[implement(Context, params = "<'_>")] +pub(super) async fn check_all_users(&self) -> Result { let timer = tokio::time::Instant::now(); let users = self.services.users.iter().collect::>().await; let query_time = timer.elapsed(); @@ -18,11 +17,10 @@ pub(super) async fn check_all_users(&self) -> Result { let err_count = users.iter().filter(|_user| false).count(); let ok_count = users.iter().filter(|_user| true).count(); - let message = format!( + self.write_str(&format!( "Database query completed in {query_time:?}:\n\n```\nTotal entries: \ {total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \ {ok_count:?}\n```" - ); - - Ok(RoomMessageEventContent::notice_markdown(message)) + )) + .await } diff --git a/src/admin/command.rs b/src/admin/context.rs similarity index 61% rename from src/admin/command.rs rename to src/admin/context.rs index 5ad9e581..270537be 100644 --- a/src/admin/command.rs +++ b/src/admin/context.rs @@ -3,13 +3,13 @@ use std::{fmt, time::SystemTime}; use conduwuit::Result; use conduwuit_service::Services; use futures::{ + Future, FutureExt, TryFutureExt, io::{AsyncWriteExt, BufWriter}, lock::Mutex, - Future, FutureExt, }; use ruma::EventId; -pub(crate) struct Command<'a> { +pub(crate) struct Context<'a> { pub(crate) services: &'a Services, pub(crate) body: &'a [&'a str], pub(crate) timer: SystemTime, @@ -17,14 +17,14 @@ pub(crate) struct Command<'a> { pub(crate) output: Mutex>>, } -impl Command<'_> { +impl Context<'_> { pub(crate) fn write_fmt( &self, arguments: fmt::Arguments<'_>, - ) -> impl Future + Send + '_ { + ) -> impl Future + Send + '_ + use<'_> { let buf = format!("{arguments}"); - self.output.lock().then(|mut output| async move { - output.write_all(buf.as_bytes()).await.map_err(Into::into) + self.output.lock().then(async move |mut output| { + 
output.write_all(buf.as_bytes()).map_err(Into::into).await }) } @@ -32,8 +32,8 @@ impl Command<'_> { &'a self, s: &'a str, ) -> impl Future + Send + 'a { - self.output.lock().then(move |mut output| async move { - output.write_all(s.as_bytes()).await.map_err(Into::into) + self.output.lock().then(async move |mut output| { + output.write_all(s.as_bytes()).map_err(Into::into).await }) } } diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index cd892ded..d0debc2a 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,15 +6,20 @@ use std::{ }; use conduwuit::{ - debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, PduId, - RawPduId, Result, + Err, Result, debug_error, err, info, + matrix::pdu::{PduEvent, PduId, RawPduId}, + trace, utils, + utils::{ + stream::{IterStream, ReadyExt}, + string::EMPTY, + }, + warn, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - api::{client::error::ErrorKind, federation::event::get_room_state}, - events::room::message::RoomMessageEventContent, - CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, - ServerName, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, + api::federation::event::get_room_state, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -25,28 +30,24 @@ use tracing_subscriber::EnvFilter; use crate::admin_command; #[admin_command] -pub(super) async fn echo(&self, message: Vec) -> Result { +pub(super) async fn echo(&self, message: Vec) -> Result { let message = message.join(" "); - - Ok(RoomMessageEventContent::notice_plain(message)) + self.write_str(&message).await } #[admin_command] -pub(super) async fn get_auth_chain( - &self, - event_id: Box, -) -> Result { +pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result { let Ok(event) = 
self.services.rooms.timeline.get_pdu_json(&event_id).await else { - return Ok(RoomMessageEventContent::notice_plain("Event not found.")); + return Err!("Event not found."); }; let room_id_str = event .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + .and_then(CanonicalJsonValue::as_str) + .ok_or_else(|| err!(Database("Invalid event in database")))?; let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + .map_err(|_| err!(Database("Invalid room id field in event in database")))?; let start = Instant::now(); let count = self @@ -54,56 +55,44 @@ pub(super) async fn get_auth_chain( .rooms .auth_chain .event_ids_iter(room_id, once(event_id.as_ref())) - .await? + .ready_filter_map(Result::ok) .count() .await; let elapsed = start.elapsed(); - Ok(RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {count} in {elapsed:?}" - ))) + let out = format!("Loaded auth chain with length {count} in {elapsed:?}"); + + self.write_str(&out).await } #[admin_command] -pub(super) async fn parse_pdu(&self) -> Result { +pub(super) async fn parse_pdu(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&EMPTY).trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details."); } let string = self.body[1..self.body.len().saturating_sub(1)].join("\n"); match serde_json::from_str(&string) { + | Err(e) => return Err!("Invalid json in command body: {e}"), | Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + | Err(e) => return Err!("Could not parse PDU JSON: {e:?}"), | Ok(hash) => { let event_id = OwnedEventId::parse(format!("${hash}")); - - match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), - ) { - | Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\n{pdu:#?}" - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\nCould not parse event: {e}" - ))), + match serde_json::from_value::(serde_json::to_value(value)?) { + | Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"), + | Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"), } }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse PDU JSON: {e:?}" - ))), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn get_pdu(&self, event_id: Box) -> Result { +pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result { let mut outlier = false; let mut pdu_json = self .services @@ -118,21 +107,18 @@ pub(super) async fn get_pdu(&self, event_id: Box) -> Result return Err!("PDU not found locally."), | Ok(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::notice_markdown(format!( - "{}\n```json\n{}\n```", - if outlier { - "Outlier (Rejected / Soft Failed) PDU found in our database" - } else { - "PDU found in our database" - }, - json_text - ))) + let text = serde_json::to_string_pretty(&json)?; + let msg = if outlier { + "Outlier (Rejected / Soft Failed) PDU found in our database" + } 
else { + "PDU found in our database" + }; + write!(self, "{msg}\n```json\n{text}\n```",) }, - | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } + .await } #[admin_command] @@ -140,7 +126,7 @@ pub(super) async fn get_short_pdu( &self, shortroomid: ShortRoomId, shorteventid: ShortEventId, -) -> Result { +) -> Result { let pdu_id: RawPduId = PduId { shortroomid, shorteventid: shorteventid.into(), @@ -155,41 +141,33 @@ pub(super) async fn get_short_pdu( .await; match pdu_json { + | Err(_) => return Err!("PDU not found locally."), | Ok(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",))) + let json_text = serde_json::to_string_pretty(&json)?; + write!(self, "```json\n{json_text}\n```") }, - | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } + .await } #[admin_command] -pub(super) async fn get_remote_pdu_list( - &self, - server: Box, - force: bool, -) -> Result { +pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: bool) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver.",); } if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ fetching local PDUs from the database.", - )); + ); } if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&EMPTY).trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } let list = self @@ -203,60 +181,68 @@ pub(super) async fn get_remote_pdu_list( let mut failed_count: usize = 0; let mut success_count: usize = 0; - for pdu in list { + for event_id in list { if force { - if let Err(e) = self.get_remote_pdu(Box::from(pdu), server.clone()).await { - failed_count = failed_count.saturating_add(1); - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to get remote PDU, ignoring error: {e}" - ))) - .await - .ok(); - warn!("Failed to get remote PDU, ignoring error: {e}"); - } else { - success_count = success_count.saturating_add(1); + match self + .get_remote_pdu(event_id.to_owned(), server.clone()) + .await + { + | Err(e) => { + failed_count = failed_count.saturating_add(1); + self.services + .admin + .send_text(&format!("Failed to get remote PDU, ignoring error: {e}")) + .await; + + warn!("Failed to get remote PDU, ignoring error: {e}"); + }, + | _ => { + success_count = success_count.saturating_add(1); + }, } } else { - self.get_remote_pdu(Box::from(pdu), server.clone()).await?; + self.get_remote_pdu(event_id.to_owned(), server.clone()) + .await?; success_count = success_count.saturating_add(1); } } - Ok(RoomMessageEventContent::text_plain(format!( - "Fetched {success_count} remote PDUs successfully with {failed_count} failures" - ))) + let out = + format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures"); + + self.write_str(&out).await } #[admin_command] pub(super) async fn get_remote_pdu( &self, - event_id: Box, - server: Box, -) -> Result { + event_id: OwnedEventId, + server: OwnedServerName, +) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver."); } if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( + return 
Err!( "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ fetching local PDUs.", - )); + ); } match self .services .sending .send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request { - event_id: event_id.clone().into(), + event_id: event_id.clone(), include_unredacted_content: None, }) .await { + | Err(e) => + return Err!( + "Remote server did not have PDU or failed sending request to remote server: {e}" + ), | Ok(response) => { let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { @@ -264,10 +250,9 @@ pub(super) async fn get_remote_pdu( "Requested event ID {event_id} from server but failed to convert from \ RawValue to CanonicalJsonObject (malformed event/response?): {e}" ); - Error::BadRequest( - ErrorKind::Unknown, - "Received response from server but failed to parse PDU", - ) + err!(Request(Unknown( + "Received response from server but failed to parse PDU" + ))) })?; trace!("Attempting to parse PDU: {:?}", &response.pdu); @@ -277,6 +262,7 @@ pub(super) async fn get_remote_pdu( .rooms .event_handler .parse_incoming_pdu(&response.pdu) + .boxed() .await; let (event_id, value, room_id) = match parsed_result { @@ -284,9 +270,7 @@ pub(super) async fn get_remote_pdu( | Err(e) => { warn!("Failed to parse PDU: {e}"); info!("Full PDU: {:?}", &response.pdu); - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse PDU remote server {server} sent us: {e}" - ))); + return Err!("Failed to parse PDU remote server {server} sent us: {e}"); }, }; @@ -298,30 +282,18 @@ pub(super) async fn get_remote_pdu( .rooms .timeline .backfill_pdu(&server, response.pdu) - .boxed() .await?; - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "{}\n```json\n{}\n```", - "Got PDU from specified server and handled as backfilled PDU successfully. 
\ - Event body:", - json_text - ))) + let text = serde_json::to_string_pretty(&json)?; + let msg = "Got PDU from specified server and handled as backfilled"; + write!(self, "{msg}. Event body:\n```json\n{text}\n```") }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Remote server did not have PDU or failed sending request to remote server: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn get_room_state( - &self, - room: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room).await?; let room_state: Vec<_> = self .services @@ -333,28 +305,24 @@ pub(super) async fn get_room_state( .await?; if room_state.is_empty() { - return Ok(RoomMessageEventContent::text_plain( - "Unable to find room state in our database (vector is empty)", - )); + return Err!("Unable to find room state in our database (vector is empty)",); } let json = serde_json::to_string_pretty(&room_state).map_err(|e| { - warn!("Failed converting room state vector in our database to pretty JSON: {e}"); - Error::bad_database( + err!(Database( "Failed to convert room state events to pretty JSON, possible invalid room state \ - events in our database", - ) + events in our database {e}", + )) })?; - Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json}\n```"))) + let out = format!("```json\n{json}\n```"); + self.write_str(&out).await } #[admin_command] -pub(super) async fn ping(&self, server: Box) -> Result { +pub(super) async fn ping(&self, server: OwnedServerName) -> Result { if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to send federation requests to ourselves.", - )); + return Err!("Not allowed to send federation requests to ourselves."); } let timer = tokio::time::Instant::now(); @@ -368,35 +336,27 @@ pub(super) async fn ping(&self, server: Box) -> Result { + return Err!("Failed 
sending federation request to specified server:\n\n{e}"); + }, | Ok(response) => { let ping_time = timer.elapsed(); - let json_text_res = serde_json::to_string_pretty(&response.server); - if let Ok(json) = json_text_res { - return Ok(RoomMessageEventContent::notice_markdown(format!( - "Got response which took {ping_time:?} time:\n```json\n{json}\n```" - ))); - } + let out = if let Ok(json) = json_text_res { + format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```") + } else { + format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}") + }; - Ok(RoomMessageEventContent::text_plain(format!( - "Got non-JSON response which took {ping_time:?} time:\n{response:?}" - ))) - }, - | Err(e) => { - warn!( - "Failed sending federation request to specified server from ping debug command: \ - {e}" - ); - Ok(RoomMessageEventContent::text_plain(format!( - "Failed sending federation request to specified server:\n\n{e}", - ))) + write!(self, "{out}") }, } + .await } #[admin_command] -pub(super) async fn force_device_list_updates(&self) -> Result { +pub(super) async fn force_device_list_updates(&self) -> Result { // Force E2EE device list updates for all users self.services .users @@ -404,27 +364,17 @@ pub(super) async fn force_device_list_updates(&self) -> Result, - reset: bool, -) -> Result { +pub(super) async fn change_log_level(&self, filter: Option, reset: bool) -> Result { let handles = &["console"]; if reset { let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) { | Ok(s) => s, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Log level from config appears to be invalid now: {e}" - ))); - }, + | Err(e) => return Err!("Log level from config appears to be invalid now: {e}"), }; match self @@ -434,16 +384,12 @@ pub(super) async fn change_log_level( .reload .reload(&old_filter_layer, Some(handles)) { + | Err(e) => + return Err!("Failed to modify and reload the global tracing log level: 
{e}"), | Ok(()) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Successfully changed log level back to config value {}", - self.services.server.config.log - ))); - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); + let value = &self.services.server.config.log; + let out = format!("Successfully changed log level back to config value {value}"); + return self.write_str(&out).await; }, } } @@ -451,11 +397,7 @@ pub(super) async fn change_log_level( if let Some(filter) = filter { let new_filter_layer = match EnvFilter::try_new(filter) { | Ok(s) => s, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Invalid log level filter specified: {e}" - ))); - }, + | Err(e) => return Err!("Invalid log level filter specified: {e}"), }; match self @@ -465,90 +407,75 @@ pub(super) async fn change_log_level( .reload .reload(&new_filter_layer, Some(handles)) { - | Ok(()) => { - return Ok(RoomMessageEventContent::text_plain("Successfully changed log level")); - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); - }, + | Ok(()) => return self.write_str("Successfully changed log level").await, + | Err(e) => + return Err!("Failed to modify and reload the global tracing log level: {e}"), } } - Ok(RoomMessageEventContent::text_plain("No log level was specified.")) + Err!("No log level was specified.") } #[admin_command] -pub(super) async fn sign_json(&self) -> Result { +pub(super) async fn sign_json(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details."); } let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str(&string) { + | Err(e) => return Err!("Invalid json: {e}"), | Ok(mut value) => { - self.services - .server_keys - .sign_json(&mut value) - .expect("our request json is what ruma expects"); - let json_text = - serde_json::to_string_pretty(&value).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::text_plain(json_text)) + self.services.server_keys.sign_json(&mut value)?; + let json_text = serde_json::to_string_pretty(&value)?; + write!(self, "{json_text}") }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } + .await } #[admin_command] -pub(super) async fn verify_json(&self) -> Result { +pub(super) async fn verify_json(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details."); } let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str::(&string) { + | Err(e) => return Err!("Invalid json: {e}"), | Ok(value) => match self.services.server_keys.verify_json(&value, None).await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Signature verification failed: {e}" - ))), + | Err(e) => return Err!("Signature verification failed: {e}"), + | Ok(()) => write!(self, "Signature correct"), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } + .await } #[admin_command] -pub(super) async fn verify_pdu(&self, event_id: Box) -> Result { +pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result { + use ruma::signatures::Verified; + let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?; event.remove("event_id"); let msg = match self.services.server_keys.verify_event(&event, None).await { - | Ok(ruma::signatures::Verified::Signatures) => - "signatures OK, but content hash failed (redaction).", - | Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.", | Err(e) => return Err(e), + | Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).", + | Ok(Verified::All) => "signatures and hashes OK.", }; - Ok(RoomMessageEventContent::notice_plain(msg)) + self.write_str(msg).await } #[admin_command] #[tracing::instrument(skip(self))] -pub(super) async fn first_pdu_in_room( - &self, - room_id: Box, -) -> Result { +pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { if !self .services .rooms @@ -556,9 +483,7 @@ pub(super) async fn first_pdu_in_room( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return Err!("We are not 
participating in the room / we don't know about the room ID.",); } let first_pdu = self @@ -567,17 +492,15 @@ pub(super) async fn first_pdu_in_room( .timeline .first_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the first PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the first PDU in database")))?; - Ok(RoomMessageEventContent::text_plain(format!("{first_pdu:?}"))) + let out = format!("{first_pdu:?}"); + self.write_str(&out).await } #[admin_command] #[tracing::instrument(skip(self))] -pub(super) async fn latest_pdu_in_room( - &self, - room_id: Box, -) -> Result { +pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { if !self .services .rooms @@ -585,9 +508,7 @@ pub(super) async fn latest_pdu_in_room( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID."); } let latest_pdu = self @@ -596,18 +517,19 @@ pub(super) async fn latest_pdu_in_room( .timeline .latest_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; - Ok(RoomMessageEventContent::text_plain(format!("{latest_pdu:?}"))) + let out = format!("{latest_pdu:?}"); + self.write_str(&out).await } #[admin_command] #[tracing::instrument(skip(self))] pub(super) async fn force_set_room_state_from_server( &self, - room_id: Box, - server_name: Box, -) -> Result { + room_id: OwnedRoomId, + server_name: OwnedServerName, +) -> Result { if !self .services .rooms @@ -615,9 +537,7 @@ pub(super) async fn force_set_room_state_from_server( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / 
we don't know about the room ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID."); } let first_pdu = self @@ -626,7 +546,7 @@ pub(super) async fn force_set_room_state_from_server( .timeline .latest_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; let room_version = self.services.rooms.state.get_room_version(&room_id).await?; @@ -636,7 +556,7 @@ pub(super) async fn force_set_room_state_from_server( .services .sending .send_federation_request(&server_name, get_room_state::v1::Request { - room_id: room_id.clone().into(), + room_id: room_id.clone(), event_id: first_pdu.event_id.clone(), }) .await?; @@ -725,6 +645,7 @@ pub(super) async fn force_set_room_state_from_server( .await?; let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await; + self.services .rooms .state @@ -741,21 +662,18 @@ pub(super) async fn force_set_room_state_from_server( .update_joined_count(&room_id) .await; - drop(state_lock); - - Ok(RoomMessageEventContent::text_plain( - "Successfully forced the room state from the requested remote server.", - )) + self.write_str("Successfully forced the room state from the requested remote server.") + .await } #[admin_command] pub(super) async fn get_signing_keys( &self, - server_name: Option>, - notary: Option>, + server_name: Option, + notary: Option, query: bool, -) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); +) -> Result { + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone()); if let Some(notary) = notary { let signing_keys = self @@ -764,9 +682,8 @@ pub(super) async fn get_signing_keys( .notary_request(¬ary, &server_name) .await?; - return Ok(RoomMessageEventContent::notice_markdown(format!( - "```rs\n{signing_keys:#?}\n```" - ))); + let out = 
format!("```rs\n{signing_keys:#?}\n```"); + return self.write_str(&out).await; } let signing_keys = if query { @@ -781,17 +698,13 @@ pub(super) async fn get_signing_keys( .await? }; - Ok(RoomMessageEventContent::notice_markdown(format!( - "```rs\n{signing_keys:#?}\n```" - ))) + let out = format!("```rs\n{signing_keys:#?}\n```"); + self.write_str(&out).await } #[admin_command] -pub(super) async fn get_verify_keys( - &self, - server_name: Option>, -) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); +pub(super) async fn get_verify_keys(&self, server_name: Option) -> Result { + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone()); let keys = self .services @@ -806,26 +719,24 @@ pub(super) async fn get_verify_keys( writeln!(out, "| {key_id} | {key:?} |")?; } - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&out).await } #[admin_command] pub(super) async fn resolve_true_destination( &self, - server_name: Box, + server_name: OwnedServerName, no_cache: bool, -) -> Result { +) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver.",); } if server_name == self.services.server.name { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for \ fetching local PDUs.", - )); + ); } let actual = self @@ -834,13 +745,12 @@ pub(super) async fn resolve_true_destination( .resolve_actual_dest(&server_name, !no_cache) .await?; - let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host,); - - Ok(RoomMessageEventContent::text_markdown(msg)) + let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host); + self.write_str(&msg).await } #[admin_command] -pub(super) async fn memory_stats(&self, opts: Option) -> Result { +pub(super) async fn memory_stats(&self, opts: Option) -> Result { const OPTS: &str = "abcdefghijklmnopqrstuvwxyz"; let opts: String = OPTS @@ -859,13 +769,12 @@ pub(super) async fn memory_stats(&self, opts: Option) -> Result Result { +pub(super) async fn runtime_metrics(&self) -> Result { let out = self.services.server.metrics.runtime_metrics().map_or_else( || "Runtime metrics are not available.".to_owned(), |metrics| { @@ -878,51 +787,51 @@ pub(super) async fn runtime_metrics(&self) -> Result { }, ); - Ok(RoomMessageEventContent::text_markdown(out)) + self.write_str(&out).await } #[cfg(not(tokio_unstable))] #[admin_command] -pub(super) async fn runtime_metrics(&self) -> Result { - Ok(RoomMessageEventContent::text_markdown( - "Runtime metrics require building with `tokio_unstable`.", - )) +pub(super) async fn runtime_metrics(&self) -> Result { + self.write_str("Runtime metrics require building with `tokio_unstable`.") + .await } #[cfg(tokio_unstable)] #[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { +pub(super) async fn runtime_interval(&self) -> Result { let out = self.services.server.metrics.runtime_interval().map_or_else( || "Runtime metrics are not available.".to_owned(), |metrics| format!("```rs\n{metrics:#?}\n```"), ); - Ok(RoomMessageEventContent::text_markdown(out)) + self.write_str(&out).await } #[cfg(not(tokio_unstable))] #[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { - 
Ok(RoomMessageEventContent::text_markdown( - "Runtime metrics require building with `tokio_unstable`.", - )) +pub(super) async fn runtime_interval(&self) -> Result { + self.write_str("Runtime metrics require building with `tokio_unstable`.") + .await } #[admin_command] -pub(super) async fn time(&self) -> Result { +pub(super) async fn time(&self) -> Result { let now = SystemTime::now(); - Ok(RoomMessageEventContent::text_markdown(utils::time::format(now, "%+"))) + let now = utils::time::format(now, "%+"); + + self.write_str(&now).await } #[admin_command] -pub(super) async fn list_dependencies(&self, names: bool) -> Result { +pub(super) async fn list_dependencies(&self, names: bool) -> Result { if names { let out = info::cargo::dependencies_names().join(" "); - return Ok(RoomMessageEventContent::notice_markdown(out)); + return self.write_str(&out).await; } - let deps = info::cargo::dependencies(); let mut out = String::new(); + let deps = info::cargo::dependencies(); writeln!(out, "| name | version | features |")?; writeln!(out, "| ---- | ------- | -------- |")?; for (name, dep) in deps { @@ -933,10 +842,11 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result, map: Option, -) -> Result { - let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); +) -> Result { let map_name = map.as_ref().map_or(EMPTY, String::as_str); - - let mut out = String::new(); - for (&name, map) in self.services.db.iter() { - if !map_name.is_empty() && map_name != name { - continue; - } - - let res = map.property(&property)?; - let res = res.trim(); - writeln!(out, "##### {name}:\n```\n{res}\n```")?; - } - - Ok(RoomMessageEventContent::notice_markdown(out)) + let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); + self.services + .db + .iter() + .filter(|&(&name, _)| map_name.is_empty() || map_name == name) + .try_stream() + .try_for_each(|(&name, map)| { + let res = map.property(&property).expect("invalid property"); + writeln!(self, "##### 
{name}:\n```\n{}\n```", res.trim()) + }) + .await } #[admin_command] -pub(super) async fn trim_memory(&self) -> Result { +pub(super) async fn database_files(&self, map: Option, level: Option) -> Result { + let mut files: Vec<_> = self.services.db.db.file_list().collect::>()?; + + files.sort_by_key(|f| f.name.clone()); + + writeln!(self, "| lev | sst | keys | dels | size | column |").await?; + writeln!(self, "| ---: | :--- | ---: | ---: | ---: | :--- |").await?; + files + .into_iter() + .filter(|file| { + map.as_deref() + .is_none_or(|map| map == file.column_family_name) + }) + .filter(|file| level.as_ref().is_none_or(|&level| level == file.level)) + .try_stream() + .try_for_each(|file| { + writeln!( + self, + "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", + file.level, + file.name, + file.num_entries, + file.num_deletions, + file.size, + file.column_family_name, + ) + }) + .await +} + +#[admin_command] +pub(super) async fn trim_memory(&self) -> Result { conduwuit::alloc::trim(None)?; - writeln!(self, "done").await?; - - Ok(RoomMessageEventContent::notice_plain("")) + writeln!(self, "done").await } diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index 07f7296b..9b86f18c 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -3,7 +3,7 @@ pub(crate) mod tester; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, OwnedRoomOrAliasId, RoomId, ServerName}; +use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName}; use service::rooms::short::{ShortEventId, ShortRoomId}; use self::tester::TesterCommand; @@ -20,7 +20,7 @@ pub(super) enum DebugCommand { /// - Get the auth_chain of a PDU GetAuthChain { /// An event ID (the $ character followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, }, /// - Parse and print a PDU from a JSON @@ -35,7 +35,7 @@ pub(super) enum DebugCommand { /// - Retrieve and print a PDU by EventID from the conduwuit database GetPdu { /// An event ID (a $ followed by the 
base64 reference hash) - event_id: Box, + event_id: OwnedEventId, }, /// - Retrieve and print a PDU by PduId from the conduwuit database @@ -52,11 +52,11 @@ pub(super) enum DebugCommand { /// (following normal event auth rules, handles it as an incoming PDU). GetRemotePdu { /// An event ID (a $ followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, /// Argument for us to attempt to fetch the event from the /// specified remote server. - server: Box, + server: OwnedServerName, }, /// - Same as `get-remote-pdu` but accepts a codeblock newline delimited @@ -64,7 +64,7 @@ pub(super) enum DebugCommand { GetRemotePduList { /// Argument for us to attempt to fetch all the events from the /// specified remote server. - server: Box, + server: OwnedServerName, /// If set, ignores errors, else stops at the first error/failure. #[arg(short, long)] @@ -88,10 +88,10 @@ pub(super) enum DebugCommand { /// - Get and display signing keys from local cache or remote server. GetSigningKeys { - server_name: Option>, + server_name: Option, #[arg(long)] - notary: Option>, + notary: Option, #[arg(short, long)] query: bool, @@ -99,14 +99,14 @@ pub(super) enum DebugCommand { /// - Get and display signing keys from local cache or remote server. GetVerifyKeys { - server_name: Option>, + server_name: Option, }, /// - Sends a federation request to the remote server's /// `/_matrix/federation/v1/version` endpoint and measures the latency it /// took for the server to respond Ping { - server: Box, + server: OwnedServerName, }, /// - Forces device lists for all local and remote users to be updated (as @@ -141,21 +141,21 @@ pub(super) enum DebugCommand { /// /// This re-verifies a PDU existing in the database found by ID. 
VerifyPdu { - event_id: Box, + event_id: OwnedEventId, }, /// - Prints the very first PDU in the specified room (typically /// m.room.create) FirstPduInRoom { /// The room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Prints the latest ("last") PDU in the specified room (typically a /// message) LatestPduInRoom { /// The room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Forcefully replaces the room state of our local copy of the specified @@ -174,9 +174,9 @@ pub(super) enum DebugCommand { /// `/_matrix/federation/v1/state/{roomId}`. ForceSetRoomStateFromServer { /// The impacted room ID - room_id: Box, + room_id: OwnedRoomId, /// The server we will use to query the room state for - server_name: Box, + server_name: OwnedServerName, }, /// - Runs a server name through conduwuit's true destination resolution @@ -184,7 +184,7 @@ pub(super) enum DebugCommand { /// /// Useful for debugging well-known issues ResolveTrueDestination { - server_name: Box, + server_name: OwnedServerName, #[arg(short, long)] no_cache: bool, @@ -226,6 +226,14 @@ pub(super) enum DebugCommand { /// - Trim memory usage TrimMemory, + /// - List database files + DatabaseFiles { + map: Option, + + #[arg(long)] + level: Option, + }, + /// - Developer test stubs #[command(subcommand)] #[allow(non_snake_case)] diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index 5200fa0d..0a2b1516 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -1,7 +1,6 @@ -use conduwuit::Err; -use ruma::events::room::message::RoomMessageEventContent; +use conduwuit::{Err, Result}; -use crate::{admin_command, admin_command_dispatch, Result}; +use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, clap::Subcommand)] @@ -14,14 +13,14 @@ pub(crate) enum TesterCommand { #[rustfmt::skip] #[admin_command] -async fn panic(&self) -> Result { +async fn panic(&self) -> Result { panic!("panicked") } #[rustfmt::skip] #[admin_command] -async fn 
failure(&self) -> Result { +async fn failure(&self) -> Result { Err!("failed") } @@ -29,20 +28,20 @@ async fn failure(&self) -> Result { #[inline(never)] #[rustfmt::skip] #[admin_command] -async fn tester(&self) -> Result { +async fn tester(&self) -> Result { - Ok(RoomMessageEventContent::notice_plain("legacy")) + self.write_str("Ok").await } #[inline(never)] #[rustfmt::skip] #[admin_command] -async fn timer(&self) -> Result { +async fn timer(&self) -> Result { let started = std::time::Instant::now(); timed(self.body); let elapsed = started.elapsed(); - Ok(RoomMessageEventContent::notice_plain(format!("completed in {elapsed:#?}"))) + self.write_str(&format!("completed in {elapsed:#?}")).await } #[inline(never)] diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 13bc8da4..545dcbca 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -1,49 +1,48 @@ use std::fmt::Write; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId, -}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; use crate::{admin_command, get_room_info}; #[admin_command] -pub(super) async fn disable_room(&self, room_id: Box) -> Result { +pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, true); - Ok(RoomMessageEventContent::text_plain("Room disabled.")) + self.write_str("Room disabled.").await } #[admin_command] -pub(super) async fn enable_room(&self, room_id: Box) -> Result { +pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, false); - Ok(RoomMessageEventContent::text_plain("Room enabled.")) + self.write_str("Room enabled.").await } #[admin_command] -pub(super) async fn incoming_federation(&self) -> Result { - let map = self - .services - .rooms - 
.event_handler - .federation_handletime - .read() - .expect("locked"); - let mut msg = format!("Handling {} incoming pdus:\n", map.len()); +pub(super) async fn incoming_federation(&self) -> Result { + let msg = { + let map = self + .services + .rooms + .event_handler + .federation_handletime + .read() + .expect("locked"); - for (r, (e, i)) in map.iter() { - let elapsed = i.elapsed(); - writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?; - } + let mut msg = format!("Handling {} incoming pdus:\n", map.len()); + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?; + } - Ok(RoomMessageEventContent::text_plain(&msg)) + msg + }; + + self.write_str(&msg).await } #[admin_command] -pub(super) async fn fetch_support_well_known( - &self, - server_name: Box, -) -> Result { +pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName) -> Result { let response = self .services .client @@ -55,54 +54,44 @@ pub(super) async fn fetch_support_well_known( let text = response.text().await?; if text.is_empty() { - return Ok(RoomMessageEventContent::text_plain("Response text/body is empty.")); + return Err!("Response text/body is empty."); } if text.len() > 1500 { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Response text/body is over 1500 characters, assuming no support well-known.", - )); + ); } let json: serde_json::Value = match serde_json::from_str(&text) { | Ok(json) => json, | Err(_) => { - return Ok(RoomMessageEventContent::text_plain( - "Response text/body is not valid JSON.", - )); + return Err!("Response text/body is not valid JSON.",); }, }; let pretty_json: String = match serde_json::to_string_pretty(&json) { | Ok(json) => json, | Err(_) => { - return Ok(RoomMessageEventContent::text_plain( - "Response text/body is not valid JSON.", - )); + return Err!("Response text/body is not valid JSON.",); }, }; - 
Ok(RoomMessageEventContent::notice_markdown(format!( - "Got JSON response:\n\n```json\n{pretty_json}\n```" - ))) + self.write_str(&format!("Got JSON response:\n\n```json\n{pretty_json}\n```")) + .await } #[admin_command] -pub(super) async fn remote_user_in_rooms( - &self, - user_id: Box, -) -> Result { +pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result { if user_id.server_name() == self.services.server.name { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "User belongs to our server, please use `list-joined-rooms` user admin command \ instead.", - )); + ); } if !self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain( - "Remote user does not exist in our database.", - )); + return Err!("Remote user does not exist in our database.",); } let mut rooms: Vec<(OwnedRoomId, u64, String)> = self @@ -115,21 +104,19 @@ pub(super) async fn remote_user_in_rooms( .await; if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("User is not in any rooms.")); + return Err!("User is not in any rooms."); } rooms.sort_by_key(|r| r.1); rooms.reverse(); - let output = format!( - "Rooms {user_id} shares with us ({}):\n```\n{}\n```", - rooms.len(), - rooms - .iter() - .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) - .collect::>() - .join("\n") - ); + let num = rooms.len(); + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::text_markdown(output)) + self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",)) + .await } diff --git a/src/admin/federation/mod.rs b/src/admin/federation/mod.rs index 3adfd459..2c539adc 100644 --- a/src/admin/federation/mod.rs +++ b/src/admin/federation/mod.rs @@ -2,7 +2,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{RoomId, ServerName, UserId}; +use 
ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; use crate::admin_command_dispatch; @@ -14,12 +14,12 @@ pub(super) enum FederationCommand { /// - Disables incoming federation handling for a room. DisableRoom { - room_id: Box, + room_id: OwnedRoomId, }, /// - Enables incoming federation handling for a room again. EnableRoom { - room_id: Box, + room_id: OwnedRoomId, }, /// - Fetch `/.well-known/matrix/support` from the specified server @@ -32,11 +32,11 @@ pub(super) enum FederationCommand { /// moderation, and security inquiries. This command provides a way to /// easily fetch that information. FetchSupportWellKnown { - server_name: Box, + server_name: OwnedServerName, }, /// - Lists all the rooms we share/track with the specified *remote* user RemoteUserInRooms { - user_id: Box, + user_id: OwnedUserId, }, } diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index 3d0a9473..7aed28db 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,26 +1,22 @@ use std::time::Duration; use conduwuit::{ - debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result, + Err, Result, debug, debug_info, debug_warn, error, info, trace, + utils::time::parse_timepoint_ago, warn, }; use conduwuit_service::media::Dim; -use ruma::{ - events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri, - OwnedServerName, ServerName, -}; +use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName}; use crate::{admin_command, utils::parse_local_user_id}; #[admin_command] pub(super) async fn delete( &self, - mxc: Option>, - event_id: Option>, -) -> Result { + mxc: Option, + event_id: Option, +) -> Result { if event_id.is_some() && mxc.is_some() { - return Ok(RoomMessageEventContent::text_plain( - "Please specify either an MXC or an event ID, not both.", - )); + return Err!("Please specify either an MXC or an event ID, not both.",); } if let Some(mxc) = mxc { @@ -30,9 +26,7 @@ pub(super) async fn delete( 
.delete(&mxc.as_str().try_into()?) .await?; - return Ok(RoomMessageEventContent::text_plain( - "Deleted the MXC from our database and on our filesystem.", - )); + return Err!("Deleted the MXC from our database and on our filesystem.",); } if let Some(event_id) = event_id { @@ -41,110 +35,108 @@ pub(super) async fn delete( let mut mxc_urls = Vec::with_capacity(4); // parsing the PDU for any MXC URLs begins here - if let Ok(event_json) = self.services.rooms.timeline.get_pdu_json(&event_id).await { - if let Some(content_key) = event_json.get("content") { - debug!("Event ID has \"content\"."); - let content_obj = content_key.as_object(); + match self.services.rooms.timeline.get_pdu_json(&event_id).await { + | Ok(event_json) => { + if let Some(content_key) = event_json.get("content") { + debug!("Event ID has \"content\"."); + let content_obj = content_key.as_object(); - if let Some(content) = content_obj { - // 1. attempts to parse the "url" key - debug!("Attempting to go into \"url\" key for main media file"); - if let Some(url) = content.get("url") { - debug!("Got a URL in the event ID {event_id}: {url}"); + if let Some(content) = content_obj { + // 1. attempts to parse the "url" key + debug!("Attempting to go into \"url\" key for main media file"); + if let Some(url) = content.get("url") { + debug!("Got a URL in the event ID {event_id}: {url}"); - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); - } else { - info!( - "Found a URL in the event ID {event_id} but did not start with \ - mxc://, ignoring" - ); - } - } - - // 2. 
attempts to parse the "info" key - debug!("Attempting to go into \"info\" key for thumbnails"); - if let Some(info_key) = content.get("info") { - debug!("Event ID has \"info\"."); - let info_obj = info_key.as_object(); - - if let Some(info) = info_obj { - if let Some(thumbnail_url) = info.get("thumbnail_url") { - debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - - if thumbnail_url.to_string().starts_with("\"mxc://") { - debug!( - "Pushing thumbnail URL {thumbnail_url} to list of MXCs \ - to delete" - ); - let final_thumbnail_url = - thumbnail_url.to_string().replace('"', ""); - mxc_urls.push(final_thumbnail_url); - } else { - info!( - "Found a thumbnail URL in the event ID {event_id} but \ - did not start with mxc://, ignoring" - ); - } + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); } else { info!( - "No \"thumbnail_url\" key in \"info\" key, assuming no \ - thumbnails." + "Found a URL in the event ID {event_id} but did not start \ + with mxc://, ignoring" ); } } - } - // 3. attempts to parse the "file" key - debug!("Attempting to go into \"file\" key"); - if let Some(file_key) = content.get("file") { - debug!("Event ID has \"file\"."); - let file_obj = file_key.as_object(); + // 2. 
attempts to parse the "info" key + debug!("Attempting to go into \"info\" key for thumbnails"); + if let Some(info_key) = content.get("info") { + debug!("Event ID has \"info\"."); + let info_obj = info_key.as_object(); - if let Some(file) = file_obj { - if let Some(url) = file.get("url") { - debug!("Found url in file key: {url}"); + if let Some(info) = info_obj { + if let Some(thumbnail_url) = info.get("thumbnail_url") { + debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); + if thumbnail_url.to_string().starts_with("\"mxc://") { + debug!( + "Pushing thumbnail URL {thumbnail_url} to list of \ + MXCs to delete" + ); + let final_thumbnail_url = + thumbnail_url.to_string().replace('"', ""); + mxc_urls.push(final_thumbnail_url); + } else { + info!( + "Found a thumbnail URL in the event ID {event_id} \ + but did not start with mxc://, ignoring" + ); + } } else { info!( - "Found a URL in the event ID {event_id} but did not \ - start with mxc://, ignoring" + "No \"thumbnail_url\" key in \"info\" key, assuming no \ + thumbnails." ); } - } else { - info!("No \"url\" key in \"file\" key."); } } + + // 3. 
attempts to parse the "file" key + debug!("Attempting to go into \"file\" key"); + if let Some(file_key) = content.get("file") { + debug!("Event ID has \"file\"."); + let file_obj = file_key.as_object(); + + if let Some(file) = file_obj { + if let Some(url) = file.get("url") { + debug!("Found url in file key: {url}"); + + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); + } else { + warn!( + "Found a URL in the event ID {event_id} but did not \ + start with mxc://, ignoring" + ); + } + } else { + error!("No \"url\" key in \"file\" key."); + } + } + } + } else { + return Err!( + "Event ID does not have a \"content\" key or failed parsing the \ + event ID JSON.", + ); } } else { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key or failed parsing the event \ - ID JSON.", - )); + return Err!( + "Event ID does not have a \"content\" key, this is not a message or an \ + event type that contains media.", + ); } - } else { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key, this is not a message or an \ - event type that contains media.", - )); - } - } else { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not exist or is not known to us.", - )); + }, + | _ => { + return Err!("Event ID does not exist or is not known to us.",); + }, } if mxc_urls.is_empty() { - info!("Parsed event ID {event_id} but did not contain any MXC URLs."); - return Ok(RoomMessageEventContent::text_plain( - "Parsed event ID but found no MXC URLs.", - )); + return Err!("Parsed event ID but found no MXC URLs.",); } let mut mxc_deletion_count: usize = 0; @@ -167,27 +159,27 @@ pub(super) async fn delete( } } - return Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from \ - event ID {event_id}." 
- ))); + return self + .write_str(&format!( + "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem \ + from event ID {event_id}." + )) + .await; } - Ok(RoomMessageEventContent::text_plain( + Err!( "Please specify either an MXC using --mxc or an event ID using --event-id of the \ - message containing an image. See --help for details.", - )) + message containing an image. See --help for details." + ) } #[admin_command] -pub(super) async fn delete_list(&self) -> Result { +pub(super) async fn delete_list(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } let mut failed_parsed_mxcs: usize = 0; @@ -201,7 +193,6 @@ pub(super) async fn delete_list(&self) -> Result { .try_into() .inspect_err(|e| { debug_warn!("Failed to parse user-provided MXC URI: {e}"); - failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1); }) .ok() @@ -224,10 +215,11 @@ pub(super) async fn delete_list(&self) -> Result { } } - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \ and the filesystem. 
{failed_parsed_mxcs} MXCs failed to be parsed from the database.", - ))) + )) + .await } #[admin_command] @@ -237,11 +229,9 @@ pub(super) async fn delete_past_remote_media( before: bool, after: bool, yes_i_want_to_delete_local_media: bool, -) -> Result { +) -> Result { if before && after { - return Ok(RoomMessageEventContent::text_plain( - "Please only pick one argument, --before or --after.", - )); + return Err!("Please only pick one argument, --before or --after.",); } assert!(!(before && after), "--before and --after should not be specified together"); @@ -257,35 +247,28 @@ pub(super) async fn delete_past_remote_media( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] -pub(super) async fn delete_all_from_user( - &self, - username: String, -) -> Result { +pub(super) async fn delete_all_from_user(&self, username: String) -> Result { let user_id = parse_local_user_id(self.services, &username)?; let deleted_count = self.services.media.delete_from_user(&user_id).await?; - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] pub(super) async fn delete_all_from_server( &self, - server_name: Box, + server_name: OwnedServerName, yes_i_want_to_delete_local_media: bool, -) -> Result { +) -> Result { if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media { - return Ok(RoomMessageEventContent::text_plain( - "This command only works for remote media by default.", - )); + return Err!("This command only works for remote media by default.",); } let Ok(all_mxcs) = self @@ -295,9 +278,7 @@ pub(super) async fn delete_all_from_server( .await .inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}")) else { - return 
Ok(RoomMessageEventContent::text_plain( - "Failed to get MXC URIs from our database", - )); + return Err!("Failed to get MXC URIs from our database",); }; let mut deleted_count: usize = 0; @@ -333,17 +314,16 @@ pub(super) async fn delete_all_from_server( } } - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] -pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { +pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let metadata = self.services.media.get_metadata(&mxc).await; - Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```"))) + self.write_str(&format!("```\n{metadata:#?}\n```")).await } #[admin_command] @@ -352,7 +332,7 @@ pub(super) async fn get_remote_file( mxc: OwnedMxcUri, server: Option, timeout: u32, -) -> Result { +) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); let mut result = self @@ -365,8 +345,8 @@ pub(super) async fn get_remote_file( let len = result.content.as_ref().expect("content").len(); result.content.as_mut().expect("content").clear(); - let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) + .await } #[admin_command] @@ -377,7 +357,7 @@ pub(super) async fn get_remote_thumbnail( timeout: u32, width: u32, height: u32, -) -> Result { +) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); let dim = Dim::new(width, height, None); @@ -391,6 +371,6 @@ pub(super) async fn get_remote_thumbnail( let len = result.content.as_ref().expect("content").len(); result.content.as_mut().expect("content").clear(); - let out = 
format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) + .await } diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index d212aab4..d1e6cd3a 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -1,8 +1,9 @@ +#![allow(rustdoc::broken_intra_doc_links)] mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, MxcUri, OwnedMxcUri, OwnedServerName, ServerName}; +use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName}; use crate::admin_command_dispatch; @@ -14,12 +15,12 @@ pub(super) enum MediaCommand { Delete { /// The MXC URL to delete #[arg(long)] - mxc: Option>, + mxc: Option, /// - The message event ID which contains the media and thumbnail MXC /// URLs #[arg(long)] - event_id: Option>, + event_id: Option, }, /// - Deletes a codeblock list of MXC URLs from our database and on the @@ -27,18 +28,18 @@ pub(super) enum MediaCommand { DeleteList, /// - Deletes all remote (and optionally local) media created before or - /// after \[duration] time using filesystem metadata first created at - /// date, or fallback to last modified date. This will always ignore - /// errors by default. + /// after [duration] time using filesystem metadata first created at date, + /// or fallback to last modified date. This will always ignore errors by + /// default. DeletePastRemoteMedia { /// - The relative time (e.g. 30s, 5m, 7d) within which to search duration: String, - /// - Only delete media created more recently than \[duration] ago + /// - Only delete media created before [duration] ago #[arg(long, short)] before: bool, - /// - Only delete media created after \[duration] ago + /// - Only delete media created after [duration] ago #[arg(long, short)] after: bool, @@ -56,7 +57,7 @@ pub(super) enum MediaCommand { /// - Deletes all remote media from the specified remote server. 
This will /// always ignore errors by default. DeleteAllFromServer { - server_name: Box, + server_name: OwnedServerName, /// Long argument to delete local media #[arg(long)] diff --git a/src/admin/mod.rs b/src/admin/mod.rs index 695155e8..1f777fa9 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -4,7 +4,7 @@ #![allow(clippy::too_many_arguments)] pub(crate) mod admin; -pub(crate) mod command; +pub(crate) mod context; pub(crate) mod processor; mod tests; pub(crate) mod utils; @@ -23,13 +23,9 @@ extern crate conduwuit_api as api; extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; -pub(crate) use conduwuit::Result; pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch}; -pub(crate) use crate::{ - command::Command, - utils::{escape_html, get_room_info}, -}; +pub(crate) use crate::{context::Context, utils::get_room_info}; pub(crate) const PAGE_SIZE: usize = 100; diff --git a/src/admin/processor.rs b/src/admin/processor.rs index eefcdcd6..8282a846 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -8,7 +8,7 @@ use std::{ use clap::{CommandFactory, Parser}; use conduwuit::{ - debug, error, + Error, Result, debug, error, log::{ capture, capture::Capture, @@ -16,24 +16,24 @@ use conduwuit::{ }, trace, utils::string::{collect_stream, common_prefix}, - warn, Error, Result, + warn, }; -use futures::{future::FutureExt, io::BufWriter, AsyncWriteExt}; +use futures::{AsyncWriteExt, future::FutureExt, io::BufWriter}; use ruma::{ + EventId, events::{ relation::InReplyTo, room::message::{Relation::Reply, RoomMessageEventContent}, }, - EventId, }; use service::{ - admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult}, Services, + admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult}, }; use tracing::Level; -use tracing_subscriber::{filter::LevelFilter, EnvFilter}; +use tracing_subscriber::{EnvFilter, filter::LevelFilter}; -use crate::{admin, admin::AdminCommand, Command}; +use 
crate::{admin, admin::AdminCommand, context::Context}; #[must_use] pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) } @@ -58,7 +58,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce | Ok(parsed) => parsed, }; - let context = Command { + let context = Context { services: &services, body: &body, timer: SystemTime::now(), @@ -91,6 +91,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce } } +#[allow(clippy::result_large_err)] fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { let link = "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺"; @@ -100,9 +101,9 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { Err(reply(content, command.reply_id.as_deref())) } -// Parse and process a message from the admin room +/// Parse and process a message from the admin room async fn process( - context: &Command<'_>, + context: &Context<'_>, command: AdminCommand, args: &[String], ) -> (Result, String) { @@ -131,7 +132,7 @@ async fn process( (result, output) } -fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { +fn capture_create(context: &Context<'_>) -> (Arc, Arc>) { let env_config = &context.services.server.config.admin_log_capture; let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| { warn!("admin_log_capture filter invalid: {e:?}"); @@ -164,7 +165,8 @@ fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { (capture, logs) } -// Parse chat messages from the admin room into an AdminCommand object +/// Parse chat messages from the admin room into an AdminCommand object +#[allow(clippy::result_large_err)] fn parse<'a>( services: &Arc, input: &'a CommandInput, @@ -232,7 +234,7 @@ fn complete_command(mut cmd: clap::Command, line: &str) -> String { ret.join(" ") } -// Parse chat messages from the admin room into an AdminCommand object +/// Parse chat messages from the admin room into 
an AdminCommand object fn parse_line(command_line: &str) -> Vec { let mut argv = command_line .split_whitespace() diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index b75d8234..228d2120 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId}; +use ruma::{OwnedRoomId, OwnedUserId}; use crate::{admin_command, admin_command_dispatch}; @@ -12,52 +12,51 @@ pub(crate) enum AccountDataCommand { /// - Returns all changes to the account data that happened after `since`. ChangesSince { /// Full user ID - user_id: Box, + user_id: OwnedUserId, /// UNIX timestamp since (u64) since: u64, /// Optional room ID of the account data - room_id: Option>, + room_id: Option, }, /// - Searches the account data for a specific kind. AccountDataGet { /// Full user ID - user_id: Box, + user_id: OwnedUserId, /// Account data event type kind: String, /// Optional room ID of the account data - room_id: Option>, + room_id: Option, }, } #[admin_command] async fn changes_since( &self, - user_id: Box, + user_id: OwnedUserId, since: u64, - room_id: Option>, -) -> Result { + room_id: Option, +) -> Result { let timer = tokio::time::Instant::now(); let results: Vec<_> = self .services .account_data - .changes_since(room_id.as_deref(), &user_id, since) + .changes_since(room_id.as_deref(), &user_id, since, None) .collect() .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await } #[admin_command] async fn account_data_get( &self, - user_id: Box, + user_id: OwnedUserId, kind: String, - room_id: Option>, -) -> Result { + room_id: Option, +) -> Result { let timer = 
tokio::time::Instant::now(); let results = self .services @@ -66,7 +65,6 @@ async fn account_data_get( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await } diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index f9e1fd2c..28bf6451 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -1,7 +1,8 @@ use clap::Subcommand; use conduwuit::Result; +use futures::TryStreamExt; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/appservice.rs @@ -9,7 +10,7 @@ pub(crate) enum AppserviceCommand { /// - Gets the appservice registration info/details from the ID as a string GetRegistration { /// Appservice registration ID - appservice_id: Box, + appservice_id: String, }, /// - Gets all appservice registrations with their ID and registration info @@ -17,7 +18,7 @@ pub(crate) enum AppserviceCommand { } /// All the getters and iterators from src/database/key_value/appservice.rs -pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: AppserviceCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { @@ -31,7 +32,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_> }, | AppserviceCommand::All => { let timer = tokio::time::Instant::now(); - let results = services.appservice.all().await; + let results: Vec<_> = services.appservice.iter_db_ids().try_collect().await?; let query_time = timer.elapsed(); write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 1642f7cd..c8c1f512 100644 --- 
a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -1,8 +1,8 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::ServerName; +use ruma::OwnedServerName; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/globals.rs @@ -11,17 +11,17 @@ pub(crate) enum GlobalsCommand { CurrentCount, - LastCheckForUpdatesId, + LastCheckForAnnouncementsId, /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// for the server. SigningKeysFor { - origin: Box, + origin: OwnedServerName, }, } /// All the getters and iterators from src/database/key_value/globals.rs -pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { @@ -39,9 +39,12 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, - | GlobalsCommand::LastCheckForUpdatesId => { + | GlobalsCommand::LastCheckForAnnouncementsId => { let timer = tokio::time::Instant::now(); - let results = services.updates.last_check_for_updates_id().await; + let results = services + .announcements + .last_check_for_announcements_id() + .await; let query_time = timer.elapsed(); write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 38272749..5b7ead4b 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -1,9 +1,9 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::UserId; +use ruma::OwnedUserId; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/presence.rs @@ -11,7 +11,7 @@ pub(crate) enum 
PresenceCommand { /// - Returns the latest presence event for the given user. GetPresence { /// Full user ID - user_id: Box, + user_id: OwnedUserId, }, /// - Iterator of the most recent presence updates that happened after the @@ -23,7 +23,7 @@ pub(crate) enum PresenceCommand { } /// All the getters and iterators in key_value/presence.rs -pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: PresenceCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 34edf4db..0d0e6cc9 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -1,19 +1,19 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::UserId; +use ruma::OwnedUserId; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum PusherCommand { /// - Returns all the pushers for the user. GetPushers { /// Full user ID - user_id: Box, + user_id: OwnedUserId, }, } -pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: PusherCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 5a6006ec..0e248c65 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -1,17 +1,16 @@ -use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; +use std::{borrow::Cow, collections::BTreeMap, ops::Deref, sync::Arc}; use clap::Subcommand; use conduwuit::{ - apply, at, is_zero, + Err, Result, apply, at, is_zero, utils::{ - stream::{ReadyExt, TryIgnore, TryParallelExt}, + stream::{IterStream, ReadyExt, TryIgnore, TryParallelExt}, string::EMPTY, - IterStream, }, - Err, Result, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::events::room::message::RoomMessageEventContent; +use 
conduwuit_database::Map; +use conduwuit_service::Services; +use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; use tokio::time::Instant; use crate::{admin_command, admin_command_dispatch}; @@ -170,25 +169,21 @@ pub(super) async fn compact( into: Option, parallelism: Option, exhaustive: bool, -) -> Result { +) -> Result { use conduwuit_database::compact::Options; - let default_all_maps = map - .is_none() - .then(|| { - self.services - .db - .keys() - .map(Deref::deref) - .map(ToOwned::to_owned) - }) - .into_iter() - .flatten(); + let default_all_maps: Option<_> = map.is_none().then(|| { + self.services + .db + .keys() + .map(Deref::deref) + .map(ToOwned::to_owned) + }); let maps: Vec<_> = map .unwrap_or_default() .into_iter() - .chain(default_all_maps) + .chain(default_all_maps.into_iter().flatten()) .map(|map| self.services.db.get(&map)) .filter_map(Result::ok) .cloned() @@ -225,55 +220,26 @@ pub(super) async fn compact( let results = results.await; let query_time = timer.elapsed(); self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_count( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_count(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let count = maps - .iter() - .stream() + let count = with_maps_or(map.as_deref(), self.services) .then(|map| map.raw_count_prefix(&prefix)) .ready_fold(0_usize, usize::saturating_add) .await; let query_time = timer.elapsed(); self.write_str(&format!("Query 
completed in {query_time:?}:\n\n```rs\n{count:#?}\n```")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_keys( - &self, - map: String, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys(&self, map: String, prefix: Option) -> Result { writeln!(self, "```").boxed().await?; let map = self.services.db.get(map.as_str())?; @@ -287,39 +253,16 @@ pub(super) async fn raw_keys( .await?; let query_time = timer.elapsed(); - let out = format!("\n```\n\nQuery completed in {query_time:?}"); - self.write_str(out.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_keys_sizes( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys_sizes(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_keys_prefix(&prefix)) .flatten() .ignore_err() @@ -332,39 +275,16 @@ pub(super) async fn raw_keys_sizes( .await; let query_time = timer.elapsed(); - let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); - self.write_str(result.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_keys_total( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn 
raw_keys_total(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_keys_prefix(&prefix)) .flatten() .ignore_err() @@ -373,40 +293,16 @@ pub(super) async fn raw_keys_total( .await; let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_vals_sizes( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_vals_sizes(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_stream_prefix(&prefix)) .flatten() .ignore_err() @@ -420,39 +316,16 @@ pub(super) async fn raw_vals_sizes( .await; let query_time = timer.elapsed(); - let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); - self.write_str(result.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) + .await } 
#[admin_command] -pub(super) async fn raw_vals_total( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_vals_total(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_stream_prefix(&prefix)) .flatten() .ignore_err() @@ -462,19 +335,12 @@ pub(super) async fn raw_vals_total( .await; let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_iter( - &self, - map: String, - prefix: Option, -) -> Result { +pub(super) async fn raw_iter(&self, map: String, prefix: Option) -> Result { writeln!(self, "```").await?; let map = self.services.db.get(&map)?; @@ -490,9 +356,7 @@ pub(super) async fn raw_iter( let query_time = timer.elapsed(); self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] @@ -501,7 +365,7 @@ pub(super) async fn raw_keys_from( map: String, start: String, limit: Option, -) -> Result { +) -> Result { writeln!(self, "```").await?; let map = self.services.db.get(&map)?; @@ -515,9 +379,7 @@ pub(super) async fn raw_keys_from( let query_time = timer.elapsed(); self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] @@ -526,7 +388,7 @@ pub(super) async fn 
raw_iter_from( map: String, start: String, limit: Option, -) -> Result { +) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); let result = map @@ -538,39 +400,53 @@ pub(super) async fn raw_iter_from( .await?; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -pub(super) async fn raw_del(&self, map: String, key: String) -> Result { +pub(super) async fn raw_del(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); map.remove(&key); - let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Operation completed in {query_time:?}" - ))) + let query_time = timer.elapsed(); + self.write_str(&format!("Operation completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_get(&self, map: String, key: String) -> Result { +pub(super) async fn raw_get(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); let handle = map.get(&key).await?; + let query_time = timer.elapsed(); let result = String::from_utf8_lossy(&handle); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) + .await } #[admin_command] -pub(super) async fn raw_maps(&self) -> Result { +pub(super) async fn raw_maps(&self) -> Result { let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect(); - Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}"))) + self.write_str(&format!("{list:#?}")).await +} + +fn with_maps_or<'a>( + map: Option<&'a str>, + services: &'a Services, +) -> impl Stream> + Send + 'a 
{ + let default_all_maps = map + .is_none() + .then(|| services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + map.into_iter() + .chain(default_all_maps) + .map(|map| services.db.get(map)) + .filter_map(Result::ok) + .stream() } diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 08b5d171..4a39a40e 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::time, Result}; +use conduwuit::{Result, utils::time}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedServerName}; +use ruma::OwnedServerName; use crate::{admin_command, admin_command_dispatch}; @@ -21,10 +21,7 @@ pub(crate) enum ResolverCommand { } #[admin_command] -async fn destinations_cache( - &self, - server_name: Option, -) -> Result { +async fn destinations_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedDest; writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?; @@ -44,11 +41,11 @@ async fn destinations_cache( .await?; } - Ok(RoomMessageEventContent::notice_plain("")) + Ok(()) } #[admin_command] -async fn overrides_cache(&self, server_name: Option) -> Result { +async fn overrides_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedOverride; writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?; @@ -70,5 +67,5 @@ async fn overrides_cache(&self, server_name: Option) -> Result, + alias: OwnedRoomAliasId, }, /// - Iterator of all our local room aliases for the room ID LocalAliasesForRoom { /// Full room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Iterator of all our local aliases in our database with their room IDs @@ -24,7 +24,7 @@ pub(crate) enum RoomAliasCommand { } /// All the getters and iterators in src/database/key_value/rooms/alias.rs -pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) -> Result { 
+pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 71dadc99..c64cd173 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,85 +1,85 @@ use clap::Subcommand; -use conduwuit::{Error, Result}; +use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum RoomStateCacheCommand { ServerInRoom { - server: Box, - room_id: Box, + server: OwnedServerName, + room_id: OwnedRoomId, }, RoomServers { - room_id: Box, + room_id: OwnedRoomId, }, ServerRooms { - server: Box, + server: OwnedServerName, }, RoomMembers { - room_id: Box, + room_id: OwnedRoomId, }, LocalUsersInRoom { - room_id: Box, + room_id: OwnedRoomId, }, ActiveLocalUsersInRoom { - room_id: Box, + room_id: OwnedRoomId, }, RoomJoinedCount { - room_id: Box, + room_id: OwnedRoomId, }, RoomInvitedCount { - room_id: Box, + room_id: OwnedRoomId, }, RoomUserOnceJoined { - room_id: Box, + room_id: OwnedRoomId, }, RoomMembersInvited { - room_id: Box, + room_id: OwnedRoomId, }, GetInviteCount { - room_id: Box, - user_id: Box, + room_id: OwnedRoomId, + user_id: OwnedUserId, }, GetLeftCount { - room_id: Box, - user_id: Box, + room_id: OwnedRoomId, + user_id: OwnedUserId, }, RoomsJoined { - user_id: Box, + user_id: OwnedUserId, }, RoomsLeft { - user_id: Box, + user_id: OwnedUserId, }, RoomsInvited { - user_id: Box, + user_id: OwnedUserId, }, InviteState { - user_id: Box, - room_id: Box, + user_id: OwnedUserId, + room_id: OwnedRoomId, }, } -pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: 
RoomStateCacheCommand, context: &Context<'_>) -> Result { let services = context.services; - let c = match subcommand { + match subcommand { | RoomStateCacheCommand::ServerInRoom { server, room_id } => { let timer = tokio::time::Instant::now(); let result = services @@ -89,9 +89,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomServers { room_id } => { let timer = tokio::time::Instant::now(); @@ -104,9 +106,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::ServerRooms { server } => { let timer = tokio::time::Instant::now(); @@ -119,9 +123,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomMembers { room_id } => { let timer = tokio::time::Instant::now(); @@ -134,9 +140,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - 
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::LocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -149,9 +157,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -164,18 +174,22 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomJoinedCount { room_id } => { let timer = tokio::time::Instant::now(); let results = services.rooms.state_cache.room_joined_count(&room_id).await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomInvitedCount { room_id } => { let timer = tokio::time::Instant::now(); @@ -186,9 +200,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, 
Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomUserOnceJoined { room_id } => { let timer = tokio::time::Instant::now(); @@ -201,9 +217,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomMembersInvited { room_id } => { let timer = tokio::time::Instant::now(); @@ -216,9 +234,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::GetInviteCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); @@ -229,9 +249,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::GetLeftCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); @@ -242,9 +264,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command 
.await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsJoined { user_id } => { let timer = tokio::time::Instant::now(); @@ -257,9 +281,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsInvited { user_id } => { let timer = tokio::time::Instant::now(); @@ -271,9 +297,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsLeft { user_id } => { let timer = tokio::time::Instant::now(); @@ -285,9 +313,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::InviteState { user_id, room_id } => { let timer = tokio::time::Instant::now(); @@ -298,13 +328,11 @@ pub(super) async fn process(subcommand: 
RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, - }?; - - context.write_str(c.body()).await?; - - Ok(()) + } } diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs index 3fe653e3..0fd22ca7 100644 --- a/src/admin/query/room_timeline.rs +++ b/src/admin/query/room_timeline.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::stream::TryTools, PduCount, Result}; +use conduwuit::{PduCount, Result, utils::stream::TryTools}; use futures::TryStreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomOrAliasId}; +use ruma::OwnedRoomOrAliasId; use crate::{admin_command, admin_command_dispatch}; @@ -24,7 +24,7 @@ pub(crate) enum RoomTimelineCommand { } #[admin_command] -pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { +pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let result = self @@ -34,7 +34,7 @@ pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result, limit: Option, -) -> Result { +) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let from: Option = from.as_deref().map(str::parse).transpose()?; @@ -57,5 +57,5 @@ pub(super) async fn pdus( .try_collect() .await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}"))) + self.write_str(&format!("{result:#?}")).await } diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 8c6fb25f..8b1676bc 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,10 +1,10 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use 
futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, ServerName, UserId}; +use ruma::{OwnedServerName, OwnedUserId}; use service::sending::Destination; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/sending.rs @@ -27,9 +27,9 @@ pub(crate) enum SendingCommand { #[arg(short, long)] appservice_id: Option, #[arg(short, long)] - server_name: Option>, + server_name: Option, #[arg(short, long)] - user_id: Option>, + user_id: Option, #[arg(short, long)] push_key: Option, }, @@ -49,30 +49,20 @@ pub(crate) enum SendingCommand { #[arg(short, long)] appservice_id: Option, #[arg(short, long)] - server_name: Option>, + server_name: Option, #[arg(short, long)] - user_id: Option>, + user_id: Option, #[arg(short, long)] push_key: Option, }, GetLatestEduCount { - server_name: Box, + server_name: OwnedServerName, }, } /// All the getters and iterators in key_value/sending.rs -pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result { - let c = reprocess(subcommand, context).await?; - context.write_str(c.body()).await?; - Ok(()) -} - -/// All the getters and iterators in key_value/sending.rs -pub(super) async fn reprocess( - subcommand: SendingCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { @@ -82,9 +72,11 @@ pub(super) async fn reprocess( let active_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" + )) + .await }, | SendingCommand::QueuedRequests { appservice_id, @@ -97,19 +89,19 @@ pub(super) async fn reprocess( && user_id.is_none() && 
push_key.is_none() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -120,40 +112,42 @@ pub(super) async fn reprocess( | (None, Some(server_name), None, None) => services .sending .db - .queued_requests(&Destination::Federation(server_name.into())), + .queued_requests(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services .sending .db - .queued_requests(&Destination::Push(user_id.into(), push_key)) + .queued_requests(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. Not all of them See --help for more details.", - )); + ); }, | _ => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. 
See --help for more details.", - )); + ); }, }; let queued_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" + )) + .await }, | SendingCommand::ActiveRequestsFor { appservice_id, @@ -166,20 +160,20 @@ pub(super) async fn reprocess( && user_id.is_none() && push_key.is_none() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -190,49 +184,53 @@ pub(super) async fn reprocess( | (None, Some(server_name), None, None) => services .sending .db - .active_requests_for(&Destination::Federation(server_name.into())), + .active_requests_for(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. 
See --help for more details.", - )); + ); } services .sending .db - .active_requests_for(&Destination::Push(user_id.into(), push_key)) + .active_requests_for(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. Not all of them See --help for more details.", - )); + ); }, | _ => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); }, }; let active_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" + )) + .await }, | SendingCommand::GetLatestEduCount { server_name } => { let timer = tokio::time::Instant::now(); let results = services.sending.db.get_latest_educount(&server_name).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, } } diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs index 7f0f3449..aa7c8666 100644 --- a/src/admin/query/short.rs +++ b/src/admin/query/short.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{events::room::message::RoomMessageEventContent, OwnedEventId, OwnedRoomOrAliasId}; +use ruma::{OwnedEventId, OwnedRoomOrAliasId}; use crate::{admin_command, admin_command_dispatch}; @@ -18,10 +18,7 @@ pub(crate) enum ShortCommand { } #[admin_command] -pub(super) async fn short_event_id( 
- &self, - event_id: OwnedEventId, -) -> Result { +pub(super) async fn short_event_id(&self, event_id: OwnedEventId) -> Result { let shortid = self .services .rooms @@ -29,17 +26,14 @@ pub(super) async fn short_event_id( .get_shorteventid(&event_id) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) + self.write_str(&format!("{shortid:#?}")).await } #[admin_command] -pub(super) async fn short_room_id( - &self, - room_id: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn short_room_id(&self, room_id: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) + self.write_str(&format!("{shortid:#?}")).await } diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 3715ac25..0f34d13f 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -1,9 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::stream::StreamExt; -use ruma::{ - events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId, -}; +use ruma::{OwnedDeviceId, OwnedRoomId, OwnedUserId}; use crate::{admin_command, admin_command_dispatch}; @@ -99,11 +97,7 @@ pub(crate) enum UsersCommand { } #[admin_command] -async fn get_shared_rooms( - &self, - user_a: OwnedUserId, - user_b: OwnedUserId, -) -> Result { +async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result: Vec<_> = self .services @@ -115,9 +109,8 @@ async fn get_shared_rooms( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] @@ -127,7 +120,7 @@ async fn 
get_backup_session( version: String, room_id: OwnedRoomId, session_id: String, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -136,9 +129,8 @@ async fn get_backup_session( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] @@ -147,7 +139,7 @@ async fn get_room_backups( user_id: OwnedUserId, version: String, room_id: OwnedRoomId, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -156,32 +148,22 @@ async fn get_room_backups( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_all_backups( - &self, - user_id: OwnedUserId, - version: String, -) -> Result { +async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.key_backups.get_all(&user_id, &version).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_backup_algorithm( - &self, - user_id: OwnedUserId, - version: String, -) -> Result { +async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -190,16 +172,12 @@ async fn get_backup_algorithm( .await; let query_time = timer.elapsed(); - 
Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_latest_backup_version( - &self, - user_id: OwnedUserId, -) -> Result { +async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -208,36 +186,33 @@ async fn get_latest_backup_version( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { +async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.key_backups.get_latest_backup(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn iter_users(&self) -> Result { +async fn iter_users(&self) -> Result { let timer = tokio::time::Instant::now(); let result: Vec = self.services.users.stream().map(Into::into).collect().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn iter_users2(&self) -> Result { +async fn iter_users2(&self) -> Result { let timer = tokio::time::Instant::now(); let result: Vec<_> = 
self.services.users.stream().collect().await; let result: Vec<_> = result @@ -248,35 +223,32 @@ async fn iter_users2(&self) -> Result { let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) + .await } #[admin_command] -async fn count_users(&self) -> Result { +async fn count_users(&self) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.count().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn password_hash(&self, user_id: OwnedUserId) -> Result { +async fn password_hash(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.password_hash(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn list_devices(&self, user_id: OwnedUserId) -> Result { +async fn list_devices(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let devices = self .services @@ -288,13 +260,12 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result Result { +async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let devices = self .services @@ -304,17 +275,12 @@ async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result Result { +async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: 
OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let device = self .services @@ -323,28 +289,22 @@ async fn get_device_metadata( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) + .await } #[admin_command] -async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { +async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let device = self.services.users.get_devicelist_version(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) + .await } #[admin_command] -async fn count_one_time_keys( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, -) -> Result { +async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -353,17 +313,12 @@ async fn count_one_time_keys( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_device_keys( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, -) -> Result { +async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -372,24 +327,22 @@ async fn get_device_keys( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - 
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { +async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.get_user_signing_key(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_master_key(&self, user_id: OwnedUserId) -> Result { +async fn get_master_key(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -398,27 +351,21 @@ async fn get_master_key(&self, user_id: OwnedUserId) -> Result Result { +async fn get_to_device_events(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services .users - .get_to_device_events(&user_id, &device_id) + .get_to_device_events(&user_id, &device_id, None, None) .collect::>() .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 9710cfc8..6b37ffe4 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -1,13 +1,11 @@ use std::fmt::Write; use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomId, -}; +use 
ruma::{OwnedRoomAliasId, OwnedRoomId}; -use crate::{escape_html, Command}; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum RoomAliasCommand { @@ -18,7 +16,7 @@ pub(crate) enum RoomAliasCommand { force: bool, /// The room id to set the alias on - room_id: Box, + room_id: OwnedRoomId, /// The alias localpart to use (`alias`, not `#alias:servername.tld`) room_alias_localpart: String, @@ -40,21 +38,11 @@ pub(crate) enum RoomAliasCommand { /// - List aliases currently being used List { /// If set, only list the aliases for this room - room_id: Option>, + room_id: Option, }, } -pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result { - let c = reprocess(command, context).await?; - context.write_str(c.body()).await?; - - Ok(()) -} - -pub(super) async fn reprocess( - command: RoomAliasCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) -> Result { let services = context.services; let server_user = &services.globals.server_user; @@ -66,13 +54,12 @@ pub(super) async fn reprocess( format!("#{}:{}", room_alias_localpart, services.globals.server_name()); let room_alias = match OwnedRoomAliasId::parse(room_alias_str) { | Ok(alias) => alias, - | Err(err) => - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse alias: {err}" - ))), + | Err(err) => { + return Err!("Failed to parse alias: {err}"); + }, }; match command { - | RoomAliasCommand::Set { force, room_id, .. } => + | RoomAliasCommand::Set { force, room_id, .. 
} => { match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) { | (true, Ok(id)) => { match services.rooms.alias.set_alias( @@ -80,59 +67,52 @@ pub(super) async fn reprocess( &room_id, server_user, ) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully overwrote alias (formerly {id})" - ))), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => + context + .write_str(&format!( + "Successfully overwrote alias (formerly {id})" + )) + .await, } }, - | (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!( + | (false, Ok(id)) => Err!( "Refusing to overwrite in use alias for {id}, use -f or --force to \ overwrite" - ))), + ), | (_, Err(_)) => { match services.rooms.alias.set_alias( &room_alias, &room_id, server_user, ) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain( - "Successfully set alias", - )), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => context.write_str("Successfully set alias").await, } }, - }, - | RoomAliasCommand::Remove { .. } => + } + }, + | RoomAliasCommand::Remove { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { + | Err(_) => Err!("Alias isn't in use."), | Ok(id) => match services .rooms .alias .remove_alias(&room_alias, server_user) .await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Removed alias from {id}" - ))), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => + context.write_str(&format!("Removed alias from {id}")).await, }, - | Err(_) => - Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, - | RoomAliasCommand::Which { .. 
} => + } + }, + | RoomAliasCommand::Which { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { - | Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( - "Alias resolves to {id}" - ))), - | Err(_) => - Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, + | Err(_) => Err!("Alias isn't in use."), + | Ok(id) => context.write_str(&format!("Alias resolves to {id}")).await, + } + }, | RoomAliasCommand::List { .. } => unreachable!(), } }, @@ -152,15 +132,8 @@ pub(super) async fn reprocess( output }); - let html_list = aliases.iter().fold(String::new(), |mut output, alias| { - writeln!(output, "
  • {}
  • ", escape_html(alias.as_ref())) - .expect("should be able to write to string buffer"); - output - }); - let plain = format!("Aliases for {room_id}:\n{plain_list}"); - let html = format!("Aliases for {room_id}:\n
      {html_list}
    "); - Ok(RoomMessageEventContent::text_html(plain, html)) + context.write_str(&plain).await } else { let aliases = services .rooms @@ -179,23 +152,8 @@ pub(super) async fn reprocess( output }); - let html_list = aliases - .iter() - .fold(String::new(), |mut output, (alias, id)| { - writeln!( - output, - "
  • {} -> #{}:{}
  • ", - escape_html(alias.as_ref()), - escape_html(id), - server_name - ) - .expect("should be able to write to string buffer"); - output - }); - let plain = format!("Aliases:\n{plain_list}"); - let html = format!("Aliases:\n
      {html_list}
    "); - Ok(RoomMessageEventContent::text_html(plain, html)) + context.write_str(&plain).await }, } } diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index b58d04c5..81f36f15 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -1,8 +1,8 @@ -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; +use ruma::OwnedRoomId; -use crate::{admin_command, get_room_info, PAGE_SIZE}; +use crate::{PAGE_SIZE, admin_command, get_room_info}; #[admin_command] pub(super) async fn list_rooms( @@ -11,7 +11,7 @@ pub(super) async fn list_rooms( exclude_disabled: bool, exclude_banned: bool, no_details: bool, -) -> Result { +) -> Result { // TODO: i know there's a way to do this with clap, but i can't seem to find it let page = page.unwrap_or(1); let mut rooms = self @@ -41,29 +41,28 @@ pub(super) async fn list_rooms( .collect::>(); if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("No more rooms.")); - }; + return Err!("No more rooms."); + } - let output_plain = format!( - "Rooms ({}):\n```\n{}\n```", - rooms.len(), - rooms - .iter() - .map(|(id, members, name)| if no_details { + let body = rooms + .iter() + .map(|(id, members, name)| { + if no_details { format!("{id}") } else { format!("{id}\tMembers: {members}\tName: {name}") - }) - .collect::>() - .join("\n") - ); + } + }) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),)) + .await } #[admin_command] -pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result { +pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result { let result = self.services.rooms.metadata.exists(&room_id).await; - Ok(RoomMessageEventContent::notice_markdown(format!("{result}"))) + self.write_str(&format!("{result}")).await } diff --git a/src/admin/room/directory.rs 
b/src/admin/room/directory.rs index 791b9204..a6be9a15 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,22 +1,22 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId}; +use ruma::OwnedRoomId; -use crate::{get_room_info, Command, PAGE_SIZE}; +use crate::{Context, PAGE_SIZE, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomDirectoryCommand { /// - Publish a room to the room directory Publish { /// The room id of the room to publish - room_id: Box, + room_id: OwnedRoomId, }, /// - Unpublish a room to the room directory Unpublish { /// The room id of the room to unpublish - room_id: Box, + room_id: OwnedRoomId, }, /// - List rooms that are published @@ -25,25 +25,16 @@ pub(crate) enum RoomDirectoryCommand { }, } -pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result { - let c = reprocess(command, context).await?; - context.write_str(c.body()).await?; - Ok(()) -} - -pub(super) async fn reprocess( - command: RoomDirectoryCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>) -> Result { let services = context.services; match command { | RoomDirectoryCommand::Publish { room_id } => { services.rooms.directory.set_public(&room_id); - Ok(RoomMessageEventContent::notice_plain("Room published")) + context.write_str("Room published").await }, | RoomDirectoryCommand::Unpublish { room_id } => { services.rooms.directory.set_not_public(&room_id); - Ok(RoomMessageEventContent::notice_plain("Room unpublished")) + context.write_str("Room unpublished").await }, | RoomDirectoryCommand::List { page } => { // TODO: i know there's a way to do this with clap, but i can't seem to find it @@ -66,20 +57,18 @@ pub(super) async fn reprocess( .collect(); if rooms.is_empty() { - return 
Ok(RoomMessageEventContent::text_plain("No more rooms.")); - }; + return Err!("No more rooms."); + } - let output = format!( - "Rooms (page {page}):\n```\n{}\n```", - rooms - .iter() - .map(|(id, members, name)| format!( - "{id} | Members: {members} | Name: {name}" - )) - .collect::>() - .join("\n") - ); - Ok(RoomMessageEventContent::text_markdown(output)) + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) + .collect::>() + .join("\n"); + + context + .write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",)) + .await }, } } diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index 34abf8a9..1278e820 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::ReadyExt, Result}; +use conduwuit::{Err, Result, utils::ReadyExt}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId}; +use ruma::OwnedRoomId; use crate::{admin_command, admin_command_dispatch}; @@ -10,7 +10,7 @@ use crate::{admin_command, admin_command_dispatch}; pub(crate) enum RoomInfoCommand { /// - List joined members in a room ListJoinedMembers { - room_id: Box, + room_id: OwnedRoomId, /// Lists only our local users in the specified room #[arg(long)] @@ -22,16 +22,12 @@ pub(crate) enum RoomInfoCommand { /// Room topics can be huge, so this is in its /// own separate command ViewRoomTopic { - room_id: Box, + room_id: OwnedRoomId, }, } #[admin_command] -async fn list_joined_members( - &self, - room_id: Box, - local_only: bool, -) -> Result { +async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> Result { let room_name = self .services .rooms @@ -64,22 +60,19 @@ async fn list_joined_members( .collect() .await; - let output_plain = format!( - "{} Members in Room \"{}\":\n```\n{}\n```", - member_info.len(), - room_name, - member_info - .into_iter() - .map(|(displayname, mxid)| format!("{mxid} | 
{displayname}")) - .collect::>() - .join("\n") - ); + let num = member_info.len(); + let body = member_info + .into_iter() + .map(|(displayname, mxid)| format!("{mxid} | {displayname}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",)) + .await } #[admin_command] -async fn view_room_topic(&self, room_id: Box) -> Result { +async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result { let Ok(room_topic) = self .services .rooms @@ -87,10 +80,9 @@ async fn view_room_topic(&self, room_id: Box) -> Result, + room: OwnedRoomOrAliasId, }, /// - Bans a list of rooms (room IDs and room aliases) from a newline - /// delimited codeblock similar to `user deactivate-all` - BanListOfRooms { - #[arg(short, long)] - /// Evicts admins out of the room and ignores any potential errors when - /// making our local users leave the room - force: bool, - - #[arg(long)] - /// Disables incoming federation of the room after banning and evicting - /// users - disable_federation: bool, - }, + /// delimited codeblock similar to `user deactivate-all`. 
Applies the same + /// steps as ban-room + BanListOfRooms, /// - Unbans a room to allow local users to join again - /// - /// To re-enable incoming federation of the room, use --enable-federation UnbanRoom { - #[arg(long)] - /// Enables incoming federation of the room after unbanning - enable_federation: bool, - /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` - room: Box, + room: OwnedRoomOrAliasId, }, /// - List of all rooms we have banned @@ -77,31 +46,27 @@ pub(crate) enum RoomModerationCommand { } #[admin_command] -async fn ban_room( - &self, - force: bool, - disable_federation: bool, - room: Box, -) -> Result { +async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { debug!("Got room alias or ID: {}", room); let admin_room_alias = &self.services.globals.admin_alias; if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) { - return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room.")); + return Err!("Not allowed to ban the admin room."); } } let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, - | Err(e) => - return Ok(RoomMessageEventContent::text_plain(format!( + | Err(e) => { + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ); + }, }; debug!("Room specified is a room ID, banning room ID"); @@ -111,12 +76,13 @@ async fn ban_room( } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, - | Err(e) => - return Ok(RoomMessageEventContent::text_plain(format!( + | Err(e) => { + return Err!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ); + }, }; debug!( @@ -124,153 +90,111 @@ async fn ban_room( locally, if not using get_alias_helper to fetch room ID remotely" ); - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch room \ - ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch \ + room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room_id}" - ); - room_id - }, - | Err(e) => { - return Ok(RoomMessageEventContent::notice_plain(format!( - "Failed to resolve room alias {room_alias} to a room ID: {e}" - ))); - }, - } + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for {room_id}" + ); + room_id + }, + | Err(e) => { + return Err!( + "Failed to resolve room alias {room_alias} to a room ID: {e}" + ); + }, + } + }, }; self.services.rooms.metadata.ban_room(&room_id, true); room_id } else { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Room specified is not a room ID or room alias. 
Please note that this requires a \ full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`)", - )); + ); }; - debug!("Making all users leave the room {}", &room); - if force { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); + debug!("Making all users leave the room {room_id} and forgetting it"); + let mut users = self + .services + .rooms + .state_cache + .room_members(&room_id) + .map(ToOwned::to_owned) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); - while let Some(local_user) = users.next().await { - debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring all \ - errors, evicting admins too)", - ); + while let Some(ref user_id) = users.next().await { + debug!( + "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ + evicting admins too)", + ); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - warn!(%e, "Failed to leave room"); - } + if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + warn!("Failed to leave room: {e}"); } - } else { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - while let Some(local_user) = users.next().await { - if self.services.users.is_admin(local_user).await { - continue; - } - - debug!("Attempting leave for user {} in room {}", &local_user, &room_id); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - error!( - "Error attempting to make local user {} leave room {} during room banning: \ - {}", - &local_user, &room_id, e - ); - return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room banning \ - (room is still banned but not removing any 
more users): {}\nIf you would \ - like to ignore errors, use --force", - &local_user, &room_id, e - ))); - } - } + self.services.rooms.state_cache.forget(&room_id, user_id); } - // remove any local aliases, ignore errors - for local_alias in &self - .services + self.services .rooms .alias .local_aliases_for_room(&room_id) .map(ToOwned::to_owned) - .collect::>() - .await - { - _ = self - .services - .rooms - .alias - .remove_alias(local_alias, &self.services.globals.server_user) - .await; - } + .for_each(|local_alias| async move { + self.services + .rooms + .alias + .remove_alias(&local_alias, &self.services.globals.server_user) + .await + .ok(); + }) + .await; - // unpublish from room directory, ignore errors + // unpublish from room directory self.services.rooms.directory.set_not_public(&room_id); - if disable_federation { - self.services.rooms.metadata.disable_room(&room_id, true); - return Ok(RoomMessageEventContent::text_plain( - "Room banned, removed all our local users, and disabled incoming federation with \ - room.", - )); - } + self.services.rooms.metadata.disable_room(&room_id, true); - Ok(RoomMessageEventContent::text_plain( - "Room banned and removed all our local users, use `!admin federation disable-room` to \ - stop receiving new inbound federation events as well if needed.", - )) + self.write_str( + "Room banned, removed all our local users, and disabled incoming federation with room.", + ) + .await } #[admin_command] -async fn ban_list_of_rooms( - &self, - force: bool, - disable_federation: bool, -) -> Result { +async fn ban_list_of_rooms(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } let rooms_s = self @@ -290,7 +214,7 @@ async fn ban_list_of_rooms( if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias) { - info!("User specified admin room in bulk ban list, ignoring"); + warn!("User specified admin room in bulk ban list, ignoring"); continue; } } @@ -299,19 +223,12 @@ async fn ban_list_of_rooms( let room_id = match RoomId::parse(room_alias_or_id) { | Ok(room_id) => room_id, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force banning - warn!( - "Error parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the \ - list and try again: {e}" - ))); + // ignore rooms we failed to parse + warn!( + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" + ); + continue; }, }; @@ -321,87 +238,65 @@ async fn ban_list_of_rooms( if room_alias_or_id.is_room_alias_id() { match RoomAliasId::parse(room_alias_or_id) { | Ok(room_alias) => { - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, \ - attempting to fetch room ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, \ + attempting to fetch room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room}", - ); - room_id - }, - | Err(e) => { - // don't fail if force blocking - if force { + match self + .services + .rooms + .alias 
+ .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for \ + {room}", + ); + room_id + }, + | Err(e) => { warn!( "Failed to resolve room alias {room} to a room \ ID: {e}" ); continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: \ - {e}" - ))); - }, - } + }, + } + }, }; room_ids.push(room_id); }, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force deleting - error!( - "Error parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the \ - list and try again: {e}" - ))); + warn!( + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" + ); + continue; }, } } }, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force deleting - error!( - "Error parsing room \"{room}\" during bulk room banning, ignoring error \ - and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the list and try \ - again: {e}" - ))); + warn!( + "Error parsing room \"{room}\" during bulk room banning, ignoring error and \ + logging here: {e}" + ); + continue; }, } } @@ -412,56 +307,27 @@ async fn ban_list_of_rooms( debug!("Banned {room_id} successfully"); room_ban_count = room_ban_count.saturating_add(1); - debug!("Making all users leave the room {}", &room_id); - if force { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); + debug!("Making all users leave the room {room_id} and forgetting it"); + let mut users = self + .services + .rooms + 
.state_cache + .room_members(&room_id) + .map(ToOwned::to_owned) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); - while let Some(local_user) = users.next().await { - debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring \ - all errors, evicting admins too)", - ); + while let Some(ref user_id) = users.next().await { + debug!( + "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ + evicting admins too)", + ); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - warn!(%e, "Failed to leave room"); - } + if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + warn!("Failed to leave room: {e}"); } - } else { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - while let Some(local_user) = users.next().await { - if self.services.users.is_admin(local_user).await { - continue; - } - - debug!("Attempting leave for user {local_user} in room {room_id}"); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - error!( - "Error attempting to make local user {local_user} leave room {room_id} \ - during bulk room banning: {e}", - ); - - return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room \ - banning (room is still banned but not removing any more users and not \ - banning any more rooms): {}\nIf you would like to ignore errors, use \ - --force", - &local_user, &room_id, e - ))); - } - } + self.services.rooms.state_cache.forget(&room_id, user_id); } // remove any local aliases, ignore errors @@ -483,38 +349,28 @@ async fn ban_list_of_rooms( // unpublish from room directory, ignore errors self.services.rooms.directory.set_not_public(&room_id); - if disable_federation { - self.services.rooms.metadata.disable_room(&room_id, true); - } + 
self.services.rooms.metadata.disable_room(&room_id, true); } - if disable_federation { - Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, \ - and disabled incoming federation with the room." - ))) - } else { - Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk room ban, banned {room_ban_count} total rooms and evicted all users." - ))) - } + self.write_str(&format!( + "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \ + disabled incoming federation with the room." + )) + .await } #[admin_command] -async fn unban_room( - &self, - enable_federation: bool, - room: Box, -) -> Result { +async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, - | Err(e) => - return Ok(RoomMessageEventContent::text_plain(format!( + | Err(e) => { + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ); + }, }; debug!("Room specified is a room ID, unbanning room ID"); @@ -524,12 +380,13 @@ async fn unban_room( } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, - | Err(e) => - return Ok(RoomMessageEventContent::text_plain(format!( + | Err(e) => { + return Err!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ); + }, }; debug!( @@ -537,67 +394,60 @@ async fn unban_room( locally, if not using get_alias_helper to fetch room ID remotely" ); - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch room \ - ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch \ + room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for room {room}" - ); - room_id - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" - ))); - }, - } + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for room {room}" + ); + room_id + }, + | Err(e) => { + return Err!("Failed to resolve room alias {room} to a room ID: {e}"); + }, + } + }, }; self.services.rooms.metadata.ban_room(&room_id, false); room_id } else { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Room specified is not a room ID or room alias. 
Please note that this requires a \ full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`)", - )); + ); }; - if enable_federation { - self.services.rooms.metadata.disable_room(&room_id, false); - return Ok(RoomMessageEventContent::text_plain("Room unbanned.")); - } - - Ok(RoomMessageEventContent::text_plain( - "Room unbanned, you may need to re-enable federation with the room using enable-room if \ - this is a remote room to make it fully functional.", - )) + self.services.rooms.metadata.disable_room(&room_id, false); + self.write_str("Room unbanned and federation re-enabled.") + .await } #[admin_command] -async fn list_banned_rooms(&self, no_details: bool) -> Result { +async fn list_banned_rooms(&self, no_details: bool) -> Result { let room_ids: Vec = self .services .rooms @@ -608,7 +458,7 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result Result>() - .join("\n") - ); + } + }) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",)) + .await } diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 910dce6e..6027a9eb 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,12 +1,16 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{info, utils::time, warn, Err, Result}; -use ruma::events::room::message::RoomMessageEventContent; +use conduwuit::{ + Err, Result, info, + utils::{stream::IterStream, time}, + warn, +}; +use futures::TryStreamExt; use crate::admin_command; #[admin_command] -pub(super) async fn uptime(&self) -> Result { +pub(super) async fn uptime(&self) -> Result { let elapsed = self .services .server @@ -15,47 +19,36 @@ pub(super) async fn uptime(&self) -> Result { .expect("standard duration"); let result = time::pretty(elapsed); - Ok(RoomMessageEventContent::notice_plain(format!("{result}."))) + 
self.write_str(&format!("{result}.")).await } #[admin_command] -pub(super) async fn show_config(&self) -> Result { - // Construct and send the response - Ok(RoomMessageEventContent::text_markdown(format!( - "{}", - *self.services.server.config - ))) +pub(super) async fn show_config(&self) -> Result { + self.write_str(&format!("{}", *self.services.server.config)) + .await } #[admin_command] -pub(super) async fn reload_config( - &self, - path: Option, -) -> Result { +pub(super) async fn reload_config(&self, path: Option) -> Result { let path = path.as_deref().into_iter(); self.services.config.reload(path)?; - Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) + self.write_str("Successfully reconfigured.").await } #[admin_command] -pub(super) async fn list_features( - &self, - available: bool, - enabled: bool, - comma: bool, -) -> Result { +pub(super) async fn list_features(&self, available: bool, enabled: bool, comma: bool) -> Result { let delim = if comma { "," } else { " " }; if enabled && !available { let features = info::rustc::features().join(delim); let out = format!("`\n{features}\n`"); - return Ok(RoomMessageEventContent::text_markdown(out)); + return self.write_str(&out).await; } if available && !enabled { let features = info::cargo::features().join(delim); let out = format!("`\n{features}\n`"); - return Ok(RoomMessageEventContent::text_markdown(out)); + return self.write_str(&out).await; } let mut features = String::new(); @@ -68,84 +61,76 @@ pub(super) async fn list_features( writeln!(features, "{emoji} {feature} {remark}")?; } - Ok(RoomMessageEventContent::text_markdown(features)) + self.write_str(&features).await } #[admin_command] -pub(super) async fn memory_usage(&self) -> Result { +pub(super) async fn memory_usage(&self) -> Result { let services_usage = self.services.memory_usage().await?; let database_usage = self.services.db.db.memory_usage()?; let allocator_usage = conduwuit::alloc::memory_usage().map_or(String::new(), |s| 
format!("\nAllocator:\n{s}")); - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}", - ))) + )) + .await } #[admin_command] -pub(super) async fn clear_caches(&self) -> Result { +pub(super) async fn clear_caches(&self) -> Result { self.services.clear_cache().await; - Ok(RoomMessageEventContent::text_plain("Done.")) + self.write_str("Done.").await } #[admin_command] -pub(super) async fn list_backups(&self) -> Result { - let result = self.services.globals.db.backup_list()?; - - if result.is_empty() { - Ok(RoomMessageEventContent::text_plain("No backups found.")) - } else { - Ok(RoomMessageEventContent::text_plain(result)) - } +pub(super) async fn list_backups(&self) -> Result { + self.services + .db + .db + .backup_list()? + .try_stream() + .try_for_each(|result| write!(self, "{result}")) + .await } #[admin_command] -pub(super) async fn backup_database(&self) -> Result { - let globals = Arc::clone(&self.services.globals); - let mut result = self +pub(super) async fn backup_database(&self) -> Result { + let db = Arc::clone(&self.services.db); + let result = self .services .server .runtime() - .spawn_blocking(move || match globals.db.backup() { - | Ok(()) => String::new(), - | Err(e) => e.to_string(), + .spawn_blocking(move || match db.db.backup() { + | Ok(()) => "Done".to_owned(), + | Err(e) => format!("Failed: {e}"), }) .await?; - if result.is_empty() { - result = self.services.globals.db.backup_list()?; - } - - Ok(RoomMessageEventContent::notice_markdown(result)) + let count = self.services.db.db.backup_count()?; + self.write_str(&format!("{result}. 
Currently have {count} backups.")) + .await } #[admin_command] -pub(super) async fn list_database_files(&self) -> Result { - let result = self.services.globals.db.file_list()?; - - Ok(RoomMessageEventContent::notice_markdown(result)) -} - -#[admin_command] -pub(super) async fn admin_notice(&self, message: Vec) -> Result { +pub(super) async fn admin_notice(&self, message: Vec) -> Result { let message = message.join(" "); self.services.admin.send_text(&message).await; - Ok(RoomMessageEventContent::notice_plain("Notice was sent to #admins")) + self.write_str("Notice was sent to #admins").await } #[admin_command] -pub(super) async fn reload_mods(&self) -> Result { +pub(super) async fn reload_mods(&self) -> Result { self.services.server.reload()?; - Ok(RoomMessageEventContent::notice_plain("Reloading server...")) + self.write_str("Reloading server...").await } #[admin_command] #[cfg(unix)] -pub(super) async fn restart(&self, force: bool) -> Result { +pub(super) async fn restart(&self, force: bool) -> Result { use conduwuit::utils::sys::current_exe_deleted; if !force && current_exe_deleted() { @@ -157,13 +142,13 @@ pub(super) async fn restart(&self, force: bool) -> Result Result { +pub(super) async fn shutdown(&self) -> Result { warn!("shutdown command"); self.services.server.shutdown()?; - Ok(RoomMessageEventContent::notice_plain("Shutting down server...")) + self.write_str("Shutting down server...").await } diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 3f3d6c5e..6b99e5de 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -36,7 +36,7 @@ pub(super) enum ServerCommand { /// - Print database memory usage statistics MemoryUsage, - /// - Clears all of Conduwuit's caches + /// - Clears all of Continuwuity's caches ClearCaches, /// - Performs an online backup of the database (only available for RocksDB @@ -46,9 +46,6 @@ pub(super) enum ServerCommand { /// - List database backups ListBackups, - /// - List database files - 
ListDatabaseFiles, - /// - Send a message to the admin room. AdminNotice { message: Vec, diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 64767a36..e5e481e5 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,23 +2,23 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - debug_warn, error, info, is_equal_to, + Err, Result, debug, debug_warn, error, info, is_equal_to, + matrix::pdu::PduBuilder, utils::{self, ReadyExt}, - warn, PduBuilder, Result, + warn, }; use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; use ruma::{ + OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId, events::{ + RoomAccountDataEventType, StateEventType, room::{ - message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, redaction::RoomRedactionEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, StateEventType, }, - EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, }; use crate::{ @@ -30,7 +30,7 @@ const AUTO_GEN_PASSWORD_LENGTH: usize = 25; const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin."; #[admin_command] -pub(super) async fn list_users(&self) -> Result { +pub(super) async fn list_users(&self) -> Result { let users: Vec<_> = self .services .users @@ -43,30 +43,22 @@ pub(super) async fn list_users(&self) -> Result { plain_msg += users.join("\n").as_str(); plain_msg += "\n```"; - self.write_str(plain_msg.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&plain_msg).await } #[admin_command] -pub(super) async fn create_user( - &self, - username: String, - password: Option, -) -> Result { +pub(super) async fn create_user(&self, username: String, password: Option) -> Result { // Validate user id let user_id = 
parse_local_user_id(self.services, &username)?; - if self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain(format!( - "Userid {user_id} already exists" - ))); + if let Err(e) = user_id.validate_strict() { + if self.services.config.emergency_password.is_none() { + return Err!("Username {user_id} contains disallowed characters or spaces: {e}"); + } } - if user_id.is_historical() { - return Ok(RoomMessageEventContent::text_plain(format!( - "User ID {user_id} does not conform to new Matrix identifier spec" - ))); + if self.services.users.exists(&user_id).await { + return Err!("User {user_id} already exists"); } let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -88,8 +80,7 @@ pub(super) async fn create_user( .new_user_displayname_suffix .is_empty() { - write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix) - .expect("should be able to write to string buffer"); + write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)?; } self.services @@ -109,15 +100,17 @@ pub(super) async fn create_user( content: ruma::events::push_rules::PushRulesEventContent { global: ruma::push::Ruleset::server_default(&user_id), }, - }) - .expect("to json value always works"), + })?, ) .await?; if !self.services.server.config.auto_join_rooms.is_empty() { for room in &self.services.server.config.auto_join_rooms { let Ok(room_id) = self.services.rooms.alias.resolve(room).await else { - error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"); + error!( + %user_id, + "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping" + ); continue; }; @@ -153,20 +146,19 @@ pub(super) async fn create_user( info!("Automatically joined room {room} for user {user_id}"); }, | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to automatically 
join room {room} for user {user_id}: \ - {e}" - ))) - .await - .ok(); // don't return this error so we don't fail registrations error!( "Failed to automatically join room {room} for user {user_id}: {e}" ); + self.services + .admin + .send_text(&format!( + "Failed to automatically join room {room} for user {user_id}: \ + {e}" + )) + .await; }, - }; + } } } } @@ -185,31 +177,24 @@ pub(super) async fn create_user( .is_ok_and(is_equal_to!(1)) { self.services.admin.make_user_admin(&user_id).await?; - warn!("Granting {user_id} admin privileges as the first user"); } + } else { + debug!("create_user admin command called without an admin room being available"); } - // Inhibit login does not work for guests - Ok(RoomMessageEventContent::text_plain(format!( - "Created user with user_id: {user_id} and password: `{password}`" - ))) + self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`")) + .await } #[admin_command] -pub(super) async fn deactivate( - &self, - no_leave_rooms: bool, - user_id: String, -) -> Result { +pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &user_id)?; // don't deactivate the server service account if user_id == self.services.globals.server_user { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to deactivate the server service account.", - )); + return Err!("Not allowed to deactivate the server service account.",); } self.services.users.deactivate_account(&user_id).await?; @@ -217,11 +202,8 @@ pub(super) async fn deactivate( if !no_leave_rooms { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Making {user_id} leave all rooms after deactivation..." 
- ))) - .await - .ok(); + .send_text(&format!("Making {user_id} leave all rooms after deactivation...")) + .await; let all_joined_rooms: Vec = self .services @@ -238,24 +220,19 @@ pub(super) async fn deactivate( leave_all_rooms(self.services, &user_id).await; } - Ok(RoomMessageEventContent::text_plain(format!( - "User {user_id} has been deactivated" - ))) + self.write_str(&format!("User {user_id} has been deactivated")) + .await } #[admin_command] -pub(super) async fn reset_password( - &self, - username: String, - password: Option, -) -> Result { +pub(super) async fn reset_password(&self, username: String, password: Option) -> Result { let user_id = parse_local_user_id(self.services, &username)?; if user_id == self.services.globals.server_user { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to set the password for the server account. Please use the emergency \ password config option.", - )); + ); } let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -265,28 +242,20 @@ pub(super) async fn reset_password( .users .set_password(&user_id, Some(new_password.as_str())) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {user_id}: `{new_password}`" - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Couldn't reset the password for user {user_id}: {e}" - ))), + | Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"), + | Ok(()) => + write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"), } + .await } #[admin_command] -pub(super) async fn deactivate_all( - &self, - no_leave_rooms: bool, - force: bool, -) -> Result { +pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - 
"Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } let usernames = self @@ -300,15 +269,23 @@ pub(super) async fn deactivate_all( for username in usernames { match parse_active_local_user_id(self.services, username).await { + | Err(e) => { + self.services + .admin + .send_text(&format!("{username} is not a valid username, skipping over: {e}")) + .await; + + continue; + }, | Ok(user_id) => { if self.services.users.is_admin(&user_id).await && !force { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is an admin and --force is not set, skipping over" - ))) - .await - .ok(); + )) + .await; + admins.push(username); continue; } @@ -317,26 +294,16 @@ pub(super) async fn deactivate_all( if user_id == self.services.globals.server_user { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is the server service account, skipping over" - ))) - .await - .ok(); + )) + .await; + continue; } user_ids.push(user_id); }, - | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username, skipping over: {e}" - ))) - .await - .ok(); - continue; - }, } } @@ -344,6 +311,12 @@ pub(super) async fn deactivate_all( for user_id in user_ids { match self.services.users.deactivate_account(&user_id).await { + | Err(e) => { + self.services + .admin + .send_text(&format!("Failed deactivating user: {e}")) + .await; + }, | Ok(()) => { deactivation_count = deactivation_count.saturating_add(1); if !no_leave_rooms { @@ -364,33 +337,24 @@ pub(super) async fn deactivate_all( leave_all_rooms(self.services, &user_id).await; } }, - | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed deactivating user: {e}" - ))) - .await - .ok(); - }, } } if 
admins.is_empty() { - Ok(RoomMessageEventContent::text_plain(format!( - "Deactivated {deactivation_count} accounts." - ))) + write!(self, "Deactivated {deactivation_count} accounts.") } else { - Ok(RoomMessageEventContent::text_plain(format!( + write!( + self, "Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \ --force to deactivate admin accounts", admins.join(", ") - ))) + ) } + .await } #[admin_command] -pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result { +pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &user_id)?; @@ -404,23 +368,20 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result>() - .join("\n") - ); + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),)) + .await } #[admin_command] @@ -428,27 +389,23 @@ pub(super) async fn force_join_list_of_local_users( &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, -) -> Result { +) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } if !yes_i_want_to_do_this { - return Ok(RoomMessageEventContent::notice_markdown( + return Err!( "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ bulk join all specified local users.", - )); + ); } let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not an admin room to check for server admins.", - )); + return Err!("There is not an admin room to check for server admins.",); }; let (room_id, servers) = self @@ -465,7 +422,7 @@ pub(super) async fn force_join_list_of_local_users( .server_in_room(self.services.globals.server_name(), &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + return Err!("We are not joined in this room."); } let server_admins: Vec<_> = self @@ -485,9 +442,7 @@ pub(super) async fn force_join_list_of_local_users( .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) .await { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not a single server admin in the room.", - )); + return Err!("There is not a single server admin in the room.",); } let usernames = self @@ -505,11 +460,11 @@ pub(super) async fn force_join_list_of_local_users( if user_id == self.services.globals.server_user { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is the server service account, skipping over" - ))) - .await - .ok(); + )) + .await; + continue; } @@ -518,11 +473,9 @@ pub(super) async fn force_join_list_of_local_users( | Err(e) => { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username, skipping over: {e}" - ))) - .await - .ok(); + .send_text(&format!("{username} is not a valid username, skipping over: {e}")) + .await; + continue; }, } @@ -550,13 +503,14 @@ pub(super) async fn 
force_join_list_of_local_users( debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, - }; + } } - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \ failed.", - ))) + )) + .await } #[admin_command] @@ -564,18 +518,16 @@ pub(super) async fn force_join_all_local_users( &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, -) -> Result { +) -> Result { if !yes_i_want_to_do_this { - return Ok(RoomMessageEventContent::notice_markdown( + return Err!( "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ bulk join all local users.", - )); + ); } let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not an admin room to check for server admins.", - )); + return Err!("There is not an admin room to check for server admins.",); }; let (room_id, servers) = self @@ -592,7 +544,7 @@ pub(super) async fn force_join_all_local_users( .server_in_room(self.services.globals.server_name(), &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + return Err!("We are not joined in this room."); } let server_admins: Vec<_> = self @@ -612,9 +564,7 @@ pub(super) async fn force_join_all_local_users( .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) .await { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not a single server admin in the room.", - )); + return Err!("There is not a single server admin in the room.",); } let mut failed_joins: usize = 0; @@ -646,13 +596,14 @@ pub(super) async fn force_join_all_local_users( debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, - }; + } } - 
Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \ failed.", - ))) + )) + .await } #[admin_command] @@ -660,7 +611,7 @@ pub(super) async fn force_join_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, -) -> Result { +) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let (room_id, servers) = self .services @@ -676,9 +627,8 @@ pub(super) async fn force_join_room( join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has been joined to {room_id}.", - ))) + self.write_str(&format!("{user_id} has been joined to {room_id}.",)) + .await } #[admin_command] @@ -686,7 +636,7 @@ pub(super) async fn force_leave_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, -) -> Result { +) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -694,19 +644,25 @@ pub(super) async fn force_leave_room( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); + + if !self + .services + .rooms + .state_cache + .is_joined(&user_id, &room_id) + .await + { + return Err!("{user_id} is not joined in the room"); + } + leave_room(self.services, &user_id, &room_id, None).await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has left {room_id}.", - ))) + self.write_str(&format!("{user_id} has left {room_id}.",)) + .await } #[admin_command] -pub(super) async fn force_demote( - &self, - user_id: String, - room_id: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAliasId) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -717,15 +673,11 @@ pub(super) 
async fn force_demote( let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; - let room_power_levels = self + let room_power_levels: Option = self .services .rooms .state_accessor - .room_state_get_content::( - &room_id, - &StateEventType::RoomPowerLevels, - "", - ) + .room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "") .await .ok(); @@ -743,9 +695,7 @@ pub(super) async fn force_demote( .is_ok_and(|event| event.sender == user_id); if !user_can_demote_self { - return Ok(RoomMessageEventContent::notice_markdown( - "User is not allowed to modify their own power levels in the room.", - )); + return Err!("User is not allowed to modify their own power levels in the room.",); } let mut power_levels_content = room_power_levels.unwrap_or_default(); @@ -763,34 +713,34 @@ pub(super) async fn force_demote( ) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "User {user_id} demoted themselves to the room default power level in {room_id} - \ {event_id}" - ))) + )) + .await } #[admin_command] -pub(super) async fn make_user_admin(&self, user_id: String) -> Result { +pub(super) async fn make_user_admin(&self, user_id: String) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; - assert!( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); + self.services.admin.make_user_admin(&user_id).await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has been granted admin privileges.", - ))) + self.write_str(&format!("{user_id} has been granted admin privileges.",)) + .await } #[admin_command] pub(super) async fn put_room_tag( &self, user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, -) -> Result { +) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let mut tags_event = self @@ -817,18 +767,19 @@ pub(super) async fn put_room_tag( ) .await?; - 
Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Successfully updated room account data for {user_id} and room {room_id} with tag {tag}" - ))) + )) + .await } #[admin_command] pub(super) async fn delete_room_tag( &self, user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, -) -> Result { +) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let mut tags_event = self @@ -852,18 +803,15 @@ pub(super) async fn delete_room_tag( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Successfully updated room account data for {user_id} and room {room_id}, deleting room \ tag {tag}" - ))) + )) + .await } #[admin_command] -pub(super) async fn get_room_tags( - &self, - user_id: String, - room_id: Box, -) -> Result { +pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let tags_event = self @@ -875,17 +823,12 @@ pub(super) async fn get_room_tags( content: TagEventContent { tags: BTreeMap::new() }, }); - Ok(RoomMessageEventContent::notice_markdown(format!( - "```\n{:#?}\n```", - tags_event.content.tags - ))) + self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags)) + .await } #[admin_command] -pub(super) async fn redact_event( - &self, - event_id: Box, -) -> Result { +pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result { let Ok(event) = self .services .rooms @@ -893,20 +836,18 @@ pub(super) async fn redact_event( .get_non_outlier_pdu(&event_id) .await else { - return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database.")); + return Err!("Event does not exist in our database."); }; if event.is_redacted() { - return Ok(RoomMessageEventContent::text_plain("Event is already redacted.")); + return Err!("Event is already redacted."); } let room_id = event.room_id; let sender_user = event.sender; if 
!self.services.globals.user_is_local(&sender_user) { - return Ok(RoomMessageEventContent::text_plain( - "This command only works on local users.", - )); + return Err!("This command only works on local users."); } let reason = format!( @@ -935,9 +876,8 @@ pub(super) async fn redact_event( .await? }; - let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}"); - - self.write_str(out.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!( + "Successfully redacted event. Redaction event ID: {redaction_event_id}" + )) + .await } diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index 1494ea8f..e789376a 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -2,7 +2,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, OwnedRoomOrAliasId, RoomId}; +use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId}; use crate::admin_command_dispatch; @@ -102,21 +102,21 @@ pub(super) enum UserCommand { /// room's internal ID, and the tag name `m.server_notice`. 
PutRoomTag { user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, }, /// - Deletes the room tag for the specified user and room ID DeleteRoomTag { user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, }, /// - Gets all the room tags for the specified user and room ID GetRoomTags { user_id: String, - room_id: Box, + room_id: OwnedRoomId, }, /// - Attempts to forcefully redact the specified event ID from the sender @@ -124,7 +124,7 @@ pub(super) enum UserCommand { /// /// This is only valid for local users RedactEvent { - event_id: Box, + event_id: OwnedEventId, }, /// - Force joins a specified list of local users to join the specified diff --git a/src/admin/utils.rs b/src/admin/utils.rs index eba33fba..ea9696b2 100644 --- a/src/admin/utils.rs +++ b/src/admin/utils.rs @@ -1,4 +1,6 @@ -use conduwuit_core::{err, Err, Result}; +#![allow(dead_code)] + +use conduwuit_core::{Err, Result, err}; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use service::Services; diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 385e786f..15ada812 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,31 +17,60 @@ crate-type = [ ] [features] -element_hacks = [] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", +brotli_compression = [ + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", + "reqwest/brotli", ] -zstd_compression = [ - "reqwest/zstd", +element_hacks = [ + "conduwuit-service/element_hacks", ] gzip_compression = [ + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", "reqwest/gzip", ] -brotli_compression = [ - "reqwest/brotli", +io_uring = [ + "conduwuit-service/io_uring", +] +jemalloc = [ + "conduwuit-core/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + 
"conduwuit-core/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] +release_max_log_level = [ + "conduwuit-core/release_max_log_level", + "conduwuit-service/release_max_log_level", + "log/max_level_trace", + "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", +] +zstd_compression = [ + "conduwuit-core/zstd_compression", + "conduwuit-service/zstd_compression", + "reqwest/zstd", ] [dependencies] +async-trait.workspace = true axum-client-ip.workspace = true axum-extra.workspace = true axum.workspace = true base64.workspace = true bytes.workspace = true conduwuit-core.workspace = true -conduwuit-database.workspace = true conduwuit-service.workspace = true const-str.workspace = true futures.workspace = true diff --git a/src/api/client/account.rs b/src/api/client/account.rs index cb25b276..32f2530c 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,34 +3,38 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result, + Err, Error, Result, debug_info, err, error, info, is_equal_to, + matrix::pdu::PduBuilder, + utils, + utils::{ReadyExt, stream::BroadbandExt}, + warn, }; +use conduwuit_service::Services; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ + OwnedRoomId, UserId, api::client::{ account::{ - change_password, check_registration_token_validity, deactivate, get_3pids, - get_username_availability, + ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity, + deactivate, get_3pids, get_username_availability, register::{self, LoginType}, request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, - whoami, ThirdPartyIdRemovalStatus, + whoami, }, - error::ErrorKind, uiaa::{AuthFlow, 
AuthType, UiaaInfo}, }, events::{ + GlobalAccountDataEventType, StateEventType, room::{ message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - GlobalAccountDataEventType, StateEventType, }, - push, OwnedRoomId, UserId, + push, }; -use service::Services; -use super::{join_room_by_id_helper, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; +use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; use crate::Ruma; const RANDOM_USER_ID_LENGTH: usize = 10; @@ -59,6 +63,14 @@ pub(crate) async fn get_register_available_route( || appservice.registration.id.contains("matrix_appservice_irc") }); + if services + .globals + .forbidden_usernames() + .is_match(&body.username) + { + return Err!(Request(Forbidden("Username is forbidden"))); + } + // don't force the username lowercase if it's from matrix-appservice-irc let body_username = if is_matrix_appservice_irc { body.username.clone() @@ -67,30 +79,45 @@ pub(crate) async fn get_register_available_route( }; // Validate user id - let user_id = UserId::parse_with_server_name(body_username, services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) - && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + match UserId::parse_with_server_name(&body_username, services.globals.server_name()) { + | Ok(user_id) => { + if let Err(e) = user_id.validate_strict() { + // unless the username is from the broken matrix appservice IRC bridge, we + // should follow synapse's behaviour on not allowing things like spaces + // and UTF-8 characters in usernames + if !is_matrix_appservice_irc { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} contains disallowed characters or spaces: \ + {e}" + )))); + } + } + + user_id + }, + | Err(e) => { + return Err!(Request(InvalidUsername(debug_warn!( + 
"Username {body_username} is not valid: {e}" + )))); + }, + }; // Check if username is creative enough if services.users.exists(&user_id).await { - return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); + return Err!(Request(UserInUse("User ID is not available."))); } - if services - .globals - .forbidden_usernames() - .is_match(user_id.localpart()) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + if let Some(ref info) = body.appservice_info { + if !info.is_user_match(&user_id) { + return Err!(Request(Exclusive("Username is not in an appservice namespace."))); + } } - // TODO add check for appservice namespaces + if services.appservice.is_exclusive_user_id(&user_id).await { + return Err!(Request(Exclusive("Username is reserved by an appservice."))); + } - // If no if check is true we have an username that's available to be used. Ok(get_username_availability::v3::Response { available: true }) } @@ -118,20 +145,31 @@ pub(crate) async fn register_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - if !services.globals.allow_registration() && body.appservice_info.is_none() { - info!( - "Registration disabled and request not from known appservice, rejecting \ - registration attempt for username \"{}\"", - body.username.as_deref().unwrap_or("") - ); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration has been disabled.")); + let is_guest = body.kind == RegistrationKind::Guest; + let emergency_mode_enabled = services.config.emergency_password.is_some(); + + if !services.config.allow_registration && body.appservice_info.is_none() { + match (body.username.as_ref(), body.initial_device_display_name.as_ref()) { + | (Some(username), Some(device_display_name)) => { + info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + }, + | (Some(username), _) => { + info!(%is_guest, user = 
%username, "Rejecting registration attempt as registration is disabled"); + }, + | (_, Some(device_display_name)) => { + info!(%is_guest, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + }, + | (None, _) => { + info!(%is_guest, "Rejecting registration attempt as registration is disabled"); + }, + } + + return Err!(Request(Forbidden("Registration has been disabled."))); } - let is_guest = body.kind == RegistrationKind::Guest; - if is_guest - && (!services.globals.allow_guest_registration() - || (services.globals.allow_registration() + && (!services.config.allow_guest_registration + || (services.config.allow_registration && services.globals.registration_token.is_some())) { info!( @@ -139,10 +177,7 @@ pub(crate) async fn register_route( rejecting guest registration attempt, initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); - return Err(Error::BadRequest( - ErrorKind::GuestAccessForbidden, - "Guest registration is disabled.", - )); + return Err!(Request(GuestAccessForbidden("Guest registration is disabled."))); } // forbid guests from registering if there is not a real admin user yet. give @@ -153,13 +188,10 @@ pub(crate) async fn register_route( rejecting registration. 
Guest's initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Registration temporarily disabled.", - )); + return Err!(Request(Forbidden("Registration is temporarily disabled."))); } - let user_id = match (&body.username, is_guest) { + let user_id = match (body.username.as_ref(), is_guest) { | (Some(username), false) => { // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue let is_matrix_appservice_irc = @@ -169,6 +201,12 @@ pub(crate) async fn register_route( || appservice.registration.id.contains("matrix_appservice_irc") }); + if services.globals.forbidden_usernames().is_match(username) + && !emergency_mode_enabled + { + return Err!(Request(Forbidden("Username is forbidden"))); + } + // don't force the username lowercase if it's from matrix-appservice-irc let body_username = if is_matrix_appservice_irc { username.clone() @@ -176,31 +214,34 @@ pub(crate) async fn register_route( username.to_lowercase() }; - let proposed_user_id = - UserId::parse_with_server_name(body_username, services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) - && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; + let proposed_user_id = match UserId::parse_with_server_name( + &body_username, + services.globals.server_name(), + ) { + | Ok(user_id) => { + if let Err(e) = user_id.validate_strict() { + // unless the username is from the broken matrix appservice IRC bridge, or + // we are in emergency mode, we should follow synapse's behaviour on + // not allowing things like spaces and UTF-8 characters in usernames + if !is_matrix_appservice_irc && !emergency_mode_enabled { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} contains disallowed characters or \ 
+ spaces: {e}" + )))); + } + } + + user_id + }, + | Err(e) => { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} is not valid: {e}" + )))); + }, + }; if services.users.exists(&proposed_user_id).await { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } - - if services - .globals - .forbidden_usernames() - .is_match(proposed_user_id.localpart()) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + return Err!(Request(UserInUse("User ID is not available."))); } proposed_user_id @@ -218,15 +259,20 @@ pub(crate) async fn register_route( }; if body.body.login_type == Some(LoginType::ApplicationService) { - if let Some(ref info) = body.appservice_info { - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace.")); - } - } else { - return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing appservice token.")); + match body.appservice_info { + | Some(ref info) => + if !info.is_user_match(&user_id) && !emergency_mode_enabled { + return Err!(Request(Exclusive( + "Username is not in an appservice namespace." 
+ ))); + }, + | _ => { + return Err!(Request(MissingToken("Missing appservice token."))); + }, } - } else if services.appservice.is_exclusive_user_id(&user_id).await { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); + } else if services.appservice.is_exclusive_user_id(&user_id).await && !emergency_mode_enabled + { + return Err!(Request(Exclusive("Username is reserved by an appservice."))); } // UIAA @@ -256,33 +302,39 @@ pub(crate) async fn register_route( }; if !skip_auth { - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth( - &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), - "".into(), - auth, - &uiaainfo, - ) - .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services.uiaa.create( - &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), - "".into(), - &uiaainfo, - &json, - ); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth( + &UserId::parse_with_server_name("", services.globals.server_name()) + .unwrap(), + "".into(), + auth, + &uiaainfo, + ) + .await?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(ref json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services.uiaa.create( + &UserId::parse_with_server_name("", services.globals.server_name()) + .unwrap(), + "".into(), + &uiaainfo, + json, + ); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("JSON body is not valid"))); + }, + }, } } @@ -323,8 +375,12 @@ pub(crate) async fn register_route( ) .await?; - // Inhibit login does not work for guests - if !is_guest && body.inhibit_login { + if (!is_guest && body.inhibit_login) + || body + .appservice_info + .as_ref() + .is_some_and(|appservice| appservice.registration.device_management) + { return Ok(register::v3::Response { access_token: None, user_id, @@ -391,8 +447,8 @@ pub(crate) async fn register_route( } // log in conduit admin channel if a guest registered - if body.appservice_info.is_none() && is_guest && services.globals.log_guest_registrations() { - info!("New guest user \"{user_id}\" registered on this server."); + if body.appservice_info.is_none() && is_guest && services.config.log_guest_registrations { + debug_info!("New guest user \"{user_id}\" registered on this server."); if !device_display_name.is_empty() { if services.server.config.admin_room_notices { @@ -421,7 +477,8 @@ pub(crate) async fn register_route( } // If this is the first real user, grant them admin privileges except for guest - // users Note: the server user, @conduit:servername, is generated first + // users + // Note: the server user is generated first if !is_guest { if let Ok(admin_room) = services.admin.get_admin_room().await { if services @@ -439,7 +496,7 @@ pub(crate) async fn register_route( if body.appservice_info.is_none() && !services.server.config.auto_join_rooms.is_empty() - && (services.globals.allow_guests_auto_join_rooms() || !is_guest) + && (services.config.allow_guests_auto_join_rooms || !is_guest) { for room in &services.server.config.auto_join_rooms { let 
Ok(room_id) = services.rooms.alias.resolve(room).await else { @@ -463,7 +520,7 @@ pub(crate) async fn register_route( } if let Some(room_server_name) = room.server_name() { - if let Err(e) = join_room_by_id_helper( + match join_room_by_id_helper( &services, &user_id, &room_id, @@ -475,11 +532,16 @@ pub(crate) async fn register_route( .boxed() .await { - // don't return this error so we don't fail registrations - error!("Failed to automatically join room {room} for user {user_id}: {e}"); - } else { - info!("Automatically joined room {room} for user {user_id}"); - }; + | Err(e) => { + // don't return this error so we don't fail registrations + error!( + "Failed to automatically join room {room} for user {user_id}: {e}" + ); + }, + | _ => { + info!("Automatically joined room {room} for user {user_id}"); + }, + } } } } @@ -521,8 +583,8 @@ pub(crate) async fn change_password_route( let sender_user = body .sender_user .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; + let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -532,26 +594,32 @@ pub(crate) async fn change_password_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } - // Success! 
- } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + // Success! + }, + | _ => match body.json_body { + | Some(ref json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("JSON body is not valid"))); + }, + }, } services @@ -563,9 +631,29 @@ pub(crate) async fn change_password_route( services .users .all_device_ids(sender_user) - .ready_filter(|id| id != sender_device) + .ready_filter(|id| *id != sender_device) .for_each(|id| services.users.remove_device(sender_user, id)) .await; + + // Remove all pushers except the ones associated with this session + services + .pusher + .get_pushkeys(sender_user) + .map(ToOwned::to_owned) + .broad_filter_map(|pushkey| async move { + services + .pusher + .get_pusher_device(&pushkey) + .await + .ok() + .filter(|pusher_device| pusher_device != sender_device) + .is_some() + .then_some(pushkey) + }) + .for_each(|pushkey| async move { + services.pusher.delete_pusher(sender_user, &pushkey).await; + }) + .await; } info!("User {sender_user} changed their password."); @@ -625,8 +713,8 @@ pub(crate) async fn deactivate_route( let sender_user = body .sender_user .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; + let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -636,25 +724,31 @@ pub(crate) async fn deactivate_route( auth_error: 
None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + }, + | _ => match body.json_body { + | Some(ref json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("JSON body is not valid"))); + }, + }, } // Remove profile pictures and display name @@ -711,10 +805,7 @@ pub(crate) async fn third_party_route( pub(crate) async fn request_3pid_management_token_via_email_route( _body: Ruma, ) -> Result { - Err(Error::BadRequest( - ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", - )) + Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) } /// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken` @@ -727,10 +818,7 @@ pub(crate) async fn request_3pid_management_token_via_email_route( pub(crate) async fn request_3pid_management_token_via_msisdn_route( _body: Ruma, ) -> Result { - Err(Error::BadRequest( - ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", - )) + Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) } /// # `GET /_matrix/client/v1/register/m.login.registration_token/validity` @@ -744,10 +832,7 @@ 
pub(crate) async fn check_registration_token_validity( body: Ruma, ) -> Result { let Some(reg_token) = services.globals.registration_token.clone() else { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server does not allow token registration.", - )); + return Err!(Request(Forbidden("Server does not allow token registration"))); }; Ok(check_registration_token_validity::v1::Response { valid: reg_token == body.token }) @@ -809,7 +894,7 @@ pub async fn full_user_deactivate( power_levels_content.users.remove(user_id); // ignore errors so deactivation doesn't fail - if let Err(e) = services + match services .rooms .timeline .build_and_append_pdu( @@ -820,9 +905,12 @@ pub async fn full_user_deactivate( ) .await { - warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}"); - } else { - info!("Demoted {user_id} in {room_id} as part of account deactivation"); + | Err(e) => { + warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}"); + }, + | _ => { + info!("Demoted {user_id} in {room_id} as part of account deactivation"); + }, } } } diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs index 9f84f227..e44ce4e7 100644 --- a/src/api/client/account_data.rs +++ b/src/api/client/account_data.rs @@ -1,6 +1,8 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, Result, err}; +use conduwuit_service::Services; use ruma::{ + RoomId, UserId, api::client::config::{ get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data, @@ -10,12 +12,11 @@ use ruma::{ GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, - RoomId, UserId, }; use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -use crate::{service::Services, Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index e1af416e..9f1b05f8 100644 --- 
a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -1,12 +1,12 @@ use axum::extract::State; -use conduwuit::{debug, Err, Result}; +use conduwuit::{Err, Result, debug}; +use conduwuit_service::Services; use futures::StreamExt; use rand::seq::SliceRandom; use ruma::{ - api::client::alias::{create_alias, delete_alias, get_alias}, OwnedServerName, RoomAliasId, RoomId, + api::client::alias::{create_alias, delete_alias, get_alias}, }; -use service::Services; use crate::Ruma; @@ -128,18 +128,26 @@ async fn room_available_servers( // insert our server as the very first choice if in list, else check if we can // prefer the room alias server first - if let Some(server_index) = servers + match servers .iter() .position(|server_name| services.globals.server_is_ours(server_name)) { - servers.swap_remove(server_index); - servers.insert(0, services.globals.server_name().to_owned()); - } else if let Some(alias_server_index) = servers - .iter() - .position(|server| server == room_alias.server_name()) - { - servers.swap_remove(alias_server_index); - servers.insert(0, room_alias.server_name().into()); + | Some(server_index) => { + servers.swap_remove(server_index); + servers.insert(0, services.globals.server_name().to_owned()); + }, + | _ => { + match servers + .iter() + .position(|server| server == room_alias.server_name()) + { + | Some(alias_server_index) => { + servers.swap_remove(alias_server_index); + servers.insert(0, room_alias.server_name().into()); + }, + | _ => {}, + } + }, } servers diff --git a/src/api/client/appservice.rs b/src/api/client/appservice.rs index e4071ab0..eb6b3312 100644 --- a/src/api/client/appservice.rs +++ b/src/api/client/appservice.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{err, Err, Result}; +use conduwuit::{Err, Result, err}; use ruma::api::{appservice::ping, client::appservice::request_ping}; use crate::Ruma; @@ -22,7 +22,13 @@ pub(crate) async fn appservice_ping( ))); } - if appservice_info.registration.url.is_none() { + 
if appservice_info.registration.url.is_none() + || appservice_info + .registration + .url + .as_ref() + .is_some_and(|url| url.is_empty() || url == "null") + { return Err!(Request(UrlNotSet( "Appservice does not have a URL set, there is nothing to ping." ))); diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index d330952d..2ad37cf3 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,6 +1,9 @@ +use std::cmp::Ordering; + use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, Result, err}; use ruma::{ + UInt, api::client::backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, delete_backup_keys, delete_backup_keys_for_room, @@ -8,10 +11,9 @@ use ruma::{ get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, update_backup_version, }, - UInt, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/room_keys/version` /// @@ -232,16 +234,77 @@ pub(crate) async fn add_backup_keys_for_session_route( ))); } - services + // Check if we already have a better key + let mut ok_to_replace = true; + if let Some(old_key) = &services .key_backups - .add_key( - body.sender_user(), - &body.version, - &body.room_id, - &body.session_id, - &body.session_data, - ) - .await?; + .get_session(body.sender_user(), &body.version, &body.room_id, &body.session_id) + .await + .ok() + { + let old_is_verified = old_key + .get_field::("is_verified")? + .unwrap_or_default(); + + let new_is_verified = body + .session_data + .get_field::("is_verified")? + .ok_or_else(|| err!(Request(BadJson("`is_verified` field should exist"))))?; + + // Prefer key that `is_verified` + if old_is_verified != new_is_verified { + if old_is_verified { + ok_to_replace = false; + } + } else { + // If both have same `is_verified`, prefer the one with lower + // `first_message_index` + let old_first_message_index = old_key + .get_field::("first_message_index")? 
+ .unwrap_or(UInt::MAX); + + let new_first_message_index = body + .session_data + .get_field::("first_message_index")? + .ok_or_else(|| { + err!(Request(BadJson("`first_message_index` field should exist"))) + })?; + + ok_to_replace = match new_first_message_index.cmp(&old_first_message_index) { + | Ordering::Less => true, + | Ordering::Greater => false, + | Ordering::Equal => { + // If both have same `first_message_index`, prefer the one with lower + // `forwarded_count` + let old_forwarded_count = old_key + .get_field::("forwarded_count")? + .unwrap_or(UInt::MAX); + + let new_forwarded_count = body + .session_data + .get_field::("forwarded_count")? + .ok_or_else(|| { + err!(Request(BadJson("`forwarded_count` field should exist"))) + })?; + + new_forwarded_count < old_forwarded_count + }, + }; + } + } + + if ok_to_replace { + services + .key_backups + .add_key( + body.sender_user(), + &body.version, + &body.room_id, + &body.session_id, + &body.session_data, + ) + .await?; + } Ok(add_backup_keys_for_session::v3::Response { count: services diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 7188aa23..470ff6ab 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -3,11 +3,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{Result, Server}; use ruma::{ + RoomVersionId, api::client::discovery::get_capabilities::{ self, Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability, ThirdPartyIdChangesCapability, }, - RoomVersionId, }; use serde_json::json; @@ -42,5 +42,12 @@ pub(crate) async fn get_capabilities_route( .set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true})) .expect("this is valid JSON we created"); + capabilities + .set( + "org.matrix.msc4267.forget_forced_upon_leave", + json!({"enabled": services.config.forget_forced_upon_leave}), + ) + .expect("valid JSON we created"); + Ok(get_capabilities::v3::Response { capabilities }) } diff --git 
a/src/api/client/context.rs b/src/api/client/context.rs index 7256683f..dbc2a22f 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,23 +1,24 @@ use axum::extract::State; use conduwuit::{ - at, err, ref_at, + Err, Result, at, debug_warn, err, + matrix::pdu::PduEvent, + ref_at, utils::{ + IterStream, future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, - IterStream, }, - Err, PduEvent, Result, }; +use conduwuit_service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use futures::{ - future::{join, join3, try_join3, OptionFuture}, FutureExt, StreamExt, TryFutureExt, TryStreamExt, + future::{OptionFuture, join, join3, try_join3}, }; -use ruma::{api::client::context::get_context, events::StateEventType, OwnedEventId, UserId}; -use service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; +use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType}; use crate::{ - client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, Ruma, + client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, }; const LIMIT_MAX: usize = 100; @@ -36,8 +37,13 @@ pub(crate) async fn get_context_route( let sender = body.sender(); let (sender_user, sender_device) = sender; let room_id = &body.room_id; + let event_id = &body.event_id; let filter = &body.filter; + if !services.rooms.metadata.exists(room_id).await { + return Err!(Request(Forbidden("Room does not exist to this server"))); + } + // Use limit or else 10, with maximum 100 let limit: usize = body .limit @@ -48,29 +54,30 @@ pub(crate) async fn get_context_route( let base_id = services .rooms .timeline - .get_pdu_id(&body.event_id) + .get_pdu_id(event_id) .map_err(|_| err!(Request(NotFound("Event not found.")))); let base_pdu = services .rooms .timeline - .get_pdu(&body.event_id) + .get_pdu(event_id) .map_err(|_| err!(Request(NotFound("Base event not 
found.")))); let visible = services .rooms .state_accessor - .user_can_see_event(sender_user, &body.room_id, &body.event_id) + .user_can_see_event(sender_user, room_id, event_id) .map(Ok); let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?; - if base_pdu.room_id != body.room_id || base_pdu.event_id != body.event_id { + if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id { return Err!(Request(NotFound("Base event not found."))); } if !visible { - return Err!(Request(Forbidden("You don't have permission to view this event."))); + debug_warn!(req_evt = ?event_id, ?base_id, ?room_id, "Event requested by {sender_user} but is not allowed to see it, returning 404"); + return Err!(Request(NotFound("Event not found."))); } let base_count = base_id.pdu_count(); @@ -100,7 +107,7 @@ pub(crate) async fn get_context_route( .collect(); let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) = - join3(base_event, events_before, events_after).await; + join3(base_event, events_before, events_after).boxed().await; let lazy_loading_context = lazy_loading::Context { user_id: sender_user, @@ -177,7 +184,7 @@ pub(crate) async fn get_context_route( .await; Ok(get_context::v3::Response { - event: base_event.map(at!(1)).as_ref().map(PduEvent::to_room_event), + event: base_event.map(at!(1)).map(PduEvent::into_room_event), start: events_before .last() @@ -196,13 +203,13 @@ pub(crate) async fn get_context_route( events_before: events_before .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), events_after: events_after .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), state, diff --git a/src/api/client/device.rs b/src/api/client/device.rs index bb0773dd..5519a1a5 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,18 +1,18 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{err, 
Err}; +use conduwuit::{Err, Error, Result, debug, err, utils}; use futures::StreamExt; use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedDeviceId, api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, - MilliSecondsSinceUnixEpoch, }; use super::SESSION_ID_LENGTH; -use crate::{utils, Error, Result, Ruma}; +use crate::{Ruma, client::DEVICE_ID_LENGTH}; /// # `GET /_matrix/client/r0/devices` /// @@ -59,26 +59,58 @@ pub(crate) async fn update_device_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); + let appservice = body.appservice_info.as_ref(); - let mut device = services + match services .users .get_device_metadata(sender_user, &body.device_id) .await - .map_err(|_| err!(Request(NotFound("Device not found."))))?; + { + | Ok(mut device) => { + device.display_name.clone_from(&body.display_name); + device.last_seen_ip.clone_from(&Some(client.to_string())); + device + .last_seen_ts + .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); - device.display_name.clone_from(&body.display_name); - device.last_seen_ip.clone_from(&Some(client.to_string())); - device - .last_seen_ts - .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); + services + .users + .update_device_metadata(sender_user, &body.device_id, &device) + .await?; - services - .users - .update_device_metadata(sender_user, &body.device_id, &device) - .await?; + Ok(update_device::v3::Response {}) + }, + | Err(_) => { + let Some(appservice) = appservice else { + return Err!(Request(NotFound("Device not found."))); + }; + if !appservice.registration.device_management { + return Err!(Request(NotFound("Device not found."))); + } - Ok(update_device::v3::Response {}) + debug!( + "Creating new device for {sender_user} from appservice {} as MSC4190 is enabled \ + and device 
ID does not exist", + appservice.registration.id + ); + + let device_id = OwnedDeviceId::from(utils::random_string(DEVICE_ID_LENGTH)); + + services + .users + .create_device( + sender_user, + &device_id, + &appservice.registration.as_token, + None, + Some(client.to_string()), + ) + .await?; + + return Ok(update_device::v3::Response {}); + }, + } } /// # `DELETE /_matrix/client/r0/devices/{deviceId}` @@ -95,8 +127,21 @@ pub(crate) async fn delete_device_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); + let appservice = body.appservice_info.as_ref(); + + if appservice.is_some_and(|appservice| appservice.registration.device_management) { + debug!( + "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ + enabled" + ); + services + .users + .remove_device(sender_user, &body.device_id) + .await; + + return Ok(delete_device::v3::Response {}); + } // UIAA let mut uiaainfo = UiaaInfo { @@ -107,25 +152,31 @@ pub(crate) async fn delete_device_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err!(Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err!(Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(ref json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, json); - return Err!(Uiaa(uiaainfo)); - } else { - return Err!(Request(NotJson("Not json."))); + return Err!(Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("Not json."))); + }, + }, } services @@ -136,11 +187,12 @@ pub(crate) async fn delete_device_route( Ok(delete_device::v3::Response {}) } -/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// # `POST /_matrix/client/v3/delete_devices` /// -/// Deletes the given device. +/// Deletes the given list of devices. /// -/// - Requires UIAA to verify user password +/// - Requires UIAA to verify user password unless from an appservice with +/// MSC4190 enabled. /// /// For each device: /// - Invalidates access token @@ -152,8 +204,20 @@ pub(crate) async fn delete_devices_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); + let appservice = body.appservice_info.as_ref(); + + if appservice.is_some_and(|appservice| appservice.registration.device_management) { + debug!( + "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ + enabled" + ); + for device_id in &body.devices { + services.users.remove_device(sender_user, device_id).await; + } + + return Ok(delete_devices::v3::Response {}); + } // UIAA let mut uiaainfo = UiaaInfo { @@ -164,25 +228,31 @@ pub(crate) async fn delete_devices_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, 
sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + }, + | _ => match body.json_body { + | Some(ref json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } for device_id in &body.devices { diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 9166eed9..aa6ae168 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,30 +1,41 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{info, warn, Err, Error, Result}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{ + Err, Result, err, info, + utils::{ + TryFutureExtExt, + math::Expected, + result::FlatOk, + stream::{ReadyExt, WidebandExt}, + }, +}; +use conduwuit_service::Services; +use futures::{ + FutureExt, StreamExt, TryFutureExt, + future::{join, join4, join5}, +}; use ruma::{ + OwnedRoomId, RoomId, ServerName, UInt, UserId, api::{ client::{ directory::{ get_public_rooms, get_public_rooms_filtered, get_room_visibility, set_room_visibility, }, - error::ErrorKind, room, }, federation, }, - directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, + directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork, RoomTypeFilter}, events::{ + StateEventType, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, power_levels::{RoomPowerLevels, 
RoomPowerLevelsEventContent}, }, - StateEventType, }, - uint, OwnedRoomId, RoomId, ServerName, UInt, UserId, + uint, }; -use service::Services; use crate::Ruma; @@ -41,10 +52,8 @@ pub(crate) async fn get_public_rooms_filtered_route( ) -> Result { if let Some(server) = &body.server { if services - .server - .config - .forbidden_remote_room_directory_server_names - .contains(server) + .moderation + .is_remote_server_room_directory_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } @@ -60,11 +69,7 @@ pub(crate) async fn get_public_rooms_filtered_route( ) .await .map_err(|e| { - warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest( - ErrorKind::Unknown, - "Failed to return the requested server's public room list.", - ) + err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) })?; Ok(response) @@ -82,12 +87,7 @@ pub(crate) async fn get_public_rooms_route( body: Ruma, ) -> Result { if let Some(server) = &body.server { - if services - .server - .config - .forbidden_remote_room_directory_server_names - .contains(server) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } @@ -102,11 +102,7 @@ pub(crate) async fn get_public_rooms_route( ) .await .map_err(|e| { - warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest( - ErrorKind::Unknown, - "Failed to return the requested server's public room list.", - ) + err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) })?; Ok(get_public_rooms::v3::Response { @@ -126,11 +122,11 @@ pub(crate) async fn set_room_visibility_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room 
doesn't exist - return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + return Err!(Request(NotFound("Room not found"))); } if services @@ -144,10 +140,7 @@ pub(crate) async fn set_room_visibility_route( } if !user_can_publish_room(&services, sender_user, &body.room_id).await? { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); + return Err!(Request(Forbidden("User is not allowed to publish this room"))); } match &body.visibility { @@ -173,10 +166,9 @@ pub(crate) async fn set_room_visibility_route( .await; } - return Err(Error::BadRequest( - ErrorKind::forbidden(), + return Err!(Request(Forbidden( "Publishing rooms to the room directory is not allowed", - )); + ))); } services.rooms.directory.set_public(&body.room_id); @@ -194,10 +186,7 @@ pub(crate) async fn set_room_visibility_route( }, | room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id), | _ => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room visibility type is not supported.", - )); + return Err!(Request(InvalidParam("Room visibility type is not supported.",))); }, } @@ -213,7 +202,7 @@ pub(crate) async fn get_room_visibility_route( ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist - return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + return Err!(Request(NotFound("Room not found"))); } Ok(get_room_visibility::v3::Response { @@ -261,22 +250,23 @@ pub(crate) async fn get_public_rooms_filtered_helper( } // Use limit or else 10, with maximum 100 - let limit = limit.map_or(10, u64::from); - let mut num_since: u64 = 0; + let limit: usize = limit.map_or(10_u64, u64::from).try_into()?; + let mut num_since: usize = 0; if let Some(s) = &since { let mut characters = s.chars(); let backwards = match characters.next() { | Some('n') => false, | Some('p') => true, - | _ => - return 
Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")), + | _ => { + return Err!(Request(InvalidParam("Invalid `since` token"))); + }, }; num_since = characters .collect::() .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?; + .map_err(|_| err!(Request(InvalidParam("Invalid `since` token."))))?; if backwards { num_since = num_since.saturating_sub(limit); @@ -288,8 +278,12 @@ pub(crate) async fn get_public_rooms_filtered_helper( .directory .public_rooms() .map(ToOwned::to_owned) - .then(|room_id| public_rooms_chunk(services, room_id)) - .filter_map(|chunk| async move { + .wide_then(|room_id| public_rooms_chunk(services, room_id)) + .ready_filter_map(|chunk| { + if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { + return None; + } + if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { if name.as_str().to_lowercase().contains(&query) { @@ -321,40 +315,24 @@ pub(crate) async fn get_public_rooms_filtered_helper( all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); - let total_room_count_estimate = UInt::try_from(all_rooms.len()).unwrap_or_else(|_| uint!(0)); + let total_room_count_estimate = UInt::try_from(all_rooms.len()) + .unwrap_or_else(|_| uint!(0)) + .into(); - let chunk: Vec<_> = all_rooms - .into_iter() - .skip( - num_since - .try_into() - .expect("num_since should not be this high"), - ) - .take(limit.try_into().expect("limit should not be this high")) - .collect(); + let chunk: Vec<_> = all_rooms.into_iter().skip(num_since).take(limit).collect(); - let prev_batch = if num_since == 0 { - None - } else { - Some(format!("p{num_since}")) - }; + let prev_batch = num_since.ne(&0).then_some(format!("p{num_since}")); - let next_batch = if chunk.len() < limit.try_into().unwrap() { - None - } else { - Some(format!( - "n{}", - num_since - .checked_add(limit) - 
.expect("num_since and limit should not be that large") - )) - }; + let next_batch = chunk + .len() + .ge(&limit) + .then_some(format!("n{}", num_since.expected_add(limit))); Ok(get_public_rooms_filtered::v3::Response { chunk, prev_batch, next_batch, - total_room_count_estimate: Some(total_room_count_estimate), + total_room_count_estimate, }) } @@ -365,88 +343,88 @@ async fn user_can_publish_room( user_id: &UserId, room_id: &RoomId, ) -> Result { - if let Ok(event) = services + match services .rooms .state_accessor .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") .await { - serde_json::from_str(event.content.get()) - .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) + | Ok(event) => serde_json::from_str(event.content.get()) + .map_err(|_| err!(Database("Invalid event content for m.room.power_levels"))) .map(|content: RoomPowerLevelsEventContent| { RoomPowerLevels::from(content) .user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) - }) - } else if let Ok(event) = services - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - Ok(event.sender == user_id) - } else { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); + }), + | _ => { + match services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + | Ok(event) => Ok(event.sender == user_id), + | _ => Err!(Request(Forbidden("User is not allowed to publish this room"))), + } + }, } } async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk { + let name = services.rooms.state_accessor.get_name(&room_id).ok(); + + let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok(); + + let canonical_alias = services + .rooms + .state_accessor + .get_canonical_alias(&room_id) + .ok(); + + let avatar_url = services.rooms.state_accessor.get_avatar(&room_id); + + let 
topic = services.rooms.state_accessor.get_room_topic(&room_id).ok(); + + let world_readable = services.rooms.state_accessor.is_world_readable(&room_id); + + let join_rule = services + .rooms + .state_accessor + .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") + .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { + | JoinRule::Public => PublicRoomJoinRule::Public, + | JoinRule::Knock => "knock".into(), + | JoinRule::KnockRestricted(_) => "knock_restricted".into(), + | _ => "invite".into(), + }); + + let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id); + + let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id); + + let ( + (avatar_url, canonical_alias, guest_can_join, join_rule, name), + (num_joined_members, room_type, topic, world_readable), + ) = join( + join5(avatar_url, canonical_alias, guest_can_join, join_rule, name), + join4(num_joined_members, room_type, topic, world_readable), + ) + .boxed() + .await; + PublicRoomsChunk { - canonical_alias: services - .rooms - .state_accessor - .get_canonical_alias(&room_id) - .await - .ok(), - name: services.rooms.state_accessor.get_name(&room_id).await.ok(), - num_joined_members: services - .rooms - .state_cache - .room_joined_count(&room_id) - .await - .unwrap_or(0) - .try_into() - .expect("joined count overflows ruma UInt"), - topic: services - .rooms - .state_accessor - .get_room_topic(&room_id) - .await - .ok(), - world_readable: services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await, - guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, - avatar_url: services - .rooms - .state_accessor - .get_avatar(&room_id) - .await - .into_option() - .unwrap_or_default() - .url, - join_rule: services - .rooms - .state_accessor - .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") - .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { - | JoinRule::Public => PublicRoomJoinRule::Public, - 
| JoinRule::Knock => "knock".into(), - | JoinRule::KnockRestricted(_) => "knock_restricted".into(), - | _ => "invite".into(), - }) - .await - .unwrap_or_default(), - room_type: services - .rooms - .state_accessor - .get_room_type(&room_id) - .await - .ok(), + avatar_url: avatar_url.into_option().unwrap_or_default().url, + canonical_alias, + guest_can_join, + join_rule: join_rule.unwrap_or_default(), + name, + num_joined_members: num_joined_members + .map(TryInto::try_into) + .map(Result::ok) + .flat_ok() + .unwrap_or_else(|| uint!(0)), room_id, + room_type, + topic, + world_readable, } } diff --git a/src/api/client/filter.rs b/src/api/client/filter.rs index 84086452..97044ffc 100644 --- a/src/api/client/filter.rs +++ b/src/api/client/filter.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::err; +use conduwuit::{Result, err}; use ruma::api::client::filter::{create_filter, get_filter}; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 7bf0a5da..650c573f 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,30 +1,30 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{err, utils, Error, Result}; -use futures::{stream::FuturesUnordered, StreamExt}; +use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils}; +use conduwuit_service::{Services, users::parse_master_key}; +use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ + OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, api::{ client::{ error::ErrorKind, keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + claim_keys, get_key_changes, get_keys, upload_keys, + upload_signatures::{self}, upload_signing_keys, }, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, federation, }, + encryption::CrossSigningKey, serde::Raw, - OneTimeKeyAlgorithm, OwnedDeviceId, 
OwnedUserId, UserId, }; use serde_json::json; use super::SESSION_ID_LENGTH; -use crate::{ - service::{users::parse_master_key, Services}, - Ruma, -}; +use crate::Ruma; /// # `POST /_matrix/client/r0/keys/upload` /// @@ -40,6 +40,20 @@ pub(crate) async fn upload_keys_route( let (sender_user, sender_device) = body.sender(); for (key_id, one_time_key) in &body.one_time_keys { + if one_time_key + .deserialize() + .inspect_err(|e| { + debug_warn!( + ?key_id, + ?one_time_key, + "Invalid one time key JSON submitted by client, skipping: {e}" + ); + }) + .is_err() + { + continue; + } + services .users .add_one_time_key(sender_user, sender_device, key_id, one_time_key) @@ -47,14 +61,44 @@ pub(crate) async fn upload_keys_route( } if let Some(device_keys) = &body.device_keys { - // TODO: merge this and the existing event? - // This check is needed to assure that signatures are kept - if services + let deser_device_keys = device_keys.deserialize().map_err(|e| { + err!(Request(BadJson(debug_warn!( + ?device_keys, + "Invalid device keys JSON uploaded by client: {e}" + )))) + })?; + + if deser_device_keys.user_id != sender_user { + return Err!(Request(Unknown( + "User ID in keys uploaded does not match your own user ID" + ))); + } + if deser_device_keys.device_id != sender_device { + return Err!(Request(Unknown( + "Device ID in keys uploaded does not match your own device ID" + ))); + } + + if let Ok(existing_keys) = services .users .get_device_keys(sender_user, sender_device) .await - .is_err() { + if existing_keys.json().get() == device_keys.json().get() { + debug!( + ?sender_user, + ?sender_device, + ?device_keys, + "Ignoring user uploaded keys as they are an exact copy already in the \ + database" + ); + } else { + services + .users + .add_device_keys(sender_user, sender_device, device_keys) + .await; + } + } else { services .users .add_device_keys(sender_user, sender_device, device_keys) @@ -125,93 +169,198 @@ pub(crate) async fn upload_signing_keys_route( auth_error: None, 
}; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match check_for_new_keys( + services, + sender_user, + body.self_signing_key.as_ref(), + body.user_signing_key.as_ref(), + body.master_key.as_ref(), + ) + .await + .inspect_err(|e| debug!(?e)) + { + | Ok(exists) => { + if let Some(result) = exists { + // No-op, they tried to reupload the same set of keys + // (lost connection for example) + return Ok(result); + } + debug!( + "Skipping UIA in accordance with MSC3967, the user didn't have any existing keys" + ); + // Some of the keys weren't found, so we let them upload + }, + | _ => { + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, + } + }, } - if let Some(master_key) = &body.master_key { - services - .users - .add_cross_signing_keys( - sender_user, - master_key, - &body.self_signing_key, - &body.user_signing_key, - true, // notify so that other users see the new keys - ) - .await?; - } + services + .users + .add_cross_signing_keys( + sender_user, + &body.master_key, + &body.self_signing_key, + &body.user_signing_key, + true, // notify so that other users see the new keys + ) + .await?; Ok(upload_signing_keys::v3::Response {}) } +async fn check_for_new_keys( + services: crate::State, + user_id: &UserId, + self_signing_key: Option<&Raw>, + user_signing_key: Option<&Raw>, + master_signing_key: Option<&Raw>, +) -> Result> { + debug!("checking for existing keys"); + let mut empty = false; + if let Some(master_signing_key) = master_signing_key { + let (key, value) = parse_master_key(user_id, master_signing_key)?; + let result = services + .users + .get_master_key(None, user_id, &|_| true) + .await; + if result.is_not_found() { + empty = true; + } else { + let existing_master_key = result?; + let (existing_key, existing_value) = parse_master_key(user_id, &existing_master_key)?; + if existing_key != key || existing_value != value { + return Err!(Request(Forbidden( + "Tried to change an existing master key, UIA required" + ))); + } + } + } + if let Some(user_signing_key) = user_signing_key { + let key = services.users.get_user_signing_key(user_id).await; + if key.is_not_found() && !empty { + return Err!(Request(Forbidden( + "Tried to update an existing user signing 
key, UIA required" + ))); + } + if !key.is_not_found() { + let existing_signing_key = key?.deserialize()?; + if existing_signing_key != user_signing_key.deserialize()? { + return Err!(Request(Forbidden( + "Tried to change an existing user signing key, UIA required" + ))); + } + } + } + if let Some(self_signing_key) = self_signing_key { + let key = services + .users + .get_self_signing_key(None, user_id, &|_| true) + .await; + if key.is_not_found() && !empty { + debug!(?key); + return Err!(Request(Forbidden( + "Tried to add a new signing key independently from the master key" + ))); + } + if !key.is_not_found() { + let existing_signing_key = key?.deserialize()?; + if existing_signing_key != self_signing_key.deserialize()? { + return Err!(Request(Forbidden( + "Tried to update an existing self signing key, UIA required" + ))); + } + } + } + if empty { + return Ok(None); + } + + Ok(Some(upload_signing_keys::v3::Response {})) +} + /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. +/// +/// TODO: clean this timo-code up more and integrate failures. tried to improve +/// it a bit to stop exploding the entire request on bad sigs, but needs way +/// more work. 
pub(crate) async fn upload_signatures_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if body.signed_keys.is_empty() { + debug!("Empty signed_keys sent in key signature upload"); + return Ok(upload_signatures::v3::Response::new()); + } + + let sender_user = body.sender_user(); for (user_id, keys) in &body.signed_keys { for (key_id, key) in keys { - let key = serde_json::to_value(key) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; + let Ok(key) = serde_json::to_value(key) + .inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}")) + else { + continue; + }; - for signature in key - .get("signatures") - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Missing signatures field."))? - .get(sender_user.to_string()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid user in signatures field.", - ))? - .as_object() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature."))? - .clone() - { - // Signature validation? - let signature = ( - signature.0, - signature - .1 - .as_str() - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid signature value.", - ))? 
- .to_owned(), - ); + let Some(signatures) = key.get("signatures") else { + continue; + }; - services + let Some(sender_user_val) = signatures.get(sender_user.to_string()) else { + continue; + }; + + let Some(sender_user_object) = sender_user_val.as_object() else { + continue; + }; + + for (signature, val) in sender_user_object.clone() { + let Some(val) = val.as_str().map(ToOwned::to_owned) else { + continue; + }; + let signature = (signature, val); + + if let Err(_e) = services .users .sign_key(user_id, key_id, signature, sender_user) - .await?; + .await + .inspect_err(|e| debug_warn!("{e}")) + { + continue; + } } } } - Ok(upload_signatures::v3::Response { - failures: BTreeMap::new(), // TODO: integrate - }) + Ok(upload_signatures::v3::Response { failures: BTreeMap::new() }) } /// # `POST /_matrix/client/r0/keys/changes` @@ -385,35 +534,40 @@ where .collect(); while let Some((server, response)) = futures.next().await { - if let Ok(response) = response { - for (user, master_key) in response.master_keys { - let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; + match response { + | Ok(response) => { + for (user, master_key) in response.master_keys { + let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; - if let Ok(our_master_key) = services - .users - .get_key(&master_key_id, sender_user, &user, &allowed_signatures) - .await - { - let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; - master_key.signatures.append(&mut our_master_key.signatures); + if let Ok(our_master_key) = services + .users + .get_key(&master_key_id, sender_user, &user, &allowed_signatures) + .await + { + let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; + master_key.signatures.append(&mut our_master_key.signatures); + } + let json = serde_json::to_value(master_key).expect("to_value always works"); + let raw = serde_json::from_value(json).expect("Raw::from_value always works"); + services + .users + 
.add_cross_signing_keys( + &user, &raw, &None, &None, + false, /* Dont notify. A notification would trigger another key + * request resulting in an endless loop */ + ) + .await?; + if let Some(raw) = raw { + master_keys.insert(user.clone(), raw); + } } - let json = serde_json::to_value(master_key).expect("to_value always works"); - let raw = serde_json::from_value(json).expect("Raw::from_value always works"); - services - .users - .add_cross_signing_keys( - &user, &raw, &None, &None, - false, /* Dont notify. A notification would trigger another key request - * resulting in an endless loop */ - ) - .await?; - master_keys.insert(user.clone(), raw); - } - self_signing_keys.extend(response.self_signing_keys); - device_keys.extend(response.device_keys); - } else { - failures.insert(server.to_string(), json!({})); + self_signing_keys.extend(response.self_signing_keys); + device_keys.extend(response.device_keys); + }, + | _ => { + failures.insert(server.to_string(), json!({})); + }, } } diff --git a/src/api/client/media.rs b/src/api/client/media.rs index afbc218a..94572413 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -3,16 +3,16 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - err, + Err, Result, err, utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize}, - Err, Result, }; use conduwuit_service::{ - media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, MXC_LENGTH}, Services, + media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH}, }; use reqwest::Url; use ruma::{ + Mxc, UserId, api::client::{ authenticated_media::{ get_content, get_content_as_filename, get_content_thumbnail, get_media_config, @@ -20,7 +20,6 @@ use ruma::{ }, media::create_content, }, - Mxc, UserId, }; use crate::Ruma; @@ -57,19 +56,28 @@ pub(crate) async fn create_content_route( let filename = body.filename.as_deref(); let content_type = 
body.content_type.as_deref(); let content_disposition = make_content_disposition(None, content_type, filename); - let mxc = Mxc { + let ref mxc = Mxc { server_name: services.globals.server_name(), media_id: &utils::random_string(MXC_LENGTH), }; services .media - .create(&mxc, Some(user), Some(&content_disposition), content_type, &body.file) - .await - .map(|()| create_content::v3::Response { - content_uri: mxc.to_string().into(), - blurhash: None, - }) + .create(mxc, Some(user), Some(&content_disposition), content_type, &body.file) + .await?; + + let blurhash = body.generate_blurhash.then(|| { + services + .media + .create_blurhash(&body.file, content_type, filename) + .ok() + .flatten() + }); + + Ok(create_content::v3::Response { + content_uri: mxc.to_string().into(), + blurhash: blurhash.flatten(), + }) } /// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}` diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index 4fa0b52e..d9f24f77 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -3,21 +3,20 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - err, + Err, Result, err, utils::{content_disposition::make_content_disposition, math::ruma_from_usize}, - Err, Result, }; -use conduwuit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN}; +use conduwuit_service::media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta}; use reqwest::Url; use ruma::{ + Mxc, api::client::media::{ create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, get_media_preview, }, - Mxc, }; -use crate::{client::create_content_route, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse, client::create_content_route}; /// # `GET /_matrix/media/v3/config` /// @@ -142,46 +141,52 @@ pub(crate) async fn get_content_legacy_route( media_id: &body.media_id, }; - if let Some(FileMeta { - content, - content_type, - 
content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + match services.media.get(&mxc).await? { + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + ); - Ok(get_content::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content::v3::Response { - file: response.file, - 
content_type: response.content_type, - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content::v3::Response { + file: response.file, + content_type: response.content_type, + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } @@ -227,49 +232,52 @@ pub(crate) async fn get_content_as_filename_legacy_route( media_id: &body.media_id, }; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = make_content_disposition( - content_disposition.as_ref(), - content_type.as_deref(), - Some(&body.filename), - ); + match services.media.get(&mxc).await? 
{ + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + Some(&body.filename), + ); - Ok(get_content_as_filename::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content_as_filename::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content_as_filename::v3::Response { - content_disposition: Some(content_disposition), - content_type: response.content_type, - file: response.file, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: 
Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content_as_filename::v3::Response { + content_disposition: Some(content_disposition), + content_type: response.content_type, + file: response.file, + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } @@ -315,46 +323,52 @@ pub(crate) async fn get_content_thumbnail_legacy_route( }; let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get_thumbnail(&mxc, &dim).await? - { - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + match services.media.get_thumbnail(&mxc, &dim).await? { + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + ); - Ok(get_content_thumbnail::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_thumbnail_legacy(&body) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content_thumbnail::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: 
Some(content_disposition), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_thumbnail_legacy(&body) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content_thumbnail::v3::Response { - file: response.file, - content_type: response.content_type, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content_thumbnail::v3::Response { + file: response.file, + content_type: response.content_type, + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: Some(content_disposition), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index fccb9b53..2847d668 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,6 +1,7 @@ use std::{ borrow::Borrow, - collections::{BTreeMap, HashMap, HashSet}, + collections::{HashMap, HashSet}, + iter::once, net::IpAddr, sync::Arc, }; @@ -8,48 +9,59 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - at, debug, debug_info, debug_warn, err, info, - pdu::{gen_event_id_canonical_json, PduBuilder}, - result::FlatOk, + Err, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, is_matching, + matrix::{ + StateKey, + pdu::{PduBuilder, PduEvent, 
gen_event_id, gen_event_id_canonical_json}, + state_res, + }, + result::{FlatOk, NotFound}, trace, - utils::{self, shuffle, IterStream, ReadyExt}, - warn, Err, PduEvent, Result, + utils::{ + self, FutureBoolExt, + future::ReadyEqExt, + shuffle, + stream::{BroadbandExt, IterStream, ReadyExt}, + }, + warn, }; -use futures::{join, FutureExt, StreamExt, TryFutureExt}; +use conduwuit_service::{ + Services, + appservice::RegistrationInfo, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, +}; +use futures::{FutureExt, StreamExt, TryFutureExt, join, pin_mut}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, api::{ client::{ error::ErrorKind, knock::knock_room, membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, + ThirdPartySigned, ban_user, forget_room, + get_member_events::{self, v3::MembershipEventFilter}, + invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members::{self, v3::RoomMember}, - joined_rooms, kick_user, leave_room, unban_user, ThirdPartySigned, + joined_rooms, kick_user, leave_room, unban_user, }, }, federation::{self, membership::create_invite}, }, canonical_json::to_canonical_value, events::{ + StateEventType, room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, }, - StateEventType, }, - state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, -}; -use service::{ - appservice::RegistrationInfo, - pdu::gen_event_id, - rooms::{state::RoomMutexGuard, state_compressor::HashSetCompressStateEvent}, - Services, }; -use crate::{client::full_user_deactivate, Ruma}; +use crate::{Ruma, client::full_user_deactivate}; /// Checks if 
the room is banned in any way possible and the sender user is not /// an admin. @@ -71,10 +83,8 @@ async fn banned_room_check( if let Some(room_id) = room_id { if services.rooms.metadata.is_banned(room_id).await || services - .server - .config - .forbidden_remote_server_names - .contains(&room_id.server_name().unwrap().to_owned()) + .moderation + .is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")) { warn!( "User {user_id} who is not an admin attempted to send an invite for or \ @@ -89,12 +99,11 @@ async fn banned_room_check( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "Automatically deactivating user {user_id} due to attempted banned \ room join from IP {client_ip}" - ))) - .await - .ok(); + )) + .await; } let all_joined_rooms: Vec = services @@ -112,10 +121,9 @@ async fn banned_room_check( } } else if let Some(server_name) = server_name { if services - .server .config .forbidden_remote_server_names - .contains(&server_name.to_owned()) + .is_match(server_name.host()) { warn!( "User {user_id} who is not an admin tried joining a room which has the server \ @@ -130,12 +138,11 @@ async fn banned_room_check( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "Automatically deactivating user {user_id} due to attempted banned \ room join from IP {client_ip}" - ))) - .await - .ok(); + )) + .await; } let all_joined_rooms: Vec = services @@ -360,10 +367,10 @@ pub(crate) async fn knock_room_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let body = body.body; + let sender_user = body.sender_user(); + let body = &body.body; - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + let (servers, room_id) = match 
OwnedRoomId::try_from(body.room_id_or_alias.clone()) { | Ok(room_id) => { banned_room_check( &services, @@ -470,9 +477,9 @@ pub(crate) async fn leave_room_route( State(services): State, body: Ruma, ) -> Result { - leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()).await?; - - Ok(leave_room::v3::Response::new()) + leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) + .await + .map(|()| leave_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` @@ -486,8 +493,8 @@ pub(crate) async fn invite_user_route( ) -> Result { let sender_user = body.sender_user(); - if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { - info!( + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { + debug_error!( "User {sender_user} is not an admin and attempted to send an invite to room {}", &body.room_id ); @@ -503,43 +510,52 @@ pub(crate) async fn invite_user_route( ) .await?; - if let invite_user::v3::InvitationRecipient::UserId { user_id } = &body.recipient { - let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); - let recipient_ignored_by_sender = services.users.user_is_ignored(user_id, sender_user); + match &body.recipient { + | invite_user::v3::InvitationRecipient::UserId { user_id } => { + let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); + let recipient_ignored_by_sender = + services.users.user_is_ignored(user_id, sender_user); - let (sender_ignored_recipient, recipient_ignored_by_sender) = - join!(sender_ignored_recipient, recipient_ignored_by_sender); + let (sender_ignored_recipient, recipient_ignored_by_sender) = + join!(sender_ignored_recipient, recipient_ignored_by_sender); - if sender_ignored_recipient { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." 
- ))); - } - - if let Ok(target_user_membership) = services - .rooms - .state_accessor - .get_member(&body.room_id, user_id) - .await - { - if target_user_membership.membership == MembershipState::Ban { - return Err!(Request(Forbidden("User is banned from this room."))); + if sender_ignored_recipient { + return Ok(invite_user::v3::Response {}); } - } - if recipient_ignored_by_sender { - // silently drop the invite to the recipient if they've been ignored by the - // sender, pretend it worked - return Ok(invite_user::v3::Response {}); - } + if let Ok(target_user_membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, user_id) + .await + { + if target_user_membership.membership == MembershipState::Ban { + return Err!(Request(Forbidden("User is banned from this room."))); + } + } - invite_helper(&services, sender_user, user_id, &body.room_id, body.reason.clone(), false) + if recipient_ignored_by_sender { + // silently drop the invite to the recipient if they've been ignored by the + // sender, pretend it worked + return Ok(invite_user::v3::Response {}); + } + + invite_helper( + &services, + sender_user, + user_id, + &body.room_id, + body.reason.clone(), + false, + ) .boxed() .await?; - Ok(invite_user::v3::Response {}) - } else { - Err!(Request(NotFound("User not found."))) + Ok(invite_user::v3::Response {}) + }, + | _ => { + Err!(Request(NotFound("User not found."))) + }, } } @@ -702,21 +718,35 @@ pub(crate) async fn forget_room_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user(); + let user_id = body.sender_user(); + let room_id = &body.room_id; - if services - .rooms - .state_cache - .is_joined(sender_user, &body.room_id) - .await - { + let joined = services.rooms.state_cache.is_joined(user_id, room_id); + let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); + let invited = services.rooms.state_cache.is_invited(user_id, room_id); + + pin_mut!(joined, knocked, invited); + if 
joined.or(knocked).or(invited).await { return Err!(Request(Unknown("You must leave the room before forgetting it"))); } - services + let membership = services .rooms - .state_cache - .forget(&body.room_id, sender_user); + .state_accessor + .get_member(room_id, user_id) + .await; + + if membership.is_not_found() { + return Err!(Request(Unknown("No membership event was found, room was never joined"))); + } + + let non_membership = membership + .map(|member| member.membership) + .is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban)); + + if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await { + services.rooms.state_cache.forget(room_id, user_id); + } Ok(forget_room::v3::Response::new()) } @@ -739,6 +769,54 @@ pub(crate) async fn joined_rooms_route( }) } +fn membership_filter( + pdu: PduEvent, + for_membership: Option<&MembershipEventFilter>, + not_membership: Option<&MembershipEventFilter>, +) -> Option { + let membership_state_filter = match for_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(MembershipEventFilter::Leave) => MembershipState::Leave, + | Some(_) | None => MembershipState::Join, + }; + + let not_membership_state_filter = match not_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Join) => MembershipState::Join, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(_) | None => MembershipState::Leave, + }; + + let evt_membership = pdu.get_content::().ok()?.membership; + + if for_membership.is_some() && not_membership.is_some() { + if membership_state_filter != evt_membership + || not_membership_state_filter == evt_membership + { + None + } else { + Some(pdu) + } + } else if for_membership.is_some() && 
not_membership.is_none() { + if membership_state_filter != evt_membership { + None + } else { + Some(pdu) + } + } else if not_membership.is_some() && for_membership.is_none() { + if not_membership_state_filter == evt_membership { + None + } else { + Some(pdu) + } + } else { + Some(pdu) + } +} + /// # `POST /_matrix/client/r0/rooms/{roomId}/members` /// /// Lists all joined users in a room (TODO: at a specific point in time, with a @@ -750,6 +828,8 @@ pub(crate) async fn get_member_events_route( body: Ruma, ) -> Result { let sender_user = body.sender_user(); + let membership = body.membership.as_ref(); + let not_membership = body.not_membership.as_ref(); if !services .rooms @@ -768,6 +848,7 @@ pub(crate) async fn get_member_events_route( .ready_filter_map(Result::ok) .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) .map(at!(1)) + .ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) .map(PduEvent::into_member_event) .collect() .await, @@ -784,32 +865,32 @@ pub(crate) async fn joined_members_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user(); - if !services .rooms .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) + .user_can_see_state_events(body.sender_user(), &body.room_id) .await { return Err!(Request(Forbidden("You don't have permission to view this room."))); } - let joined: BTreeMap = services - .rooms - .state_cache - .room_members(&body.room_id) - .map(ToOwned::to_owned) - .then(|user| async move { - (user.clone(), RoomMember { - display_name: services.users.displayname(&user).await.ok(), - avatar_url: services.users.avatar_url(&user).await.ok(), - }) - }) - .collect() - .await; + Ok(joined_members::v3::Response { + joined: services + .rooms + .state_cache + .room_members(&body.room_id) + .map(ToOwned::to_owned) + .broad_then(|user_id| async move { + let member = RoomMember { + display_name: services.users.displayname(&user_id).await.ok(), + avatar_url: 
services.users.avatar_url(&user_id).await.ok(), + }; - Ok(joined_members::v3::Response { joined }) + (user_id, member) + }) + .collect() + .await, + }) } pub async fn join_room_by_id_helper( @@ -978,7 +1059,7 @@ async fn join_room_by_id_helper_remote( | _ => { join_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present @@ -1007,10 +1088,17 @@ async fn join_room_by_id_helper_remote( .await, }; - let send_join_response = services + let send_join_response = match services .sending .send_synapse_request(&remote_server, send_join_request) - .await?; + .await + { + | Ok(response) => response, + | Err(e) => { + error!("send_join failed: {e}"); + return Err(e); + }, + }; info!("send_join finished"); @@ -1029,9 +1117,10 @@ async fn join_room_by_id_helper_remote( })?; if signed_event_id != event_id { - return Err!(Request(BadJson( - warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") - ))); + return Err!(Request(BadJson(warn!( + %signed_event_id, %event_id, + "Server {remote_server} sent event with wrong event ID" + )))); } match signed_value["signatures"] @@ -1147,8 +1236,8 @@ async fn join_room_by_id_helper_remote( debug!("Running send_join auth check"); let fetch_state = &state; - let state_fetch = |k: &'static StateEventType, s: String| async move { - let shortstatekey = services.rooms.short.get_shortstatekey(k, &s).await.ok()?; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?; let event_id = fetch_state.get(&shortstatekey)?; services.rooms.timeline.get_pdu(event_id).await.ok() @@ -1158,7 +1247,7 @@ async fn join_room_by_id_helper_remote( &state_res::RoomVersion::new(&room_version_id)?, &parsed_join_pdu, None, // TODO: third party invite - |k, s| state_fetch(k, s.to_owned()), + |k, s| state_fetch(k.clone(), s.into()), ) .await .map_err(|e| 
err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; @@ -1168,7 +1257,7 @@ async fn join_room_by_id_helper_remote( } info!("Compressing state from send_join"); - let compressed: HashSet<_> = services + let compressed: CompressedState = services .rooms .state_compressor .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) @@ -1216,7 +1305,7 @@ async fn join_room_by_id_helper_remote( .append_pdu( &parsed_join_pdu, join_event, - vec![(*parsed_join_pdu.event_id).to_owned()], + once(parsed_join_pdu.event_id.borrow()), &state_lock, ) .await?; @@ -1398,7 +1487,7 @@ async fn join_room_by_id_helper_local( | _ => { join_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present @@ -1540,7 +1629,7 @@ pub(crate) async fn invite_helper( reason: Option, is_direct: bool, ) -> Result { - if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { info!( "User {sender_user} is not an admin and attempted to send an invite to room \ {room_id}" @@ -1607,19 +1696,18 @@ pub(crate) async fn invite_helper( })?; if pdu.event_id != event_id { - return Err!(Request(BadJson( - warn!(%pdu.event_id, %event_id, "Server {} sent event with wrong event ID", user_id.server_name()) - ))); + return Err!(Request(BadJson(warn!( + %pdu.event_id, %event_id, + "Server {} sent event with wrong event ID", + user_id.server_name() + )))); } - let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, - ) - .expect("CanonicalJson is valid json value"), - ) + let origin: OwnedServerName = serde_json::from_value(serde_json::to_value( + value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, + )?) 
.map_err(|e| { err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))) })?; @@ -1675,8 +1763,8 @@ pub(crate) async fn invite_helper( Ok(()) } -// Make a user leave all their joined rooms, forgets all rooms, and ignores -// errors +// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms, +// and ignores errors pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { let rooms_joined = services .rooms @@ -1690,7 +1778,17 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { .rooms_invited(user_id) .map(|(r, _)| r); - let all_rooms: Vec<_> = rooms_joined.chain(rooms_invited).collect().await; + let rooms_knocked = services + .rooms + .state_cache + .rooms_knocked(user_id) + .map(|(r, _)| r); + + let all_rooms: Vec<_> = rooms_joined + .chain(rooms_invited) + .chain(rooms_knocked) + .collect() + .await; for room_id in all_rooms { // ignore errors @@ -1707,19 +1805,60 @@ pub async fn leave_room( user_id: &UserId, room_id: &RoomId, reason: Option, -) -> Result<()> { - // Ask a remote server if we don't have this room and are not knocking on it - if !services +) -> Result { + let default_member_content = RoomMemberEventContent { + membership: MembershipState::Leave, + reason: reason.clone(), + join_authorized_via_users_server: None, + is_direct: None, + avatar_url: None, + displayname: None, + third_party_invite: None, + blurhash: None, + }; + + let is_banned = services.rooms.metadata.is_banned(room_id); + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + pin_mut!(is_banned, is_disabled); + if is_banned.or(is_disabled).await { + // the room is banned/disabled, the room must be rejected locally since we + // cant/dont want to federate with this server + services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + None, + None, + true, + ) + .await?; + + return Ok(()); + } + + let dont_have_room = services .rooms 
.state_cache .server_in_room(services.globals.server_name(), room_id) - .await && !services + .eq(&false); + + let not_knocked = services .rooms .state_cache .is_knocked(user_id, room_id) - .await - { - if let Err(e) = remote_leave_room(services, user_id, room_id).await { + .eq(&false); + + // Ask a remote server if we don't have this room and are not knocking on it + if dont_have_room.and(not_knocked).await { + if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone()) + .boxed() + .await + { warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); // Don't tell the client about this error } @@ -1740,7 +1879,7 @@ pub async fn leave_room( .update_membership( room_id, user_id, - RoomMemberEventContent::new(MembershipState::Leave), + default_member_content, user_id, last_state, None, @@ -1760,26 +1899,23 @@ pub async fn leave_room( ) .await else { - // Fix for broken rooms - warn!( + debug_warn!( "Trying to leave a room you are not a member of, marking room as left locally." 
); - services + return services .rooms .state_cache .update_membership( room_id, user_id, - RoomMemberEventContent::new(MembershipState::Leave), + default_member_content, user_id, None, None, true, ) - .await?; - - return Ok(()); + .await; }; services @@ -1807,9 +1943,10 @@ async fn remote_leave_room( services: &Services, user_id: &UserId, room_id: &RoomId, + reason: Option, ) -> Result<()> { let mut make_leave_response_and_server = - Err!(BadServerResponse("No server available to assist in leaving.")); + Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); let mut servers: HashSet = services .rooms @@ -1819,38 +1956,46 @@ async fn remote_leave_room( .collect() .await; - if let Ok(invite_state) = services + match services .rooms .state_cache .invite_state(user_id, room_id) .await { - servers.extend( - invite_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - } else if let Ok(knock_state) = services - .rooms - .state_cache - .knock_state(user_id, room_id) - .await - { - servers.extend( - knock_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .filter_map(|sender| { - if !services.globals.user_is_local(sender) { - Some(sender.server_name().to_owned()) - } else { - None - } - }), - ); + | Ok(invite_state) => { + servers.extend( + invite_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + }, + | _ => { + match services + .rooms + .state_cache + .knock_state(user_id, room_id) + .await + { + | Ok(knock_state) => { + servers.extend( + knock_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + 
.filter_map(|sender| { + if !services.globals.user_is_local(sender) { + Some(sender.server_name().to_owned()) + } else { + None + } + }), + ); + }, + | _ => {}, + } + }, } if let Some(room_id_server_name) = room_id.server_name() { @@ -1881,20 +2026,25 @@ async fn remote_leave_room( let (make_leave_response, remote_server) = make_leave_response_and_server?; let Some(room_version_id) = make_leave_response.room_version else { - return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); + return Err!(BadServerResponse(warn!( + "No room version was returned by {remote_server} for {room_id}, room version is \ + likely not supported by conduwuit" + ))); }; if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); + return Err!(BadServerResponse(warn!( + "Remote room version {room_version_id} for {room_id} is not supported by conduwuit", + ))); } let mut leave_event_stub = serde_json::from_str::( make_leave_response.event.get(), ) .map_err(|e| { - err!(BadServerResponse("Invalid make_leave event json received from server: {e:?}")) + err!(BadServerResponse(warn!( + "Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}" + ))) })?; // TODO: Is origin needed? 
@@ -1910,6 +2060,12 @@ async fn remote_leave_room( .expect("Timestamp is valid js_int value"), ), ); + // Inject the reason key into the event content dict if it exists + if let Some(reason) = reason { + if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") { + content.insert("reason".to_owned(), CanonicalJsonValue::String(reason)); + } + } // room v3 and above removed the "event_id" field from remote PDU format match room_version_id { @@ -1917,7 +2073,7 @@ async fn remote_leave_room( | _ => { leave_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present @@ -2195,7 +2351,7 @@ async fn knock_room_helper_local( .append_pdu( &parsed_knock_pdu, knock_event, - vec![(*parsed_knock_pdu.event_id).to_owned()], + once(parsed_knock_pdu.event_id.borrow()), &state_lock, ) .await?; @@ -2339,7 +2495,7 @@ async fn knock_room_helper_remote( } info!("Compressing state from send_knock"); - let compressed: HashSet<_> = services + let compressed: CompressedState = services .rooms .state_compressor .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) @@ -2394,7 +2550,7 @@ async fn knock_room_helper_remote( .append_pdu( &parsed_knock_pdu, knock_event, - vec![(*parsed_knock_pdu.event_id).to_owned()], + once(parsed_knock_pdu.event_id.borrow()), &state_lock, ) .await?; diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 321d8013..16b1796a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,36 +1,44 @@ +use core::panic; + use axum::extract::State; use conduwuit::{ - at, is_equal_to, + Err, Result, at, + matrix::{ + Event, + pdu::{PduCount, PduEvent}, + }, utils::{ + IterStream, ReadyExt, result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, - IterStream, ReadyExt, }, - Event, PduCount, PduEvent, Result, }; -use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt, TryFutureExt}; 
-use ruma::{ - api::{ - client::{filter::RoomEventFilter, message::get_message_events}, - Direction, - }, - events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, - serde::Raw, - RoomId, UserId, -}; -use service::{ +use conduwuit_service::{ + Services, rooms::{ lazy_loading, lazy_loading::{Options, Witness}, timeline::PdusIterItem, }, - Services, +}; +use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; +use ruma::{ + DeviceId, RoomId, UserId, + api::{ + Direction, + client::{filter::RoomEventFilter, message::get_message_events}, + }, + events::{ + AnyStateEvent, StateEventType, + TimelineEventType::{self, *}, + }, + serde::Raw, }; use crate::Ruma; /// list of safe and common non-state events to ignore if the user is ignored -const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 17] = &[ +const IGNORED_MESSAGE_TYPES: &[TimelineEventType] = &[ Audio, CallInvite, Emote, @@ -64,11 +72,15 @@ pub(crate) async fn get_message_events_route( body: Ruma, ) -> Result { debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted"); - let sender = body.sender(); - let (sender_user, sender_device) = sender; + let sender_user = body.sender_user(); + let sender_device = body.sender_device.as_ref(); let room_id = &body.room_id; let filter = &body.filter; + if !services.rooms.metadata.exists(room_id).await { + return Err!(Request(Forbidden("Room does not exist to this server"))); + } + let from: PduCount = body .from .as_deref() @@ -125,7 +137,15 @@ pub(crate) async fn get_message_events_route( let lazy_loading_context = lazy_loading::Context { user_id: sender_user, - device_id: sender_device, + device_id: match sender_device { + | Some(device_id) => device_id, + | None => + if let Some(registration) = body.appservice_info.as_ref() { + <&DeviceId>::from(registration.registration.id.as_str()) + } else { + panic!("No device_id provided and no appservice registration found, this should be unreachable"); + }, 
+ }, room_id, token: Some(from.into_unsigned()), options: Some(&filter.lazy_load_options), @@ -154,7 +174,7 @@ pub(crate) async fn get_message_events_route( let chunk = events .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(); Ok(get_message_events::v3::Response { @@ -225,34 +245,50 @@ async fn get_member_event( .ok() } +#[inline] pub(crate) async fn ignored_filter( services: &Services, item: PdusIterItem, user_id: &UserId, ) -> Option { - let (_, pdu) = &item; + let (_, ref pdu) = item; + is_ignored_pdu(services, pdu, user_id) + .await + .eq(&false) + .then_some(item) +} + +#[inline] +pub(crate) async fn is_ignored_pdu( + services: &Services, + pdu: &PduEvent, + user_id: &UserId, +) -> bool { // exclude Synapse's dummy events from bloating up response bodies. clients // don't need to see this. if pdu.kind.to_cow_str() == "org.matrix.dummy_event" { - return None; + return true; } - if IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok() - && (services.users.user_is_ignored(&pdu.sender, user_id).await - || services - .server - .config - .forbidden_remote_server_names - .iter() - .any(is_equal_to!(pdu.sender().server_name()))) + let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); + + let ignored_server = services + .moderation + .is_remote_server_ignored(pdu.sender().server_name()); + + if ignored_type + && (ignored_server + || (!services.config.send_messages_from_ignored_users_to_client + && services.users.user_is_ignored(&pdu.sender, user_id).await)) { - return None; + return true; } - Some(item) + false } +#[inline] pub(crate) async fn visibility_filter( services: &Services, item: PdusIterItem, @@ -268,7 +304,16 @@ pub(crate) async fn visibility_filter( .then_some(item) } +#[inline] pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { let (_, pdu) = &item; pdu.matches(filter).then_some(item) } + +#[cfg_attr(debug_assertions, conduwuit::ctor)] +fn 
_is_sorted() { + debug_assert!( + IGNORED_MESSAGE_TYPES.is_sorted(), + "IGNORED_MESSAGE_TYPES must be sorted by the developer" + ); +} diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 671d0c6d..8d2de68d 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -1,14 +1,14 @@ use std::time::Duration; use axum::extract::State; -use conduwuit::utils; +use conduwuit::{Error, Result, utils}; use ruma::{ api::client::{account, error::ErrorKind}, authentication::TokenType, }; use super::TOKEN_LENGTH; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/v3/user/{userId}/openid/request_token` /// diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index d19e6ae1..548e5cce 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -1,12 +1,10 @@ use std::time::Duration; use axum::extract::State; -use ruma::api::client::{ - error::ErrorKind, - presence::{get_presence, set_presence}, -}; +use conduwuit::{Err, Result}; +use ruma::api::client::presence::{get_presence, set_presence}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/presence/{userId}/status` /// @@ -15,24 +13,17 @@ pub(crate) async fn set_presence_route( State(services): State, body: Ruma, ) -> Result { - if !services.globals.allow_local_presence() { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Presence is disabled on this server", - )); + if !services.config.allow_local_presence { + return Err!(Request(Forbidden("Presence is disabled on this server"))); } - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if sender_user != &body.user_id && body.appservice_info.is_none() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to set presence of other users", - )); + if body.sender_user() != body.user_id && body.appservice_info.is_none() { + return Err!(Request(InvalidParam("Not allowed to set presence of other 
users"))); } services .presence - .set_presence(sender_user, &body.presence, None, None, body.status_msg.clone()) + .set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone()) .await?; Ok(set_presence::v3::Response {}) @@ -47,21 +38,15 @@ pub(crate) async fn get_presence_route( State(services): State, body: Ruma, ) -> Result { - if !services.globals.allow_local_presence() { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Presence is disabled on this server", - )); + if !services.config.allow_local_presence { + return Err!(Request(Forbidden("Presence is disabled on this server",))); } - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut presence_event = None; - let has_shared_rooms = services .rooms .state_cache - .user_sees_user(sender_user, &body.user_id) + .user_sees_user(body.sender_user(), &body.user_id) .await; if has_shared_rooms { @@ -70,37 +55,35 @@ pub(crate) async fn get_presence_route( } } - if let Some(presence) = presence_event { - let status_msg = if presence - .content - .status_msg - .as_ref() - .is_some_and(String::is_empty) - { - None - } else { - presence.content.status_msg - }; - - let last_active_ago = match presence.content.currently_active { - | Some(true) => None, - | _ => presence + match presence_event { + | Some(presence) => { + let status_msg = if presence .content - .last_active_ago - .map(|millis| Duration::from_millis(millis.into())), - }; + .status_msg + .as_ref() + .is_some_and(String::is_empty) + { + None + } else { + presence.content.status_msg + }; - Ok(get_presence::v3::Response { - // TODO: Should ruma just use the presenceeventcontent type here? 
- status_msg, - currently_active: presence.content.currently_active, - last_active_ago, - presence: presence.content.presence, - }) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "Presence state for this user was not found", - )) + let last_active_ago = match presence.content.currently_active { + | Some(true) => None, + | _ => presence + .content + .last_active_ago + .map(|millis| Duration::from_millis(millis.into())), + }; + + Ok(get_presence::v3::Response { + // TODO: Should ruma just use the presenceeventcontent type here? + status_msg, + currently_active: presence.content.currently_active, + last_active_ago, + presence: presence.content.presence, + }) + }, + | _ => Err!(Request(NotFound("Presence state for this user was not found"))), } } diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 584adfc1..3699b590 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -2,12 +2,15 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - pdu::PduBuilder, - utils::{stream::TryIgnore, IterStream}, - warn, Err, Error, Result, + Err, Error, Result, + matrix::pdu::PduBuilder, + utils::{IterStream, stream::TryIgnore}, + warn, }; -use futures::{future::join3, StreamExt, TryStreamExt}; +use conduwuit_service::Services; +use futures::{StreamExt, TryStreamExt, future::join3}; use ruma::{ + OwnedMxcUri, OwnedRoomId, UserId, api::{ client::{ error::ErrorKind, @@ -19,9 +22,7 @@ use ruma::{ }, events::room::member::{MembershipState, RoomMemberEventContent}, presence::PresenceState, - OwnedMxcUri, OwnedRoomId, UserId, }; -use service::Services; use crate::Ruma; @@ -51,7 +52,7 @@ pub(crate) async fn set_displayname_route( update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms) .await; - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -146,7 +147,7 @@ pub(crate) async fn set_avatar_url_route( ) 
.await; - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence diff --git a/src/api/client/push.rs b/src/api/client/push.rs index ed7371e4..81020ffa 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -1,6 +1,8 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, Error, Result, err}; +use conduwuit_service::Services; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, api::client::{ error::ErrorKind, push::{ @@ -10,18 +12,16 @@ use ruma::{ }, }, events::{ - push_rules::{PushRulesEvent, PushRulesEventContent}, GlobalAccountDataEventType, + push_rules::{PushRulesEvent, PushRulesEventContent}, }, push::{ InsertPushRuleError, PredefinedContentRuleId, PredefinedOverrideRuleId, RemovePushRuleError, Ruleset, }, - CanonicalJsonObject, CanonicalJsonValue, }; -use service::Services; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/pushrules/` /// @@ -503,7 +503,7 @@ pub(crate) async fn set_pushers_route( services .pusher - .set_pusher(sender_user, &body.action) + .set_pusher(sender_user, body.sender_device(), &body.action) .await?; Ok(set_pusher::v3::Response::new()) diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index 89fe003a..fbfc8fea 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -1,17 +1,17 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{err, Err, PduCount}; +use conduwuit::{Err, PduCount, Result, err}; use ruma::{ + MilliSecondsSinceUnixEpoch, api::client::{read_marker::set_read_marker, receipt::create_receipt}, events::{ - receipt::{ReceiptThread, ReceiptType}, RoomAccountDataEventType, + receipt::{ReceiptThread, ReceiptType}, }, - MilliSecondsSinceUnixEpoch, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` /// @@ -50,7 +50,7 @@ pub(crate) async fn 
set_read_marker_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, &ruma::presence::PresenceState::Online) @@ -126,7 +126,7 @@ pub(crate) async fn create_receipt_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, &ruma::presence::PresenceState::Online) @@ -197,11 +197,12 @@ pub(crate) async fn create_receipt_route( .read_receipt .private_read_set(&body.room_id, sender_user, count); }, - | _ => + | _ => { return Err!(Request(InvalidParam(warn!( "Received unknown read receipt type: {}", &body.receipt_type - )))), + )))); + }, } Ok(create_receipt::v3::Response {}) diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index ba59a010..8dbe47a6 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -1,9 +1,10 @@ use axum::extract::State; +use conduwuit::{Result, matrix::pdu::PduBuilder}; use ruma::{ api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, }; -use crate::{service::pdu::PduBuilder, Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` /// diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index 87fb1eac..b8c2dd4d 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,22 +1,22 @@ use axum::extract::State; use conduwuit::{ - at, - utils::{result::FlatOk, stream::WidebandExt, IterStream, ReadyExt}, - PduCount, Result, + Result, at, + matrix::pdu::PduCount, + utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, }; +use conduwuit_service::{Services, rooms::timeline::PdusIterItem}; use futures::StreamExt; use ruma::{ + EventId, RoomId, UInt, UserId, api::{ + Direction, client::relations::{ get_relating_events, get_relating_events_with_rel_type, 
get_relating_events_with_rel_type_and_event_type, }, - Direction, }, - events::{relation::RelationType, TimelineEventType}, - EventId, RoomId, UInt, UserId, + events::{TimelineEventType, relation::RelationType}, }; -use service::{rooms::timeline::PdusIterItem, Services}; use crate::Ruma; diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 57de3f12..4ee8ebe5 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -2,23 +2,21 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{info, utils::ReadyExt, Err}; +use conduwuit::{Err, Error, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt}; +use conduwuit_service::Services; use rand::Rng; use ruma::{ + EventId, RoomId, UserId, api::client::{ error::ErrorKind, room::{report_content, report_room}, }, events::room::message, - int, EventId, RoomId, UserId, + int, }; use tokio::time::sleep; -use crate::{ - debug_info, - service::{pdu::PduEvent, Services}, - Error, Result, Ruma, -}; +use crate::Ruma; /// # `POST /_matrix/client/v3/rooms/{roomId}/report` /// @@ -43,7 +41,7 @@ pub(crate) async fn report_room_route( ErrorKind::InvalidParam, "Reason too long, should be 750 characters or fewer", )); - }; + } delay_response().await; @@ -164,14 +162,14 @@ async fn is_event_report_valid( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", )); - }; + } if reason.as_ref().is_some_and(|s| s.len() > 750) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Reason too long, should be 750 characters or fewer", )); - }; + } if !services .rooms diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index a401b63d..be3fd23b 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,15 +2,20 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, + Err, Error, Result, 
debug_info, debug_warn, err, error, info, + matrix::{StateKey, pdu::PduBuilder}, + warn, }; +use conduwuit_service::{Services, appservice::RegistrationInfo}; use futures::FutureExt; use ruma::{ + CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, api::client::{ error::ErrorKind, room::{self, create_room}, }, events::{ + TimelineEventType, room::{ canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, @@ -22,16 +27,13 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, topic::RoomTopicEventContent, }, - TimelineEventType, }, int, serde::{JsonObject, Raw}, - CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; -use service::{appservice::RegistrationInfo, Services}; -use crate::{client::invite_helper, Ruma}; +use crate::{Ruma, client::invite_helper}; /// # `POST /_matrix/client/v3/createRoom` /// @@ -68,10 +70,9 @@ pub(crate) async fn create_room_route( )); } - let room_id: OwnedRoomId = if let Some(custom_room_id) = &body.room_id { - custom_room_id_check(&services, custom_room_id)? - } else { - RoomId::new(&services.server.name) + let room_id: OwnedRoomId = match &body.room_id { + | Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?, + | _ => RoomId::new(&services.server.name), }; // check if room ID doesn't already exist instead of erroring on auth check @@ -106,7 +107,6 @@ pub(crate) async fn create_room_route( return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed"))); } - let _short_id = services .rooms .short @@ -114,10 +114,10 @@ pub(crate) async fn create_room_route( .await; let state_lock = services.rooms.state.mutex.lock(&room_id).await; - let alias: Option = if let Some(alias) = body.room_alias_name.as_ref() { - Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?) 
- } else { - None + let alias: Option = match body.room_alias_name.as_ref() { + | Some(alias) => + Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?), + | _ => None, }; let room_version = match body.room_version.clone() { @@ -198,7 +198,7 @@ pub(crate) async fn create_room_route( event_type: TimelineEventType::RoomCreate, content: to_raw_value(&create_content) .expect("create event content serialization"), - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -240,9 +240,7 @@ pub(crate) async fn create_room_route( if preset == RoomPreset::TrustedPrivateChat { for invite in &body.invite { if services.users.user_is_ignored(sender_user, invite).await { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); + continue; } else if services.users.user_is_ignored(invite, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked @@ -267,7 +265,7 @@ pub(crate) async fn create_room_route( event_type: TimelineEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) .expect("serialized power_levels event content"), - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -371,11 +369,11 @@ pub(crate) async fn create_room_route( } // Implicit state key defaults to "" - pdu_builder.state_key.get_or_insert_with(String::new); + pdu_builder.state_key.get_or_insert_with(StateKey::new); // Silently skip encryption events if they are not allowed if pdu_builder.event_type == TimelineEventType::RoomEncryption - && !services.globals.allow_encryption() + && !services.config.allow_encryption { continue; } @@ -421,9 +419,7 @@ pub(crate) async fn create_room_route( drop(state_lock); for user_id in &body.invite { if services.users.user_is_ignored(sender_user, user_id).await { - return Err!(Request(Forbidden( - "You cannot invite users 
you have ignored to rooms." - ))); + continue; } else if services.users.user_is_ignored(user_id, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked @@ -609,24 +605,42 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result, + State(ref services): State, ref body: Ruma, ) -> Result { + let event_id = &body.event_id; + let room_id = &body.room_id; + let event = services .rooms .timeline - .get_pdu(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))); - - let token = services - .rooms - .timeline - .get_pdu_count(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event not found.")))); + .get_pdu(event_id) + .map_err(|_| err!(Request(NotFound("Event {} not found.", event_id)))); let visible = services .rooms .state_accessor - .user_can_see_event(body.sender_user(), &body.room_id, &body.event_id) + .user_can_see_event(body.sender_user(), room_id, event_id) .map(Ok); - let (token, mut event, visible) = try_join!(token, event, visible)?; + let (mut event, visible) = try_join(event, visible).await?; - if !visible - || ignored_filter(&services, (token, event.clone()), body.sender_user()) - .await - .is_none() - { + if !visible || is_ignored_pdu(services, &event, body.sender_user()).await { return Err!(Request(Forbidden("You don't have permission to view this event."))); } - if event.event_id() != &body.event_id || event.room_id() != body.room_id { - return Err!(Request(NotFound("Event not found"))); - } + debug_assert!( + event.event_id() == event_id && event.room_id() == room_id, + "Fetched PDU must match requested" + ); event.add_age().ok(); - let event = event.to_room_event(); - - Ok(get_room_event::v3::Response { event }) + Ok(get_room_event::v3::Response { event: event.into_room_event() }) } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 233d180f..ca63610b 100644 --- 
a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -1,8 +1,7 @@ use axum::extract::State; use conduwuit::{ - at, - utils::{stream::TryTools, BoolExt}, - Err, PduEvent, Result, + Err, PduEvent, Result, at, + utils::{BoolExt, stream::TryTools}, }; use futures::TryStreamExt; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; @@ -56,7 +55,7 @@ pub(crate) async fn room_initial_sync_route( chunk: events .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), }; diff --git a/src/api/client/room/mod.rs b/src/api/client/room/mod.rs index 16fcadab..86d68f7e 100644 --- a/src/api/client/room/mod.rs +++ b/src/api/client/room/mod.rs @@ -2,9 +2,14 @@ mod aliases; mod create; mod event; mod initial_sync; +mod summary; mod upgrade; pub(crate) use self::{ - aliases::get_room_aliases_route, create::create_room_route, event::get_room_event_route, - initial_sync::room_initial_sync_route, upgrade::upgrade_room_route, + aliases::get_room_aliases_route, + create::create_room_route, + event::get_room_event_route, + initial_sync::room_initial_sync_route, + summary::{get_room_summary, get_room_summary_legacy}, + upgrade::upgrade_room_route, }; diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs new file mode 100644 index 00000000..67d2e2ad --- /dev/null +++ b/src/api/client/room/summary.rs @@ -0,0 +1,330 @@ +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduwuit::{ + Err, Result, debug_warn, trace, + utils::{IterStream, future::TryExtExt}, +}; +use futures::{ + FutureExt, StreamExt, + future::{OptionFuture, join3}, + stream::FuturesUnordered, +}; +use ruma::{ + OwnedServerName, RoomId, UserId, + api::{ + client::room::get_summary, + federation::space::{SpaceHierarchyParentSummary, get_hierarchy}, + }, + events::room::member::MembershipState, + space::SpaceRoomJoinRule::{self, *}, +}; +use service::Services; + +use crate::{Ruma, 
RumaResponse}; + +/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` +/// +/// Returns a short description of the state of a room. +/// +/// This is the "wrong" endpoint that some implementations/clients may use +/// according to the MSC. Request and response bodies are the same as +/// `get_room_summary`. +/// +/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) +pub(crate) async fn get_room_summary_legacy( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result> { + get_room_summary(State(services), InsecureClientIp(client), body) + .boxed() + .await + .map(RumaResponse) +} + +/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` +/// +/// Returns a short description of the state of a room. +/// +/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) +#[tracing::instrument(skip_all, fields(%client), name = "room_summary")] +pub(crate) async fn get_room_summary( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let (room_id, servers) = services + .rooms + .alias + .resolve_with_servers(&body.room_id_or_alias, Some(body.via.clone())) + .await?; + + if services.rooms.metadata.is_banned(&room_id).await { + return Err!(Request(Forbidden("This room is banned on this homeserver."))); + } + + room_summary_response(&services, &room_id, &servers, body.sender_user.as_deref()) + .boxed() + .await +} + +async fn room_summary_response( + services: &Services, + room_id: &RoomId, + servers: &[OwnedServerName], + sender_user: Option<&UserId>, +) -> Result { + if services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await + { + return local_room_summary_response(services, room_id, sender_user) + .boxed() + .await; + } + + let room = + remote_room_summary_hierarchy_response(services, room_id, servers, 
sender_user).await?; + + Ok(get_summary::msc3266::Response { + room_id: room_id.to_owned(), + canonical_alias: room.canonical_alias, + avatar_url: room.avatar_url, + guest_can_join: room.guest_can_join, + name: room.name, + num_joined_members: room.num_joined_members, + topic: room.topic, + world_readable: room.world_readable, + join_rule: room.join_rule, + room_type: room.room_type, + room_version: room.room_version, + encryption: room.encryption, + allowed_room_ids: room.allowed_room_ids, + membership: sender_user.is_some().then_some(MembershipState::Leave), + }) +} + +async fn local_room_summary_response( + services: &Services, + room_id: &RoomId, + sender_user: Option<&UserId>, +) -> Result { + trace!(?sender_user, "Sending local room summary response for {room_id:?}"); + let join_rule = services.rooms.state_accessor.get_join_rules(room_id); + let world_readable = services.rooms.state_accessor.is_world_readable(room_id); + let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); + + let (join_rule, world_readable, guest_can_join) = + join3(join_rule, world_readable, guest_can_join).await; + trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}"); + + user_can_see_summary( + services, + room_id, + &join_rule.clone().into(), + guest_can_join, + world_readable, + join_rule.allowed_rooms(), + sender_user, + ) + .await?; + + let canonical_alias = services + .rooms + .state_accessor + .get_canonical_alias(room_id) + .ok(); + + let name = services.rooms.state_accessor.get_name(room_id).ok(); + + let topic = services.rooms.state_accessor.get_room_topic(room_id).ok(); + + let room_type = services.rooms.state_accessor.get_room_type(room_id).ok(); + + let avatar_url = services + .rooms + .state_accessor + .get_avatar(room_id) + .map(|res| res.into_option().unwrap_or_default().url); + + let room_version = services.rooms.state.get_room_version(room_id).ok(); + + let encryption = services + .rooms + .state_accessor + .get_room_encryption(room_id) + 
.ok(); + + let num_joined_members = services + .rooms + .state_cache + .room_joined_count(room_id) + .unwrap_or(0); + + let membership: OptionFuture<_> = sender_user + .map(|sender_user| { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .map_ok_or(MembershipState::Leave, |content| content.membership) + }) + .into(); + + let ( + canonical_alias, + name, + num_joined_members, + topic, + avatar_url, + room_type, + room_version, + encryption, + membership, + ) = futures::join!( + canonical_alias, + name, + num_joined_members, + topic, + avatar_url, + room_type, + room_version, + encryption, + membership, + ); + + Ok(get_summary::msc3266::Response { + room_id: room_id.to_owned(), + canonical_alias, + avatar_url, + guest_can_join, + name, + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + topic, + world_readable, + room_type, + room_version, + encryption, + membership, + allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), + join_rule: join_rule.into(), + }) +} + +/// used by MSC3266 to fetch a room's info if we do not know about it +async fn remote_room_summary_hierarchy_response( + services: &Services, + room_id: &RoomId, + servers: &[OwnedServerName], + sender_user: Option<&UserId>, +) -> Result { + trace!(?sender_user, ?servers, "Sending remote room summary response for {room_id:?}"); + if !services.config.allow_federation { + return Err!(Request(Forbidden("Federation is disabled."))); + } + + if services.rooms.metadata.is_disabled(room_id).await { + return Err!(Request(Forbidden( + "Federaton of room {room_id} is currently disabled on this server." 
+ ))); + } + + let request = get_hierarchy::v1::Request::new(room_id.to_owned()); + + let mut requests: FuturesUnordered<_> = servers + .iter() + .map(|server| { + services + .sending + .send_federation_request(server, request.clone()) + }) + .collect(); + + while let Some(Ok(response)) = requests.next().await { + trace!("{response:?}"); + let room = response.room.clone(); + if room.room_id != room_id { + debug_warn!( + "Room ID {} returned does not belong to the requested room ID {}", + room.room_id, + room_id + ); + continue; + } + + return user_can_see_summary( + services, + room_id, + &room.join_rule, + room.guest_can_join, + room.world_readable, + room.allowed_room_ids.iter().map(AsRef::as_ref), + sender_user, + ) + .await + .map(|()| room); + } + + Err!(Request(NotFound( + "Room is unknown to this server and was unable to fetch over federation with the \ + provided servers available" + ))) +} + +async fn user_can_see_summary<'a, I>( + services: &Services, + room_id: &RoomId, + join_rule: &SpaceRoomJoinRule, + guest_can_join: bool, + world_readable: bool, + allowed_room_ids: I, + sender_user: Option<&UserId>, +) -> Result +where + I: Iterator + Send, +{ + let is_public_room = matches!(join_rule, Public | Knock | KnockRestricted); + match sender_user { + | Some(sender_user) => { + let user_can_see_state_events = services + .rooms + .state_accessor + .user_can_see_state_events(sender_user, room_id); + let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false); + let user_in_allowed_restricted_room = allowed_room_ids + .stream() + .any(|room| services.rooms.state_cache.is_joined(sender_user, room)); + + let (user_can_see_state_events, is_guest, user_in_allowed_restricted_room) = + join3(user_can_see_state_events, is_guest, user_in_allowed_restricted_room) + .boxed() + .await; + + if user_can_see_state_events + || (is_guest && guest_can_join) + || is_public_room + || user_in_allowed_restricted_room + { + return Ok(()); + } + + 
Err!(Request(Forbidden( + "Room is not world readable, not publicly accessible/joinable, restricted room \ + conditions not met, and guest access is forbidden. Not allowed to see details \ + of this room." + ))) + }, + | None => { + if is_public_room || world_readable { + return Ok(()); + } + + Err!(Request(Forbidden( + "Room is not world readable or publicly accessible/joinable, authentication is \ + required" + ))) + }, + } +} diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 2f9706f4..9ec0b3bb 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,19 +1,23 @@ use std::cmp::max; use axum::extract::State; -use conduwuit::{err, info, pdu::PduBuilder, Error, Result}; +use conduwuit::{ + Error, Result, err, info, + matrix::{StateKey, pdu::PduBuilder}, +}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, RoomId, RoomVersionId, api::client::{error::ErrorKind, room::upgrade_room}, events::{ + StateEventType, TimelineEventType, room::{ member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, tombstone::RoomTombstoneEventContent, }, - StateEventType, TimelineEventType, }, - int, CanonicalJsonObject, RoomId, RoomVersionId, + int, }; use serde_json::{json, value::to_raw_value}; @@ -77,7 +81,7 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomTombstoneEventContent { + PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), }), @@ -102,7 +106,7 @@ pub(crate) async fn upgrade_room_route( // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( body.room_id.clone(), - (*tombstone_event_id).to_owned(), + Some(tombstone_event_id), )); // Send a m.room.create event containing a predecessor field and the applicable @@ -159,7 +163,7 @@ 
pub(crate) async fn upgrade_room_route( content: to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(String::new()), + state_key: Some(StateKey::new()), redacts: None, timestamp: None, }, @@ -188,7 +192,7 @@ pub(crate) async fn upgrade_room_route( }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_user.to_string()), + state_key: Some(sender_user.as_str().into()), redacts: None, timestamp: None, }, @@ -217,7 +221,7 @@ pub(crate) async fn upgrade_room_route( PduBuilder { event_type: event_type.to_string().into(), content: event_content, - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -272,7 +276,7 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { + PduBuilder::state(StateKey::new(), &RoomPowerLevelsEventContent { events_default: new_level, invite: new_level, ..power_levels_event_content diff --git a/src/api/client/search.rs b/src/api/client/search.rs index 898dfc7f..d4dcde57 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -2,23 +2,23 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - at, is_true, + Err, Result, at, is_true, + matrix::pdu::PduEvent, result::FlatOk, - utils::{stream::ReadyExt, IterStream}, - Err, PduEvent, Result, + utils::{IterStream, stream::ReadyExt}, }; -use futures::{future::OptionFuture, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use conduwuit_service::{Services, rooms::search::RoomQuery}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture}; use ruma::{ + OwnedRoomId, RoomId, UInt, UserId, api::client::search::search_events::{ self, v3::{Criteria, EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, }, events::AnyStateEvent, serde::Raw, - OwnedRoomId, RoomId, UInt, UserId, 
}; use search_events::v3::{Request, Response}; -use service::{rooms::search::RoomQuery, Services}; use crate::Ruma; @@ -144,7 +144,7 @@ async fn category_room_events( .map(at!(2)) .flatten() .stream() - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .map(|result| SearchResult { rank: None, result: Some(result), diff --git a/src/api/client/send.rs b/src/api/client/send.rs index 39340070..f753fa65 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, Result, err, matrix::pdu::PduBuilder, utils}; use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; use serde_json::from_str; -use crate::{service::pdu::PduBuilder, utils, Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` /// @@ -25,8 +25,7 @@ pub(crate) async fn send_message_event_route( let appservice_info = body.appservice_info.as_ref(); // Forbid m.room.encrypted if encryption is disabled - if MessageLikeEventType::RoomEncrypted == body.event_type - && !services.globals.allow_encryption() + if MessageLikeEventType::RoomEncrypted == body.event_type && !services.config.allow_encryption { return Err!(Request(Forbidden("Encryption has been disabled"))); } diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 7155351c..2499a43d 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,11 +2,15 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{debug, err, info, utils::ReadyExt, warn, Err}; +use conduwuit::{ + Err, Error, Result, debug, err, info, utils, + utils::{ReadyExt, hash}, +}; +use conduwuit_service::uiaa::SESSION_ID_LENGTH; use futures::StreamExt; use ruma::{ + UserId, api::client::{ - error::ErrorKind, session::{ get_login_token, get_login_types::{ @@ -21,12 +25,10 @@ use ruma::{ }, 
uiaa, }, - OwnedUserId, UserId, }; -use service::uiaa::SESSION_ID_LENGTH; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{utils, utils::hash, Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/v3/login` /// @@ -67,6 +69,8 @@ pub(crate) async fn login_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { + let emergency_mode_enabled = services.config.emergency_password.is_some(); + // Validate login method // TODO: Other login methods let user_id = match &body.login_info { @@ -78,36 +82,67 @@ pub(crate) async fn login_route( .. }) => { debug!("Got password login type"); - let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = - identifier - { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services.globals.server_name(), - ) - } else if let Some(user) = user { - OwnedUserId::parse(user) - } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err!(Request(Forbidden("Bad login type."))); - } - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name(user_id, &services.config.server_name) + } else if let Some(user) = user { + UserId::parse_with_server_name(user, &services.config.server_name) + } else { + return Err!(Request(Unknown( + debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") + ))); + } + .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + let lowercased_user_id = UserId::parse_with_server_name( + user_id.localpart().to_lowercase(), + &services.config.server_name, + )?; + + if !services.globals.user_is_local(&user_id) + || !services.globals.user_is_local(&lowercased_user_id) + { + return Err!(Request(Unknown("User ID does not belong to this homeserver"))); + } + + // first try the username as-is let hash = 
services .users .password_hash(&user_id) .await - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + .inspect_err(|e| debug!("{e}")); - if hash.is_empty() { - return Err!(Request(UserDeactivated("The user has been deactivated"))); + match hash { + | Ok(hash) => { + if hash.is_empty() { + return Err!(Request(UserDeactivated("The user has been deactivated"))); + } + + hash::verify_password(password, &hash) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + + user_id + }, + | Err(_e) => { + let hash_lowercased_user_id = services + .users + .password_hash(&lowercased_user_id) + .await + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + + if hash_lowercased_user_id.is_empty() { + return Err!(Request(UserDeactivated("The user has been deactivated"))); + } + + hash::verify_password(password, &hash_lowercased_user_id) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + + lowercased_user_id + }, } - - if hash::verify_password(password, &hash).is_err() { - return Err!(Request(Forbidden("Wrong username or password."))); - } - - user_id }, | login::v3::LoginInfo::Token(login::v3::Token { token }) => { debug!("Got token login type"); @@ -122,46 +157,38 @@ pub(crate) async fn login_route( user, }) => { debug!("Got appservice login type"); + + let Some(ref info) = body.appservice_info else { + return Err!(Request(MissingToken("Missing appservice token."))); + }; + let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services.globals.server_name(), - ) + UserId::parse_with_server_name(user_id, &services.config.server_name) } else if let Some(user) = user { - OwnedUserId::parse(user) + UserId::parse_with_server_name(user, &services.config.server_name) } else { - warn!("Bad login type: 
{:?}", &body.login_info); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); + return Err!(Request(Unknown( + debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") + ))); } - .map_err(|e| { - warn!("Failed to parse username from appservice logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; - if let Some(ref info) = body.appservice_info { - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", - )); - } - } else { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + if !services.globals.user_is_local(&user_id) { + return Err!(Request(Unknown("User ID does not belong to this homeserver"))); + } + + if !info.is_user_match(&user_id) && !emergency_mode_enabled { + return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } user_id }, | _ => { - warn!("Unsupported or unknown login type: {:?}", &body.login_info); - debug!("JSON body: {:?}", &body.json_body); - return Err(Error::BadRequest( - ErrorKind::Unknown, - "Unsupported or unknown login type.", - )); + debug!("/login json_body: {:?}", &body.json_body); + return Err!(Request(Unknown( + debug_warn!(?body.login_info, "Invalid or unsupported login type") + ))); }, }; @@ -214,9 +241,6 @@ pub(crate) async fn login_route( info!("{user_id} logged in"); - // home_server is deprecated but apparently must still be sent despite it being - // deprecated over 6 years ago. initially i thought this macro was unnecessary, - // but ruma uses this same macro for the same reason so... 
#[allow(deprecated)] Ok(login::v3::Response { user_id, @@ -224,7 +248,7 @@ pub(crate) async fn login_route( device_id, well_known: client_discovery_info, expires_in: None, - home_server: Some(services.globals.server_name().to_owned()), + home_server: Some(services.config.server_name.clone()), refresh_token: None, }) } @@ -259,26 +283,32 @@ pub(crate) async fn login_token_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } - // Success! - } else if let Some(json) = body.json_body.as_ref() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, json); + // Success! 
+ }, + | _ => match body.json_body.as_ref() { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err!(Request(NotJson("No JSON body was sent when required."))); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("No JSON body was sent when required."))); + }, + }, } let login_token = utils::random_string(TOKEN_LENGTH); diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 409c9083..92768926 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -1,12 +1,25 @@ -use std::str::FromStr; - -use axum::extract::State; -use ruma::{ - api::client::{error::ErrorKind, space::get_hierarchy}, - UInt, +use std::{ + collections::{BTreeSet, VecDeque}, + str::FromStr, }; -use crate::{service::rooms::spaces::PaginationToken, Error, Result, Ruma}; +use axum::extract::State; +use conduwuit::{ + Err, Result, + utils::{future::TryExtExt, stream::IterStream}, +}; +use conduwuit_service::{ + Services, + rooms::spaces::{ + PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk, + }, +}; +use futures::{StreamExt, TryFutureExt, future::OptionFuture}; +use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, +}; + +use crate::Ruma; /// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy` /// @@ -16,8 +29,6 @@ pub(crate) async fn get_hierarchy_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = body .limit .unwrap_or_else(|| UInt::from(10_u32)) @@ -36,23 +47,149 @@ pub(crate) async fn get_hierarchy_route( // Should prevent unexpeded behaviour in (bad) clients if let Some(ref token) = key { if token.suggested_only != body.suggested_only || token.max_depth != max_depth { - return Err(Error::BadRequest( - 
ErrorKind::InvalidParam, - "suggested_only and max_depth cannot change on paginated requests", - )); + return Err!(Request(InvalidParam( + "suggested_only and max_depth cannot change on paginated requests" + ))); } } - services - .rooms - .spaces - .get_client_hierarchy( - sender_user, - &body.room_id, - limit.try_into().unwrap_or(10), - key.map_or(vec![], |token| token.short_room_ids), - max_depth.into(), - body.suggested_only, - ) - .await + get_client_hierarchy( + &services, + body.sender_user(), + &body.room_id, + limit.try_into().unwrap_or(10), + max_depth.try_into().unwrap_or(usize::MAX), + body.suggested_only, + key.as_ref() + .into_iter() + .flat_map(|t| t.short_room_ids.iter()), + ) + .await +} + +async fn get_client_hierarchy<'a, ShortRoomIds>( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + limit: usize, + max_depth: usize, + suggested_only: bool, + short_room_ids: ShortRoomIds, +) -> Result +where + ShortRoomIds: Iterator + Clone + Send + Sync + 'a, +{ + type Via = Vec; + type Entry = (OwnedRoomId, Via); + type Rooms = VecDeque; + + let mut queue: Rooms = [( + room_id.to_owned(), + room_id + .server_name() + .map(ToOwned::to_owned) + .into_iter() + .collect(), + )] + .into(); + + let mut rooms = Vec::with_capacity(limit); + let mut parents = BTreeSet::new(); + while let Some((current_room, via)) = queue.pop_front() { + let summary = services + .rooms + .spaces + .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) + .await?; + + match (summary, current_room == room_id) { + | (None | Some(SummaryAccessibility::Inaccessible), false) => { + // Just ignore other unavailable rooms + }, + | (None, true) => { + return Err!(Request(Forbidden("The requested room was not found"))); + }, + | (Some(SummaryAccessibility::Inaccessible), true) => { + return Err!(Request(Forbidden("The requested room is inaccessible"))); + }, + | (Some(SummaryAccessibility::Accessible(summary)), _) => { + let populate = parents.len() >= 
short_room_ids.clone().count(); + + let mut children: Vec = get_parent_children_via(&summary, suggested_only) + .filter(|(room, _)| !parents.contains(room)) + .rev() + .map(|(key, val)| (key, val.collect())) + .collect(); + + if !populate { + children = children + .iter() + .rev() + .stream() + .skip_while(|(room, _)| { + services + .rooms + .short + .get_shortroomid(room) + .map_ok(|short| { + Some(&short) != short_room_ids.clone().nth(parents.len()) + }) + .unwrap_or_else(|_| false) + }) + .map(Clone::clone) + .collect::>() + .await + .into_iter() + .rev() + .collect(); + } + + if populate { + rooms.push(summary_to_chunk(summary.clone())); + } else if queue.is_empty() && children.is_empty() { + return Err!(Request(InvalidParam("Room IDs in token were not found."))); + } + + parents.insert(current_room.clone()); + if rooms.len() >= limit { + break; + } + + if parents.len() > max_depth { + continue; + } + + queue.extend(children); + }, + } + } + + let next_batch: OptionFuture<_> = queue + .pop_front() + .map(|(room, _)| async move { + parents.insert(room); + + let next_short_room_ids: Vec<_> = parents + .iter() + .stream() + .filter_map(|room_id| services.rooms.short.get_shortroomid(room_id).ok()) + .collect() + .await; + + (next_short_room_ids.iter().ne(short_room_ids) && !next_short_room_ids.is_empty()) + .then_some(PaginationToken { + short_room_ids: next_short_room_ids, + limit: limit.try_into().ok()?, + max_depth: max_depth.try_into().ok()?, + suggested_only, + }) + .as_ref() + .map(PaginationToken::to_string) + }) + .into(); + + Ok(get_hierarchy::v1::Response { + next_batch: next_batch.await.flatten(), + rooms, + }) } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 8555f88b..2ddc8f14 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,21 +1,26 @@ use axum::extract::State; -use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, PduEvent, Result}; +use conduwuit::{ + Err, Result, err, + matrix::pdu::{PduBuilder, 
PduEvent}, + utils::BoolExt, +}; +use conduwuit_service::Services; use futures::TryStreamExt; use ruma::{ + OwnedEventId, RoomId, UserId, api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, events::{ + AnyStateEventContent, StateEventType, room::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, + server_acl::RoomServerAclEventContent, }, - AnyStateEventContent, StateEventType, }, serde::Raw, - OwnedEventId, RoomId, UserId, }; -use service::Services; use crate::{Ruma, RumaResponse}; @@ -26,7 +31,7 @@ pub(crate) async fn send_state_event_for_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); Ok(send_state_event::v3::Response { event_id: send_state_event_for_key_helper( @@ -102,7 +107,7 @@ pub(crate) async fn get_state_events_for_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services .rooms @@ -110,7 +115,9 @@ pub(crate) async fn get_state_events_for_key_route( .user_can_see_state_events(sender_user, &body.room_id) .await { - return Err!(Request(Forbidden("You don't have permission to view the room state."))); + return Err!(Request(NotFound(debug_warn!( + "You don't have permission to view the room state." 
+ )))); } let event = services @@ -172,7 +179,7 @@ async fn send_state_event_for_key_helper( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get())?, - state_key: Some(String::from(state_key)), + state_key: Some(state_key.into()), timestamp, ..Default::default() }, @@ -194,134 +201,200 @@ async fn allowed_to_send_state_event( ) -> Result { match event_type { | StateEventType::RoomCreate => { - return Err!(Request(BadJson( + return Err!(Request(BadJson(debug_warn!( + ?room_id, "You cannot update m.room.create after a room has been created." - ))); + )))); + }, + | StateEventType::RoomServerAcl => { + // prevents common ACL paw-guns as ACL management is difficult and prone to + // irreversible mistakes + match json.deserialize_as::() { + | Ok(acl_content) => { + if acl_content.allow_is_empty() { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with an empty allow key will permanently \ + brick the room for non-conduwuit's as this equates to no servers \ + being allowed to participate in this room." + )))); + } + + if acl_content.deny_contains("*") && acl_content.allow_contains("*") { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with a deny and allow key value of \"*\" will \ + permanently brick the room for non-conduwuit's as this equates to \ + no servers being allowed to participate in this room." + )))); + } + + if acl_content.deny_contains("*") + && !acl_content.is_allowed(services.globals.server_name()) + && !acl_content.allow_contains(services.globals.server_name().as_str()) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with a deny key value of \"*\" and without \ + your own server name in the allow key will result in you being \ + unable to participate in this room." 
+ )))); + } + + if !acl_content.allow_contains("*") + && !acl_content.is_allowed(services.globals.server_name()) + && !acl_content.allow_contains(services.globals.server_name().as_str()) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event for an allow key without \"*\" and without \ + your own server name in the allow key will result in you being \ + unable to participate in this room." + )))); + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room server ACL event is invalid: {e}" + )))); + }, + } }, - // Forbid m.room.encryption if encryption is disabled | StateEventType::RoomEncryption => - if !services.globals.allow_encryption() { + // Forbid m.room.encryption if encryption is disabled + if !services.config.allow_encryption { return Err!(Request(Forbidden("Encryption is disabled on this homeserver."))); }, - // admin room is a sensitive room, it should not ever be made public | StateEventType::RoomJoinRules => { + // admin room is a sensitive room, it should not ever be made public if let Ok(admin_room_id) = services.admin.get_admin_room().await { if admin_room_id == room_id { - if let Ok(join_rule) = - serde_json::from_str::(json.json().get()) - { - if join_rule.join_rule == JoinRule::Public { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made public" - ))); - } + match json.deserialize_as::() { + | Ok(join_rule) => + if join_rule.join_rule == JoinRule::Public { + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made public" + ))); + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room join rules event is invalid: {e}" + )))); + }, } } } }, - // admin room is a sensitive room, it should not ever be made world readable | StateEventType::RoomHistoryVisibility => { - if let Ok(visibility_content) = - serde_json::from_str::(json.json().get()) - { - if let Ok(admin_room_id) = services.admin.get_admin_room().await { - if admin_room_id 
== room_id - && visibility_content.history_visibility - == HistoryVisibility::WorldReadable - { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made world readable \ - (public room history)." - ))); - } + // admin room is a sensitive room, it should not ever be made world readable + if let Ok(admin_room_id) = services.admin.get_admin_room().await { + match json.deserialize_as::() { + | Ok(visibility_content) => { + if admin_room_id == room_id + && visibility_content.history_visibility + == HistoryVisibility::WorldReadable + { + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made world \ + readable (public room history)." + ))); + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room history visibility event is invalid: {e}" + )))); + }, } } }, | StateEventType::RoomCanonicalAlias => { - if let Ok(canonical_alias) = - serde_json::from_str::(json.json().get()) - { - let mut aliases = canonical_alias.alt_aliases.clone(); + match json.deserialize_as::() { + | Ok(canonical_alias_content) => { + let mut aliases = canonical_alias_content.alt_aliases.clone(); - if let Some(alias) = canonical_alias.alias { - aliases.push(alias); - } + if let Some(alias) = canonical_alias_content.alias { + aliases.push(alias); + } - for alias in aliases { - if !services.globals.server_is_ours(alias.server_name()) { - return Err!(Request(Forbidden( - "canonical_alias must be for this server" + for alias in aliases { + let (alias_room_id, _servers) = services + .rooms + .alias + .resolve_alias(&alias, None) + .await + .map_err(|e| { + err!(Request(Unknown("Failed resolving alias \"{alias}\": {e}"))) + })?; + + if alias_room_id != room_id { + return Err!(Request(BadAlias( + "Room alias {alias} does not belong to room {room_id}" + ))); + } + } + }, + | Err(e) => { + return Err!(Request(InvalidParam(debug_warn!( + "Room canonical alias event is invalid: {e}" + )))); + }, + } + }, + | StateEventType::RoomMember => 
match json.deserialize_as::() { + | Ok(membership_content) => { + let Ok(state_key) = UserId::parse(state_key) else { + return Err!(Request(BadJson( + "Membership event has invalid or non-existent state key" + ))); + }; + + if let Some(authorising_user) = + membership_content.join_authorized_via_users_server + { + if membership_content.membership != MembershipState::Join { + return Err!(Request(BadJson( + "join_authorised_via_users_server is only for member joins" + ))); + } + + if services + .rooms + .state_cache + .is_joined(state_key, room_id) + .await + { + return Err!(Request(InvalidParam( + "{state_key} is already joined, an authorising user is not required." + ))); + } + + if !services.globals.user_is_local(&authorising_user) { + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} does not belong to this \ + homeserver" ))); } if !services .rooms - .alias - .resolve_local_alias(&alias) + .state_cache + .is_joined(&authorising_user, room_id) .await - .is_ok_and(|room| room == room_id) - // Make sure it's the right room { - return Err!(Request(Forbidden( - "You are only allowed to send canonical_alias events when its \ - aliases already exist" + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} is not in the room, they \ + cannot authorise the join." ))); } } - } - }, - | StateEventType::RoomMember => { - let Ok(membership_content) = - serde_json::from_str::(json.json().get()) - else { + }, + | Err(e) => { return Err!(Request(BadJson( "Membership content must have a valid JSON body with at least a valid \ - membership state." 
+ membership state: {e}" ))); - }; - - let Ok(state_key) = UserId::parse(state_key) else { - return Err!(Request(BadJson( - "Membership event has invalid or non-existent state key" - ))); - }; - - if let Some(authorising_user) = membership_content.join_authorized_via_users_server { - if membership_content.membership != MembershipState::Join { - return Err!(Request(BadJson( - "join_authorised_via_users_server is only for member joins" - ))); - } - - if services - .rooms - .state_cache - .is_joined(state_key, room_id) - .await - { - return Err!(Request(InvalidParam( - "{state_key} is already joined, an authorising user is not required." - ))); - } - - if !services.globals.user_is_local(&authorising_user) { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} does not belong to this homeserver" - ))); - } - - if !services - .rooms - .state_cache - .is_joined(&authorising_user, room_id) - .await - { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} is not in the room, they cannot \ - authorise the join." 
- ))); - } - } + }, }, | _ => (), } diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 1967f4a2..40370160 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -3,25 +3,22 @@ mod v4; mod v5; use conduwuit::{ - utils::{ - stream::{BroadbandExt, ReadyExt, TryIgnore}, - IterStream, - }, - PduCount, + Error, PduCount, Result, + matrix::pdu::PduEvent, + utils::stream::{BroadbandExt, ReadyExt, TryIgnore}, }; -use futures::{pin_mut, StreamExt}; +use conduwuit_service::Services; +use futures::{StreamExt, pin_mut}; use ruma::{ - directory::RoomTypeFilter, + RoomId, UserId, events::TimelineEventType::{ self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker, }, - RoomId, UserId, }; pub(crate) use self::{ v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, }; -use crate::{service::Services, Error, PduEvent, Result}; pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; @@ -76,41 +73,13 @@ async fn share_encrypted_room( .state_cache .get_shared_rooms(sender_user, user_id) .ready_filter(|&room_id| Some(room_id) != ignore_room) - .broad_any(|other_room_id| { + .map(ToOwned::to_owned) + .broad_any(|other_room_id| async move { services .rooms .state_accessor - .is_encrypted_room(other_room_id) + .is_encrypted_room(&other_room_id) + .await }) .await } - -pub(crate) async fn filter_rooms<'a>( - services: &Services, - rooms: &[&'a RoomId], - filter: &[RoomTypeFilter], - negate: bool, -) -> Vec<&'a RoomId> { - rooms - .iter() - .stream() - .filter_map(|r| async move { - let room_type = services.rooms.state_accessor.get_room_type(r).await; - - if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { - return None; - } - - let room_type_filter = RoomTypeFilter::from(room_type.ok()); - - let include = if negate { - !filter.contains(&room_type_filter) - } else { - filter.is_empty() || filter.contains(&room_type_filter) - }; 
- - include.then_some(r) - }) - .collect() - .await -} diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index cd4dfc90..8eac6b66 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -1,72 +1,78 @@ use std::{ cmp::{self}, - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, time::Duration, }; use axum::extract::State; use conduwuit::{ - at, err, error, extract_variant, is_equal_to, - pdu::EventHash, + Result, at, err, error, extract_variant, is_equal_to, + matrix::{ + Event, + pdu::{EventHash, PduCount, PduEvent}, + }, + pair_of, ref_at, result::FlatOk, utils::{ - self, - future::OptionExt, + self, BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::{OptionStream, ReadyEqExt}, math::ruma_from_u64, - stream::{BroadbandExt, Tools, WidebandExt}, - BoolExt, IterStream, ReadyExt, TryFutureExtExt, + stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, - Error, PduCount, PduEvent, Result, + warn, }; use conduwuit_service::{ + Services, rooms::{ lazy_loading, lazy_loading::{Options, Witness}, short::ShortStateHash, }, - Services, }; use futures::{ - future::{join, join3, join4, join5, try_join, try_join4, OptionFuture}, FutureExt, StreamExt, TryFutureExt, TryStreamExt, + future::{OptionFuture, join, join3, join4, join5, try_join, try_join4}, + pin_mut, }; use ruma::{ + DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, api::client::{ filter::FilterDefinition, sync::sync_events::{ - self, + self, DeviceLists, UnreadNotificationsCount, v3::{ Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, KnockState, KnockedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State as RoomState, Timeline, ToDevice, }, - DeviceLists, UnreadNotificationsCount, }, uiaa::UiaaResponse, }, events::{ - presence::PresenceEvent, - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, 
AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, + presence::{PresenceEvent, PresenceEventContent}, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, + uint, }; +use service::rooms::short::{ShortEventId, ShortStateKey}; use super::{load_timeline, share_encrypted_room}; -use crate::{client::ignored_filter, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse, client::ignored_filter}; #[derive(Default)] struct StateChanges { heroes: Option>, joined_member_count: Option, invited_member_count: Option, - joined_since_last_sync: bool, state_events: Vec, + device_list_updates: HashSet, + left_encrypted_users: HashSet, } -type PresenceUpdates = HashMap; +type PresenceUpdates = HashMap; /// # `GET /_matrix/client/r0/sync` /// @@ -118,7 +124,7 @@ pub(crate) async fn sync_events_route( let (sender_user, sender_device) = body.sender(); // Presence update - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, &body.body.set_presence) @@ -166,8 +172,8 @@ pub(crate) async fn build_sync_events( let full_state = body.body.full_state; let filter = match body.body.filter.as_ref() { | None => FilterDefinition::default(), - | Some(Filter::FilterDefinition(ref filter)) => filter.clone(), - | Some(Filter::FilterId(ref filter_id)) => services + | Some(Filter::FilterDefinition(filter)) => filter.clone(), + | Some(Filter::FilterId(filter_id)) => services .users .get_filter(sender_user, filter_id) .await @@ -219,6 +225,7 @@ pub(crate) async fn build_sync_events( sender_user, next_batch, full_state, + filter.room.include_leave, &filter, ) .map_ok(move |left_room| (room_id, left_room)) @@ -278,27 +285,27 @@ pub(crate) async fn build_sync_events( }); let presence_updates: OptionFuture<_> = services - .globals - .allow_local_presence() + .config + .allow_local_presence .then(|| 
process_presence_updates(services, since, sender_user)) .into(); let account_data = services .account_data - .changes_since(None, sender_user, since) + .changes_since(None, sender_user, since, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect(); // Look for device list updates of this account let keys_changed = services .users - .keys_changed(sender_user, since, None) + .keys_changed(sender_user, since, Some(next_batch)) .map(ToOwned::to_owned) .collect::>(); let to_device_events = services .users - .get_to_device_events(sender_user, sender_device) + .get_to_device_events(sender_user, sender_device, Some(since), Some(next_batch)) .collect::>(); let device_one_time_keys_count = services @@ -325,18 +332,16 @@ pub(crate) async fn build_sync_events( // If the user doesn't share an encrypted room with the target anymore, we need // to tell them - let device_list_left = left_encrypted_users + let device_list_left: HashSet<_> = left_encrypted_users .into_iter() .stream() .broad_filter_map(|user_id| async move { - let no_shared_encrypted_room = - !share_encrypted_room(services, sender_user, &user_id, None).await; - no_shared_encrypted_room.then_some(user_id) - }) - .ready_fold(HashSet::new(), |mut device_list_left, user_id| { - device_list_left.insert(user_id); - device_list_left + share_encrypted_room(services, sender_user, &user_id, None) + .await + .eq(&false) + .then_some(user_id) }) + .collect() .await; let response = sync_events::v3::Response { @@ -351,9 +356,11 @@ pub(crate) async fn build_sync_events( next_batch: next_batch.to_string(), presence: Presence { events: presence_updates - .unwrap_or_default() - .into_values() - .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully")) + .into_iter() + .flat_map(IntoIterator::into_iter) + .map(|(sender, content)| PresenceEvent { content, sender }) + .map(|ref event| Raw::new(event)) + .filter_map(Result::ok) .collect(), }, rooms: Rooms { @@ -390,45 
+397,8 @@ async fn process_presence_updates( .map_ok(move |event| (user_id, event)) .ok() }) - .ready_fold(PresenceUpdates::new(), |mut updates, (user_id, event)| { - match updates.entry(user_id.into()) { - | Entry::Vacant(slot) => { - let mut new_event = event; - new_event.content.last_active_ago = match new_event.content.currently_active { - | Some(true) => None, - | _ => new_event.content.last_active_ago, - }; - - slot.insert(new_event); - }, - | Entry::Occupied(mut slot) => { - let curr_event = slot.get_mut(); - let curr_content = &mut curr_event.content; - let new_content = event.content; - - // Update existing presence event with more info - curr_content.presence = new_content.presence; - curr_content.status_msg = new_content - .status_msg - .or_else(|| curr_content.status_msg.take()); - curr_content.displayname = new_content - .displayname - .or_else(|| curr_content.displayname.take()); - curr_content.avatar_url = new_content - .avatar_url - .or_else(|| curr_content.avatar_url.take()); - curr_content.currently_active = new_content - .currently_active - .or(curr_content.currently_active); - curr_content.last_active_ago = match curr_content.currently_active { - | Some(true) => None, - | _ => new_content.last_active_ago.or(curr_content.last_active_ago), - }; - }, - }; - - updates - }) + .map(|(user_id, event)| (user_id.to_owned(), event.content)) + .collect() .await } @@ -449,6 +419,7 @@ async fn handle_left_room( sender_user: &UserId, next_batch: u64, full_state: bool, + include_leave: bool, filter: &FilterDefinition, ) -> Result> { let left_count = services @@ -463,9 +434,16 @@ async fn handle_left_room( return Ok(None); } - if !services.rooms.metadata.exists(room_id).await { + let is_not_found = services.rooms.metadata.exists(room_id).eq(&false); + + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + let is_banned = services.rooms.metadata.is_banned(room_id); + + pin_mut!(is_not_found, is_disabled, is_banned); + if 
is_not_found.or(is_disabled).or(is_banned).await { // This is just a rejected invite, not a room we know - // Insert a leave event anyways + // Insert a leave event anyways for the client let event = PduEvent { event_id: EventId::new(services.globals.server_name()), sender: sender_user.to_owned(), @@ -476,7 +454,7 @@ async fn handle_left_room( kind: RoomMember, content: serde_json::from_str(r#"{"membership":"leave"}"#) .expect("this is valid JSON"), - state_key: Some(sender_user.to_string()), + state_key: Some(sender_user.as_str().into()), unsigned: None, // The following keys are dropped on conversion room_id: room_id.clone(), @@ -496,7 +474,7 @@ async fn handle_left_room( events: Vec::new(), }, state: RoomState { - events: vec![event.to_sync_state_event()], + events: vec![event.into_sync_state_event()], }, })); } @@ -524,7 +502,7 @@ async fn handle_left_room( .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str()) .await else { - error!("Left room but no left state event"); + warn!("Left {room_id} but no left state event"); return Ok(None); }; @@ -534,7 +512,7 @@ async fn handle_left_room( .pdu_shortstatehash(&left_event_id) .await else { - error!(event_id = %left_event_id, "Leave event has no state"); + warn!(event_id = %left_event_id, "Leave event has no state in {room_id}"); return Ok(None); }; @@ -577,7 +555,11 @@ async fn handle_left_room( continue; }; - left_state_events.push(pdu.to_sync_state_event()); + if !include_leave && pdu.sender == sender_user { + continue; + } + + left_state_events.push(pdu.into_sync_state_event()); } } @@ -657,6 +639,40 @@ async fn load_joined_room( .await?; let (timeline_pdus, limited) = timeline; + let initial = since_shortstatehash.is_none(); + let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() + || filter.room.timeline.lazy_load_options.is_enabled(); + + let lazy_loading_context = &lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: 
Some(since), + options: Some(&filter.room.state.lazy_load_options), + }; + + // Reset lazy loading because this is an initial sync + let lazy_load_reset: OptionFuture<_> = initial + .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) + .into(); + + lazy_load_reset.await; + let witness: OptionFuture<_> = lazy_loading_enabled + .then(|| { + let witness: Witness = timeline_pdus + .iter() + .map(ref_at!(1)) + .map(Event::sender) + .map(Into::into) + .chain(receipt_events.keys().map(Into::into)) + .collect(); + + services + .rooms + .lazy_loading + .witness_retain(witness, lazy_loading_context) + }) + .into(); let last_notification_read: OptionFuture<_> = timeline_pdus .is_empty() @@ -668,10 +684,6 @@ async fn load_joined_room( }) .into(); - let no_state_changes = timeline_pdus.is_empty() - && (since_shortstatehash.is_none() - || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))); - let since_sender_member: OptionFuture<_> = since_shortstatehash .map(|short| { services @@ -682,125 +694,85 @@ async fn load_joined_room( }) .into(); + let (last_notification_read, since_sender_member, witness) = + join3(last_notification_read, since_sender_member, witness).await; + let joined_since_last_sync = since_sender_member - .await .flatten() .is_none_or(|content: RoomMemberEventContent| { content.membership != MembershipState::Join }); - let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() - || filter.room.timeline.lazy_load_options.is_enabled(); - - let generate_witness = - lazy_loading_enabled && (since_shortstatehash.is_none() || joined_since_last_sync); - - let lazy_reset = lazy_loading_enabled && since_shortstatehash.is_none(); - - let lazy_loading_context = &lazy_loading::Context { - user_id: sender_user, - device_id: sender_device, - room_id, - token: None, - options: Some(&filter.room.state.lazy_load_options), - }; - - // Reset lazy loading because this is an initial sync - let lazy_load_reset: OptionFuture<_> = 
lazy_reset - .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) - .into(); - - lazy_load_reset.await; - let witness: Option = generate_witness.then(|| { - timeline_pdus - .iter() - .map(|(_, pdu)| pdu.sender.clone()) - .chain(receipt_events.keys().cloned()) - .collect() - }); - - let witness: OptionFuture<_> = witness - .map(|witness| { - services - .rooms - .lazy_loading - .witness_retain(witness, lazy_loading_context) - }) - .into(); - - let witness = witness.await; - let mut device_list_updates = HashSet::::new(); - let mut left_encrypted_users = HashSet::::new(); let StateChanges { heroes, joined_member_count, invited_member_count, + mut state_events, + mut device_list_updates, + left_encrypted_users, + } = calculate_state_changes( + services, + sender_user, + room_id, + full_state, + filter, + since_shortstatehash, + current_shortstatehash, joined_since_last_sync, - state_events, - } = if no_state_changes { - StateChanges::default() - } else { - calculate_state_changes( - services, - sender_user, - room_id, - full_state, - filter, - &mut device_list_updates, - &mut left_encrypted_users, - since_shortstatehash, - current_shortstatehash, - joined_since_last_sync, - witness.as_ref(), - ) - .boxed() - .await? 
+ witness.as_ref(), + ) + .boxed() + .await?; + + let is_sender_membership = |pdu: &PduEvent| { + pdu.kind == StateEventType::RoomMember.into() + && pdu + .state_key + .as_deref() + .is_some_and(is_equal_to!(sender_user.as_str())) }; + let joined_sender_member: Option<_> = (joined_since_last_sync && timeline_pdus.is_empty()) + .then(|| { + state_events + .iter() + .position(is_sender_membership) + .map(|pos| state_events.swap_remove(pos)) + }) + .flatten(); + + let prev_batch = timeline_pdus.first().map(at!(0)).or_else(|| { + joined_sender_member + .is_some() + .then_some(since) + .map(Into::into) + }); + + let room_events = timeline_pdus + .into_iter() + .stream() + .wide_filter_map(|item| ignored_filter(services, item, sender_user)) + .map(at!(1)) + .chain(joined_sender_member.into_iter().stream()) + .map(|pdu| pdu.to_sync_room_event()) + .collect::>(); + let account_data_events = services .account_data - .changes_since(Some(room_id), sender_user, since) + .changes_since(Some(room_id), sender_user, since, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect(); // Look for device list updates in this room let device_updates = services .users - .room_keys_changed(room_id, since, None) + .room_keys_changed(room_id, since, Some(next_batch)) .map(|(user_id, _)| user_id) .map(ToOwned::to_owned) .collect::>(); - let room_events = timeline_pdus - .iter() - .stream() - .wide_filter_map(|item| ignored_filter(services, item.clone(), sender_user)) - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - - let typing_events = services - .rooms - .typing - .last_typing_update(room_id) - .and_then(|count| async move { - if count <= since { - return Ok(Vec::>::new()); - } - - let typings = services - .rooms - .typing - .typings_all(room_id, sender_user) - .await?; - - Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) - }) - .unwrap_or(Vec::new()); - - let send_notification_counts = last_notification_read - 
.is_none_or(|&count| count > since) - .await; + let send_notification_counts = last_notification_read.is_none_or(|count| count > since); let notification_count: OptionFuture<_> = send_notification_counts .then(|| { @@ -824,8 +796,27 @@ async fn load_joined_room( }) .into(); - let events = join3(room_events, account_data_events, typing_events); + let typing_events = services + .rooms + .typing + .last_typing_update(room_id) + .and_then(|count| async move { + if count <= since { + return Ok(Vec::>::new()); + } + + let typings = services + .rooms + .typing + .typings_all(room_id, sender_user) + .await?; + + Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) + }) + .unwrap_or(Vec::new()); + let unread_notifications = join(notification_count, highlight_count); + let events = join3(room_events, account_data_events, typing_events); let (unread_notifications, events, device_updates) = join3(unread_notifications, events, device_updates) .boxed() @@ -882,17 +873,13 @@ async fn load_joined_room( unread_notifications: UnreadNotificationsCount { highlight_count, notification_count }, timeline: Timeline { limited: limited || joined_since_last_sync, + prev_batch: prev_batch.as_ref().map(ToString::to_string), events: room_events, - prev_batch: timeline_pdus - .first() - .map(at!(0)) - .as_ref() - .map(ToString::to_string), }, state: RoomState { events: state_events - .iter() - .map(PduEvent::to_sync_state_event) + .into_iter() + .map(PduEvent::into_sync_state_event) .collect(), }, ephemeral: Ephemeral { events: edus }, @@ -919,14 +906,12 @@ async fn calculate_state_changes( room_id: &RoomId, full_state: bool, filter: &FilterDefinition, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, witness: Option<&Witness>, ) -> Result { - if since_shortstatehash.is_none() || joined_since_last_sync { + if since_shortstatehash.is_none() { 
calculate_state_initial( services, sender_user, @@ -944,11 +929,10 @@ async fn calculate_state_changes( room_id, full_state, filter, - device_list_updates, - left_encrypted_users, since_shortstatehash, current_shortstatehash, joined_since_last_sync, + witness, ) .await } @@ -961,7 +945,7 @@ async fn calculate_state_initial( sender_user: &UserId, room_id: &RoomId, full_state: bool, - filter: &FilterDefinition, + _filter: &FilterDefinition, current_shortstatehash: ShortStateHash, witness: Option<&Witness>, ) -> Result { @@ -979,20 +963,14 @@ async fn calculate_state_initial( .zip(event_ids.into_iter().stream()) .ready_filter_map(|item| Some((item.0.ok()?, item.1))) .ready_filter_map(|((event_type, state_key), event_id)| { - let lazy_load_enabled = filter.room.state.lazy_load_options.is_enabled() - || filter.room.timeline.lazy_load_options.is_enabled(); - - if lazy_load_enabled + let lazy = !full_state && event_type == StateEventType::RoomMember - && !full_state && state_key.as_str().try_into().is_ok_and(|user_id: &UserId| { sender_user != user_id && witness.is_some_and(|witness| !witness.contains(user_id)) - }) { - return None; - } + }); - Some(event_id) + lazy.or_some(event_id) }) .broad_filter_map(|event_id: OwnedEventId| async move { services.rooms.timeline.get_pdu(&event_id).await.ok() @@ -1011,130 +989,137 @@ async fn calculate_state_initial( heroes, joined_member_count, invited_member_count, - joined_since_last_sync: true, state_events, + ..Default::default() }) } #[tracing::instrument(name = "incremental", level = "trace", skip_all)] #[allow(clippy::too_many_arguments)] -async fn calculate_state_incremental( +async fn calculate_state_incremental<'a>( services: &Services, - sender_user: &UserId, + sender_user: &'a UserId, room_id: &RoomId, full_state: bool, _filter: &FilterDefinition, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, + 
witness: Option<&'a Witness>, ) -> Result { - // Incremental /sync - let since_shortstatehash = - since_shortstatehash.expect("missing since_shortstatehash on incremental sync"); + let since_shortstatehash = since_shortstatehash.unwrap_or(current_shortstatehash); - let mut delta_state_events = Vec::new(); - - if since_shortstatehash != current_shortstatehash { - let current_state_ids = services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .collect(); - - let since_state_ids = services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .collect(); - - let (current_state_ids, since_state_ids): ( - HashMap<_, OwnedEventId>, - HashMap<_, OwnedEventId>, - ) = join(current_state_ids, since_state_ids).await; - - current_state_ids - .iter() - .stream() - .ready_filter(|(key, id)| full_state || since_state_ids.get(key) != Some(id)) - .wide_filter_map(|(_, id)| services.rooms.timeline.get_pdu(id).ok()) - .ready_for_each(|pdu| delta_state_events.push(pdu)) - .await; - } + let state_changed = since_shortstatehash != current_shortstatehash; let encrypted_room = services .rooms .state_accessor .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); + .is_ok() + .await; - let since_encryption = services - .rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); - - let (encrypted_room, since_encryption) = join(encrypted_room, since_encryption).await; - - // Calculations: - let new_encrypted_room = encrypted_room && !since_encryption; - - let send_member_count = delta_state_events - .iter() - .any(|event| event.kind == RoomMember); - - if encrypted_room { - for state_event in &delta_state_events { - if state_event.kind != RoomMember { - continue; - } - - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == sender_user { - continue; - 
} - - let content: RoomMemberEventContent = state_event.get_content()?; - - match content.membership { - | MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .await - { - device_list_updates.insert(user_id.into()); - } - }, - | MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id.into()); - }, - | _ => {}, - } - } - } - } - - if joined_since_last_sync && encrypted_room || new_encrypted_room { - let updates: Vec = services + let state_get_shorteventid = |user_id: &'a UserId| { + services .rooms - .state_cache - .room_members(room_id) - .ready_filter(|user_id| sender_user != *user_id) - .filter_map(|user_id| { - share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .map(|res| res.or_some(user_id.to_owned())) - }) - .collect() - .await; + .state_accessor + .state_get_shortid( + current_shortstatehash, + &StateEventType::RoomMember, + user_id.as_str(), + ) + .ok() + }; - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend(updates); - } + let lazy_state_ids: OptionFuture<_> = witness + .filter(|_| !full_state && !encrypted_room) + .map(|witness| { + StreamExt::into_future( + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_shorteventid(user_id)), + ) + }) + .into(); + + let state_diff_ids: OptionFuture<_> = (!full_state && state_changed) + .then(|| { + StreamExt::into_future( + services + .rooms + .state_accessor + .state_added((since_shortstatehash, current_shortstatehash)) + .boxed(), + ) + }) + .into(); + + let current_state_ids: OptionFuture<_> = full_state + .then(|| { + StreamExt::into_future( + services + .rooms + .state_accessor + .state_full_shortids(current_shortstatehash) + .expect_ok() + .boxed(), + ) + }) + .into(); + + let state_events = current_state_ids + .stream() + .chain(state_diff_ids.stream()) + 
.broad_filter_map(|(shortstatekey, shorteventid)| async move { + if witness.is_none() || encrypted_room { + return Some(shorteventid); + } + + lazy_filter(services, sender_user, shortstatekey, shorteventid).await + }) + .chain(lazy_state_ids.stream()) + .broad_filter_map(|shorteventid| { + services + .rooms + .short + .get_eventid_from_short(shorteventid) + .ok() + }) + .broad_filter_map(|event_id: OwnedEventId| async move { + services.rooms.timeline.get_pdu(&event_id).await.ok() + }) + .collect::>() + .await; + + let (device_list_updates, left_encrypted_users) = state_events + .iter() + .stream() + .ready_filter(|_| encrypted_room) + .ready_filter(|state_event| state_event.kind == RoomMember) + .ready_filter_map(|state_event| { + let content: RoomMemberEventContent = state_event.get_content().ok()?; + let user_id: OwnedUserId = state_event.state_key.as_ref()?.parse().ok()?; + + Some((content, user_id)) + }) + .fold_default(|(mut dlu, mut leu): pair_of!(HashSet<_>), (content, user_id)| async move { + use MembershipState::*; + + let shares_encrypted_room = + |user_id| share_encrypted_room(services, sender_user, user_id, Some(room_id)); + + match content.membership { + | Leave => leu.insert(user_id), + | Join if joined_since_last_sync || !shares_encrypted_room(&user_id).await => + dlu.insert(user_id), + | _ => false, + }; + + (dlu, leu) + }) + .await; + + let send_member_count = state_events.iter().any(|event| event.kind == RoomMember); let (joined_member_count, invited_member_count, heroes) = if send_member_count { calculate_counts(services, room_id, sender_user).await? 
@@ -1146,11 +1131,29 @@ async fn calculate_state_incremental( heroes, joined_member_count, invited_member_count, - joined_since_last_sync, - state_events: delta_state_events, + state_events, + device_list_updates, + left_encrypted_users, }) } +async fn lazy_filter( + services: &Services, + sender_user: &UserId, + shortstatekey: ShortStateKey, + shorteventid: ShortEventId, +) -> Option { + let (event_type, state_key) = services + .rooms + .short + .get_statekey_from_short(shortstatekey) + .await + .ok()?; + + (event_type != StateEventType::RoomMember || state_key == sender_user.as_str()) + .then_some(shorteventid) +} + async fn calculate_counts( services: &Services, room_id: &RoomId, diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index b7967498..f153b2da 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,40 +6,44 @@ use std::{ use axum::extract::State; use conduwuit::{ - debug, error, extract_variant, + Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant, + matrix::TypeStateKey, utils::{ - math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, }, - warn, Error, PduCount, Result, + warn, +}; +use conduwuit_service::{ + Services, + rooms::read_receipt::pack_receipts, + sync::{into_db_key, into_snake_key}, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ - api::client::{ - error::ErrorKind, - sync::sync_events::{ - self, - v4::{SlidingOp, SlidingSyncRoomHero}, - DeviceLists, UnreadNotificationsCount, - }, + MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, + api::client::sync::sync_events::{ + self, DeviceLists, UnreadNotificationsCount, + v4::{SlidingOp, SlidingSyncRoomHero}, }, + directory::RoomTypeFilter, events::{ - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, 
StateEventType, TimelineEventType::*, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, + uint, }; -use service::rooms::read_receipt::pack_receipts; use super::{load_timeline, share_encrypted_room}; use crate::{ - client::{filter_rooms, ignored_filter, sync::v5::TodoRooms, DEFAULT_BUMP_TYPES}, Ruma, + client::{DEFAULT_BUMP_TYPES, ignored_filter}, }; -pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; +type TodoRooms = BTreeMap, usize, u64)>; +const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` /// @@ -50,10 +54,11 @@ pub(crate) async fn sync_events_v4_route( ) -> Result { debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let mut body = body.body; + // Setup watchers, so if there's no response, we can wait for them - let watcher = services.sync.watch(sender_user, &sender_device); + let watcher = services.sync.watch(sender_user, sender_device); let next_batch = services.globals.next_count()?; @@ -68,33 +73,21 @@ pub(crate) async fn sync_events_v4_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); - if globalsince != 0 - && !services - .sync - .remembered(sender_user.clone(), sender_device.clone(), conn_id.clone()) - { + let db_key = into_db_key(sender_user, sender_device, conn_id.clone()); + if globalsince != 0 && !services.sync.remembered(&db_key) { debug!("Restarting sync stream because it was gone from the database"); - return Err(Error::Request( - ErrorKind::UnknownPos, - "Connection data lost since last time".into(), - http::StatusCode::BAD_REQUEST, - )); + return 
Err!(Request(UnknownPos("Connection data lost since last time"))); } if globalsince == 0 { - services.sync.forget_sync_request_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ); + services.sync.forget_sync_request_connection(&db_key); } // Get sticky parameters from cache - let known_rooms = services.sync.update_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); + let snake_key = into_snake_key(sender_user, sender_device, conn_id.clone()); + let known_rooms = services + .sync + .update_sync_request_with_cache(&snake_key, &mut body); let all_joined_rooms: Vec<_> = services .rooms @@ -136,7 +129,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.to_device.enabled.unwrap_or(false) { services .users - .remove_to_device_events(sender_user, &sender_device, globalsince) + .remove_to_device_events(sender_user, sender_device, globalsince) .await; } @@ -153,7 +146,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.account_data.enabled.unwrap_or(false) { account_data.global = services .account_data - .changes_since(None, sender_user, globalsince) + .changes_since(None, sender_user, globalsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect() .await; @@ -164,7 +157,7 @@ pub(crate) async fn sync_events_v4_route( room.clone(), services .account_data - .changes_since(Some(&room), sender_user, globalsince) + .changes_since(Some(&room), sender_user, globalsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -258,13 +251,10 @@ pub(crate) async fn sync_events_v4_route( continue; }; if pdu.kind == RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - OwnedUserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - if user_id == *sender_user { + if let Some(Ok(user_id)) = + 
pdu.state_key.as_deref().map(UserId::parse) + { + if user_id == sender_user { continue; } @@ -275,18 +265,18 @@ pub(crate) async fn sync_events_v4_route( if !share_encrypted_room( &services, sender_user, - &user_id, + user_id, Some(room_id), ) .await { - device_list_changes.insert(user_id); + device_list_changes.insert(user_id.to_owned()); } }, | MembershipState::Leave => { // Write down users that have left encrypted rooms we // are in - left_encrypted_users.insert(user_id); + left_encrypted_users.insert(user_id.to_owned()); }, | _ => {}, } @@ -302,7 +292,7 @@ pub(crate) async fn sync_events_v4_route( .state_cache .room_members(room_id) // Don't send key updates from the sender to the sender - .ready_filter(|user_id| sender_user != user_id) + .ready_filter(|&user_id| sender_user != user_id) // Only send keys if the sender doesn't share an encrypted room with the target // already .filter_map(|user_id| { @@ -398,9 +388,12 @@ pub(crate) async fn sync_events_v4_route( .map_or(10, usize_from_u64_truncated) .min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); + todo_room.0.extend( + list.room_details + .required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date @@ -425,10 +418,9 @@ pub(crate) async fn sync_events_v4_route( }); if let Some(conn_id) = &body.conn_id { + let db_key = into_db_key(sender_user, sender_device, conn_id); services.sync.update_sync_known_rooms( - sender_user, - &sender_device, - conn_id.clone(), + &db_key, list_id.clone(), new_known_rooms, globalsince, @@ -438,7 +430,10 @@ pub(crate) async fn sync_events_v4_route( let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || 
services.rooms.metadata.is_banned(room_id).await + { continue; } let todo_room = @@ -452,7 +447,11 @@ pub(crate) async fn sync_events_v4_route( .map_or(10, usize_from_u64_truncated) .min(100); - todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.0.extend( + room.required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date todo_room.2 = todo_room.2.min( @@ -471,23 +470,20 @@ pub(crate) async fn sync_events_v4_route( } if let Some(conn_id) = &body.conn_id { + let db_key = into_db_key(sender_user, sender_device, conn_id); services.sync.update_sync_known_rooms( - sender_user, - &sender_device, - conn_id.clone(), + &db_key, "subscriptions".to_owned(), known_subscription_rooms, globalsince, ); } - if let Some(conn_id) = &body.conn_id { - services.sync.update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ); + if let Some(conn_id) = body.conn_id.clone() { + let db_key = into_db_key(sender_user, sender_device, conn_id); + services + .sync + .update_sync_subscriptions(&db_key, body.room_subscriptions); } let mut rooms = BTreeMap::new(); @@ -531,7 +527,7 @@ pub(crate) async fn sync_events_v4_route( room_id.to_owned(), services .account_data - .changes_since(Some(room_id), sender_user, *roomsince) + .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -630,7 +626,7 @@ pub(crate) async fn sync_events_v4_route( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(|s| s.to_sync_state_event()) + .map(PduEvent::into_sync_state_event) .ok() }) .collect() @@ -641,7 +637,7 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_cache .room_members(room_id) - .ready_filter(|member| member != sender_user) + .ready_filter(|&member| member != sender_user) 
.filter_map(|user_id| { services .rooms @@ -696,14 +692,13 @@ pub(crate) async fn sync_events_v4_route( .await .ok() .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match services.rooms.state_accessor.get_avatar(room_id).await { + avatar: match heroes_avatar { + | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), + | _ => match services.rooms.state_accessor.get_avatar(room_id).await { | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), | ruma::JsOption::Null => ruma::JsOption::Null, | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } + }, }, initial: Some(roomsince == &0), is_dm: None, @@ -779,7 +774,12 @@ pub(crate) async fn sync_events_v4_route( Some(sync_events::v4::ToDevice { events: services .users - .get_to_device_events(sender_user, &sender_device) + .get_to_device_events( + sender_user, + sender_device, + Some(globalsince), + Some(next_batch), + ) .collect() .await, next_batch: next_batch.to_string(), @@ -794,7 +794,7 @@ pub(crate) async fn sync_events_v4_route( }, device_one_time_keys_count: services .users - .count_one_time_keys(sender_user, &sender_device) + .count_one_time_keys(sender_user, sender_device) .await, // Fallback keys are not yet supported device_unused_fallback_key_types: None, @@ -806,3 +806,33 @@ pub(crate) async fn sync_events_v4_route( delta_token: None, }) } + +async fn filter_rooms<'a>( + services: &Services, + rooms: &[&'a RoomId], + filter: &[RoomTypeFilter], + negate: bool, +) -> Vec<&'a RoomId> { + rooms + .iter() + .stream() + .filter_map(|r| async move { + let room_type = services.rooms.state_accessor.get_room_type(r).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + 
include.then_some(r) + }) + .collect() + .await +} diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 66647f0e..f3fc0f44 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -1,41 +1,52 @@ use std::{ cmp::{self, Ordering}, collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + ops::Deref, time::Duration, }; use axum::extract::State; use conduwuit::{ - debug, error, extract_variant, trace, + Err, Error, Result, error, extract_variant, is_equal_to, + matrix::{ + TypeStateKey, + pdu::{PduCount, PduEvent}, + }, + trace, utils::{ + BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::ReadyEqExt, math::{ruma_from_usize, usize_from_ruma}, - BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, - warn, Error, Result, + warn, +}; +use conduwuit_service::{Services, rooms::read_receipt::pack_receipts, sync::into_snake_key}; +use futures::{ + FutureExt, Stream, StreamExt, TryFutureExt, + future::{OptionFuture, join3, try_join4}, + pin_mut, }; -use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ - api::client::{ - error::ErrorKind, - sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, - }, + DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, + api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, + directory::RoomTypeFilter, events::{ - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - state_res::TypeStateKey, - uint, DeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, + uint, }; -use service::{rooms::read_receipt::pack_receipts, PduCount}; -use super::{filter_rooms, share_encrypted_room}; +use super::share_encrypted_room; use crate::{ - client::{ignored_filter, sync::load_timeline, DEFAULT_BUMP_TYPES}, Ruma, + client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline}, 
}; type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); +type TodoRooms = BTreeMap, usize, u64)>; +type KnownRooms = BTreeMap>; /// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync` /// ([MSC4186]) @@ -48,7 +59,7 @@ type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request /// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 /// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186 pub(crate) async fn sync_events_v5_route( - State(services): State, + State(ref services): State, body: Ruma, ) -> Result { debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); @@ -69,95 +80,95 @@ pub(crate) async fn sync_events_v5_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); - if globalsince != 0 - && !services.sync.snake_connection_cached( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ) { - debug!("Restarting sync stream because it was gone from the database"); - return Err(Error::Request( - ErrorKind::UnknownPos, - "Connection data lost since last time".into(), - http::StatusCode::BAD_REQUEST, - )); + let snake_key = into_snake_key(sender_user, sender_device, conn_id); + + if globalsince != 0 && !services.sync.snake_connection_cached(&snake_key) { + return Err!(Request(UnknownPos( + "Connection data unknown to server; restarting sync stream." 
+ ))); } // Client / User requested an initial sync if globalsince == 0 { - services.sync.forget_snake_sync_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ); + services.sync.forget_snake_sync_connection(&snake_key); } // Get sticky parameters from cache - let known_rooms = services.sync.update_snake_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); + let known_rooms = services + .sync + .update_snake_sync_request_with_cache(&snake_key, &mut body); - let all_joined_rooms: Vec<_> = services + let all_joined_rooms = services .rooms .state_cache .rooms_joined(sender_user) .map(ToOwned::to_owned) - .collect() - .await; + .collect::>(); - let all_invited_rooms: Vec<_> = services + let all_invited_rooms = services .rooms .state_cache .rooms_invited(sender_user) .map(|r| r.0) - .collect() - .await; + .collect::>(); - let all_knocked_rooms: Vec<_> = services + let all_knocked_rooms = services .rooms .state_cache .rooms_knocked(sender_user) .map(|r| r.0) - .collect() - .await; + .collect::>(); - let all_rooms: Vec<&RoomId> = all_joined_rooms - .iter() - .map(AsRef::as_ref) - .chain(all_invited_rooms.iter().map(AsRef::as_ref)) - .chain(all_knocked_rooms.iter().map(AsRef::as_ref)) - .collect(); + let (all_joined_rooms, all_invited_rooms, all_knocked_rooms) = + join3(all_joined_rooms, all_invited_rooms, all_knocked_rooms).await; - let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect(); - let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref); + let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref); + let all_knocked_rooms = all_knocked_rooms.iter().map(AsRef::as_ref); + let all_rooms = all_joined_rooms + .clone() + .chain(all_invited_rooms.clone()) + .chain(all_knocked_rooms.clone()); let pos = next_batch.clone().to_string(); let mut todo_rooms: TodoRooms = BTreeMap::new(); let 
sync_info: SyncInfo<'_> = (sender_user, sender_device, globalsince, &body); + + let account_data = collect_account_data(services, sync_info).map(Ok); + + let e2ee = collect_e2ee(services, sync_info, all_joined_rooms.clone()); + + let to_device = collect_to_device(services, sync_info, next_batch).map(Ok); + + let receipts = collect_receipts(services).map(Ok); + + let (account_data, e2ee, to_device, receipts) = + try_join4(account_data, e2ee, to_device, receipts).await?; + + let extensions = sync_events::v5::response::Extensions { + account_data, + e2ee, + to_device, + receipts, + typing: sync_events::v5::response::Typing::default(), + }; + let mut response = sync_events::v5::Response { txn_id: body.txn_id.clone(), pos, lists: BTreeMap::new(), rooms: BTreeMap::new(), - extensions: sync_events::v5::response::Extensions { - account_data: collect_account_data(services, sync_info).await, - e2ee: collect_e2ee(services, sync_info, &all_joined_rooms).await?, - to_device: collect_to_device(services, sync_info, next_batch).await, - receipts: collect_receipts(services).await, - typing: sync_events::v5::response::Typing::default(), - }, + extensions, }; handle_lists( services, sync_info, - &all_invited_rooms, - &all_joined_rooms, - &all_rooms, + all_invited_rooms.clone(), + all_joined_rooms.clone(), + all_rooms, &mut todo_rooms, &known_rooms, &mut response, @@ -170,7 +181,7 @@ pub(crate) async fn sync_events_v5_route( services, sender_user, next_batch, - &all_invited_rooms, + all_invited_rooms.clone(), &todo_rooms, &mut response, &body, @@ -195,28 +206,33 @@ pub(crate) async fn sync_events_v5_route( } trace!( - rooms=?response.rooms.len(), - account_data=?response.extensions.account_data.rooms.len(), - receipts=?response.extensions.receipts.rooms.len(), + rooms = ?response.rooms.len(), + account_data = ?response.extensions.account_data.rooms.len(), + receipts = ?response.extensions.receipts.rooms.len(), "responding to request with" ); Ok(response) } -type KnownRooms = 
BTreeMap>; -pub(crate) type TodoRooms = BTreeMap, usize, u64)>; - async fn fetch_subscriptions( - services: crate::State, + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, known_rooms: &KnownRooms, todo_rooms: &mut TodoRooms, ) { let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await { + let not_exists = services.rooms.metadata.exists(room_id).eq(&false); + + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + let is_banned = services.rooms.metadata.is_banned(room_id); + + pin_mut!(not_exists, is_disabled, is_banned); + if not_exists.or(is_disabled).or(is_banned).await { continue; } + let todo_room = todo_rooms .entry(room_id.clone()) @@ -224,7 +240,11 @@ async fn fetch_subscriptions( let limit: UInt = room.timeline_limit; - todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.0.extend( + room.required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(usize_from_ruma(limit)); // 0 means unknown because it got out of date todo_room.2 = todo_room.2.min( @@ -242,11 +262,10 @@ async fn fetch_subscriptions( // body.room_subscriptions.remove(&r); //} - if let Some(conn_id) = &body.conn_id { + if let Some(conn_id) = body.conn_id.clone() { + let snake_key = into_snake_key(sender_user, sender_device, conn_id); services.sync.update_snake_sync_known_rooms( - sender_user, - sender_device, - conn_id.clone(), + &snake_key, "subscriptions".to_owned(), known_subscription_rooms, globalsince, @@ -255,27 +274,39 @@ async fn fetch_subscriptions( } #[allow(clippy::too_many_arguments)] -async fn handle_lists<'a>( - services: crate::State, +async fn handle_lists<'a, Rooms, AllRooms>( + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, - all_invited_rooms: &Vec<&'a RoomId>, - all_joined_rooms: &Vec<&'a RoomId>, - all_rooms: &Vec<&'a 
RoomId>, + all_invited_rooms: Rooms, + all_joined_rooms: Rooms, + all_rooms: AllRooms, todo_rooms: &'a mut TodoRooms, known_rooms: &'a KnownRooms, response: &'_ mut sync_events::v5::Response, -) -> KnownRooms { +) -> KnownRooms +where + Rooms: Iterator + Clone + Send + 'a, + AllRooms: Iterator + Clone + Send + 'a, +{ for (list_id, list) in &body.lists { - let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { - | Some(true) => all_invited_rooms, - | Some(false) => all_joined_rooms, - | None => all_rooms, + let active_rooms: Vec<_> = match list.filters.as_ref().and_then(|f| f.is_invite) { + | None => all_rooms.clone().collect(), + | Some(true) => all_invited_rooms.clone().collect(), + | Some(false) => all_joined_rooms.clone().collect(), }; - let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { - | Some(filter) if filter.is_empty() => active_rooms, - | Some(value) => &filter_rooms(&services, active_rooms, &value, true).await, + let active_rooms = match list.filters.as_ref().map(|f| &f.not_room_types) { | None => active_rooms, + | Some(filter) if filter.is_empty() => active_rooms, + | Some(value) => + filter_rooms( + services, + value, + &true, + active_rooms.iter().stream().map(Deref::deref), + ) + .collect() + .await, }; let mut new_known_rooms: BTreeSet = BTreeSet::new(); @@ -293,6 +324,7 @@ async fn handle_lists<'a>( let new_rooms: BTreeSet = room_ids.clone().into_iter().map(From::from).collect(); + new_known_rooms.extend(new_rooms); //new_known_rooms.extend(room_ids..cloned()); for room_id in room_ids { @@ -304,9 +336,12 @@ async fn handle_lists<'a>( let limit: usize = usize_from_ruma(list.room_details.timeline_limit).min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); + todo_room.0.extend( + list.room_details + .required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date @@ -325,29 
+360,32 @@ async fn handle_lists<'a>( count: ruma_from_usize(active_rooms.len()), }); - if let Some(conn_id) = &body.conn_id { + if let Some(conn_id) = body.conn_id.clone() { + let snake_key = into_snake_key(sender_user, sender_device, conn_id); services.sync.update_snake_sync_known_rooms( - sender_user, - sender_device, - conn_id.clone(), + &snake_key, list_id.clone(), new_known_rooms, globalsince, ); } } + BTreeMap::default() } -async fn process_rooms( - services: crate::State, +async fn process_rooms<'a, Rooms>( + services: &Services, sender_user: &UserId, next_batch: u64, - all_invited_rooms: &[&RoomId], + all_invited_rooms: Rooms, todo_rooms: &TodoRooms, response: &mut sync_events::v5::Response, body: &sync_events::v5::Request, -) -> Result> { +) -> Result> +where + Rooms: Iterator + Clone + Send + 'a, +{ let mut rooms = BTreeMap::new(); for (room_id, (required_state_request, timeline_limit, roomsince)) in todo_rooms { let roomsincecount = PduCount::Normal(*roomsince); @@ -356,7 +394,7 @@ async fn process_rooms( let mut invite_state = None; let (timeline_pdus, limited); let new_room_id: &RoomId = (*room_id).as_ref(); - if all_invited_rooms.contains(&new_room_id) { + if all_invited_rooms.clone().any(is_equal_to!(new_room_id)) { // TODO: figure out a timestamp we can use for remote invites invite_state = services .rooms @@ -368,7 +406,7 @@ async fn process_rooms( (timeline_pdus, limited) = (Vec::new(), true); } else { (timeline_pdus, limited) = match load_timeline( - &services, + services, sender_user, room_id, roomsincecount, @@ -390,7 +428,7 @@ async fn process_rooms( room_id.to_owned(), services .account_data - .changes_since(Some(room_id), sender_user, *roomsince) + .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -401,18 +439,17 @@ async fn process_rooms( .rooms .read_receipt .last_privateread_update(sender_user, room_id) - .await > 
*roomsince; + .await; - let private_read_event = if last_privateread_update { - services - .rooms - .read_receipt - .private_read_get(room_id, sender_user) - .await - .ok() - } else { - None - }; + let private_read_event: OptionFuture<_> = (last_privateread_update > *roomsince) + .then(|| { + services + .rooms + .read_receipt + .private_read_get(room_id, sender_user) + .ok() + }) + .into(); let mut receipts: Vec> = services .rooms @@ -428,7 +465,7 @@ async fn process_rooms( .collect() .await; - if let Some(private_read_event) = private_read_event { + if let Some(private_read_event) = private_read_event.await.flatten() { receipts.push(private_read_event); } @@ -477,7 +514,7 @@ async fn process_rooms( let room_events: Vec<_> = timeline_pdus .iter() .stream() - .filter_map(|item| ignored_filter(&services, item.clone(), sender_user)) + .filter_map(|item| ignored_filter(services, item.clone(), sender_user)) .map(|(_, pdu)| pdu.to_sync_room_event()) .collect() .await; @@ -500,7 +537,7 @@ async fn process_rooms( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(|s| s.to_sync_state_event()) + .map(PduEvent::into_sync_state_event) .ok() }) .collect() @@ -566,14 +603,13 @@ async fn process_rooms( .await .ok() .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match services.rooms.state_accessor.get_avatar(room_id).await { + avatar: match heroes_avatar { + | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), + | _ => match services.rooms.state_accessor.get_avatar(room_id).await { | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), | ruma::JsOption::Null => ruma::JsOption::Null, | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } + }, }, initial: Some(roomsince == &0), is_dm: None, @@ -630,7 +666,7 @@ async fn process_rooms( Ok(rooms) } async fn collect_account_data( - services: crate::State, + services: &Services, (sender_user, _, globalsince, 
body): (&UserId, &DeviceId, u64, &sync_events::v5::Request), ) -> sync_events::v5::response::AccountData { let mut account_data = sync_events::v5::response::AccountData { @@ -644,7 +680,7 @@ async fn collect_account_data( account_data.global = services .account_data - .changes_since(None, sender_user, globalsince) + .changes_since(None, sender_user, globalsince, None) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect() .await; @@ -655,7 +691,7 @@ async fn collect_account_data( room.clone(), services .account_data - .changes_since(Some(room), sender_user, globalsince) + .changes_since(Some(room), sender_user, globalsince, None) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -666,16 +702,19 @@ async fn collect_account_data( account_data } -async fn collect_e2ee<'a>( - services: crate::State, +async fn collect_e2ee<'a, Rooms>( + services: &Services, (sender_user, sender_device, globalsince, body): ( &UserId, &DeviceId, u64, &sync_events::v5::Request, ), - all_joined_rooms: &'a Vec<&'a RoomId>, -) -> Result { + all_joined_rooms: Rooms, +) -> Result +where + Rooms: Iterator + Send + 'a, +{ if !body.extensions.e2ee.enabled.unwrap_or(false) { return Ok(sync_events::v5::response::E2EE::default()); } @@ -765,13 +804,9 @@ async fn collect_e2ee<'a>( continue; }; if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - OwnedUserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - if user_id == *sender_user { + if let Some(Ok(user_id)) = pdu.state_key.as_deref().map(UserId::parse) + { + if user_id == sender_user { continue; } @@ -780,20 +815,20 @@ async fn collect_e2ee<'a>( | MembershipState::Join => { // A new user joined an encrypted room if !share_encrypted_room( - &services, + services, sender_user, - &user_id, + user_id, Some(room_id), ) .await { - device_list_changes.insert(user_id); + 
device_list_changes.insert(user_id.to_owned()); } }, | MembershipState::Leave => { // Write down users that have left encrypted rooms we // are in - left_encrypted_users.insert(user_id); + left_encrypted_users.insert(user_id.to_owned()); }, | _ => {}, } @@ -813,7 +848,7 @@ async fn collect_e2ee<'a>( // Only send keys if the sender doesn't share an encrypted room with the target // already .filter_map(|user_id| { - share_encrypted_room(&services, sender_user, user_id, Some(room_id)) + share_encrypted_room(services, sender_user, user_id, Some(room_id)) .map(|res| res.or_some(user_id.to_owned())) }) .collect::>() @@ -836,7 +871,7 @@ async fn collect_e2ee<'a>( for user_id in left_encrypted_users { let dont_share_encrypted_room = - !share_encrypted_room(&services, sender_user, &user_id, None).await; + !share_encrypted_room(services, sender_user, &user_id, None).await; // If the user doesn't share an encrypted room with the target anymore, we need // to tell them @@ -846,20 +881,22 @@ async fn collect_e2ee<'a>( } Ok(sync_events::v5::response::E2EE { - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, + device_unused_fallback_key_types: None, + device_one_time_keys_count: services .users .count_one_time_keys(sender_user, sender_device) .await, - device_unused_fallback_key_types: None, + + device_lists: DeviceLists { + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, }) } async fn collect_to_device( - services: crate::State, + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, next_batch: u64, ) -> Option { @@ -876,13 +913,41 @@ async fn collect_to_device( next_batch: next_batch.to_string(), events: services .users - .get_to_device_events(sender_user, sender_device) + .get_to_device_events(sender_user, sender_device, None, Some(next_batch)) .collect() .await, }) } -async fn collect_receipts(_services: 
crate::State) -> sync_events::v5::response::Receipts { +async fn collect_receipts(_services: &Services) -> sync_events::v5::response::Receipts { sync_events::v5::response::Receipts { rooms: BTreeMap::new() } // TODO: get explicitly requested read receipts } + +fn filter_rooms<'a, Rooms>( + services: &'a Services, + filter: &'a [RoomTypeFilter], + negate: &'a bool, + rooms: Rooms, +) -> impl Stream + Send + 'a +where + Rooms: Stream + Send + 'a, +{ + rooms.filter_map(async |room_id| { + let room_type = services.rooms.state_accessor.get_room_type(room_id).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if *negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(room_id) + }) +} diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index 820ee4a1..caafe10d 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -1,15 +1,16 @@ use std::collections::BTreeMap; use axum::extract::State; +use conduwuit::Result; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ - tag::{TagEvent, TagEventContent}, RoomAccountDataEventType, + tag::{TagEvent, TagEventContent}, }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` /// diff --git a/src/api/client/thirdparty.rs b/src/api/client/thirdparty.rs index 790b27d3..0713a882 100644 --- a/src/api/client/thirdparty.rs +++ b/src/api/client/thirdparty.rs @@ -1,8 +1,9 @@ use std::collections::BTreeMap; +use conduwuit::Result; use ruma::api::client::thirdparty::get_protocols; -use crate::{Result, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse}; /// # `GET /_matrix/client/r0/thirdparty/protocols` /// diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index f0cbf467..5b838bef 100644 --- 
a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,9 +1,12 @@ use axum::extract::State; -use conduwuit::{at, PduCount, PduEvent}; +use conduwuit::{ + Result, at, + matrix::pdu::{PduCount, PduEvent}, +}; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/rooms/{roomId}/threads` pub(crate) async fn get_threads_route( @@ -53,7 +56,7 @@ pub(crate) async fn get_threads_route( chunk: threads .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), }) } diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index 1b942fba..8ad9dc99 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{Error, Result}; +use conduwuit_service::sending::EduBuf; use futures::StreamExt; use ruma::{ api::{ @@ -10,7 +11,6 @@ use ruma::{ }, to_device::DeviceIdOrAllDevices, }; -use service::sending::EduBuf; use crate::Ruma; diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index b311295b..1d8d02fd 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::{utils::math::Tried, Err}; +use conduwuit::{Err, Result, utils, utils::math::Tried}; use ruma::api::client::typing::create_typing_event; -use crate::{utils, Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// @@ -27,41 +27,44 @@ pub(crate) async fn create_typing_event_route( return Err!(Request(Forbidden("You are not in this room."))); } - if let Typing::Yes(duration) = body.state { - let duration = utils::clamp( - duration.as_millis().try_into().unwrap_or(u64::MAX), + match body.state { + | Typing::Yes(duration) => { + let duration = utils::clamp( + duration.as_millis().try_into().unwrap_or(u64::MAX), + services + .server + .config 
+ .typing_client_timeout_min_s + .try_mul(1000)?, + services + .server + .config + .typing_client_timeout_max_s + .try_mul(1000)?, + ); services - .server - .config - .typing_client_timeout_min_s - .try_mul(1000)?, + .rooms + .typing + .typing_add( + sender_user, + &body.room_id, + utils::millis_since_unix_epoch() + .checked_add(duration) + .expect("user typing timeout should not get this high"), + ) + .await?; + }, + | _ => { services - .server - .config - .typing_client_timeout_max_s - .try_mul(1000)?, - ); - services - .rooms - .typing - .typing_add( - sender_user, - &body.room_id, - utils::millis_since_unix_epoch() - .checked_add(duration) - .expect("user typing timeout should not get this high"), - ) - .await?; - } else { - services - .rooms - .typing - .typing_remove(sender_user, &body.room_id) - .await?; + .rooms + .typing + .typing_remove(sender_user, &body.room_id) + .await?; + }, } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(&body.user_id, &ruma::presence::PresenceState::Online) diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 66cb31d5..e21eaf21 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -2,9 +2,10 @@ use std::collections::BTreeMap; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::Err; +use conduwuit::{Err, Error, Result}; use futures::StreamExt; use ruma::{ + OwnedRoomId, api::{ client::{ error::ErrorKind, @@ -13,17 +14,14 @@ use ruma::{ delete_profile_key, delete_timezone_key, get_profile_key, get_timezone_key, set_profile_key, set_timezone_key, }, - room::get_summary, }, federation, }, - events::room::member::MembershipState, presence::PresenceState, - OwnedRoomId, }; use super::{update_avatar_url, update_displayname}; -use crate::{Error, Result, Ruma, RumaResponse}; +use crate::Ruma; /// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms` /// @@ 
-38,13 +36,10 @@ pub(crate) async fn get_mutual_rooms_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); - if sender_user == &body.user_id { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "You cannot request rooms in common with yourself.", - )); + if sender_user == body.user_id { + return Err!(Request(Unknown("You cannot request rooms in common with yourself."))); } if !services.users.exists(&body.user_id).await { @@ -65,129 +60,6 @@ pub(crate) async fn get_mutual_rooms_route( }) } -/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` -/// -/// Returns a short description of the state of a room. -/// -/// This is the "wrong" endpoint that some implementations/clients may use -/// according to the MSC. Request and response bodies are the same as -/// `get_room_summary`. -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -pub(crate) async fn get_room_summary_legacy( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - get_room_summary(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` -/// -/// Returns a short description of the state of a room. 
-/// -/// TODO: support fetching remote room info if we don't know the room -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -#[tracing::instrument(skip_all, fields(%client), name = "room_summary")] -pub(crate) async fn get_room_summary( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref(); - - let room_id = services.rooms.alias.resolve(&body.room_id_or_alias).await?; - - if !services.rooms.metadata.exists(&room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); - } - - if sender_user.is_none() - && !services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Room is not world readable, authentication is required", - )); - } - - Ok(get_summary::msc3266::Response { - room_id: room_id.clone(), - canonical_alias: services - .rooms - .state_accessor - .get_canonical_alias(&room_id) - .await - .ok(), - avatar_url: services - .rooms - .state_accessor - .get_avatar(&room_id) - .await - .into_option() - .unwrap_or_default() - .url, - guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, - name: services.rooms.state_accessor.get_name(&room_id).await.ok(), - num_joined_members: services - .rooms - .state_cache - .room_joined_count(&room_id) - .await - .unwrap_or(0) - .try_into()?, - topic: services - .rooms - .state_accessor - .get_room_topic(&room_id) - .await - .ok(), - world_readable: services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await, - join_rule: services - .rooms - .state_accessor - .get_join_rule(&room_id) - .await - .unwrap_or_default() - .0, - room_type: services - .rooms - .state_accessor - .get_room_type(&room_id) - .await - .ok(), - room_version: services.rooms.state.get_room_version(&room_id).await.ok(), - membership: if let 
Some(sender_user) = sender_user { - services - .rooms - .state_accessor - .get_member(&room_id, sender_user) - .await - .map_or_else(|_| MembershipState::Leave, |content| content.membership) - .into() - } else { - None - }, - encryption: services - .rooms - .state_accessor - .get_room_encryption(&room_id) - .await - .ok(), - }) -} - /// # `DELETE /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz` /// /// Deletes the `tz` (timezone) of a user, as per MSC4133 and MSC4175. @@ -205,7 +77,7 @@ pub(crate) async fn delete_timezone_key_route( services.users.set_timezone(&body.user_id, None); - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -233,7 +105,7 @@ pub(crate) async fn set_timezone_key_route( services.users.set_timezone(&body.user_id, body.tz.clone()); - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -272,7 +144,7 @@ pub(crate) async fn set_profile_key_route( ))); } - let Some(profile_key_value) = body.kv_pair.get(&body.key) else { + let Some(profile_key_value) = body.kv_pair.get(&body.key_name) else { return Err!(Request(BadJson( "The key does not match the URL field key, or JSON body is empty (use DELETE)" ))); @@ -290,7 +162,7 @@ pub(crate) async fn set_profile_key_route( return Err!(Request(BadJson("Key names cannot be longer than 128 bytes"))); } - if body.key == "displayname" { + if body.key_name == "displayname" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -306,7 +178,7 @@ pub(crate) async fn set_profile_key_route( &all_joined_rooms, ) .await; - } else if body.key == "avatar_url" { + } else if body.key_name == "avatar_url" { let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string()); let all_joined_rooms: Vec = services @@ -319,12 +191,14 @@ pub(crate) async fn set_profile_key_route( update_avatar_url(&services, &body.user_id, Some(mxc), None, 
&all_joined_rooms).await; } else { - services - .users - .set_profile_key(&body.user_id, &body.key, Some(profile_key_value.clone())); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(profile_key_value.clone()), + ); } - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -357,7 +231,7 @@ pub(crate) async fn delete_profile_key_route( ))); } - if body.key == "displayname" { + if body.key_name == "displayname" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -367,7 +241,7 @@ pub(crate) async fn delete_profile_key_route( .await; update_displayname(&services, &body.user_id, None, &all_joined_rooms).await; - } else if body.key == "avatar_url" { + } else if body.key_name == "avatar_url" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -380,10 +254,10 @@ pub(crate) async fn delete_profile_key_route( } else { services .users - .set_profile_key(&body.user_id, &body.key, None); + .set_profile_key(&body.user_id, &body.key_name, None); } - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -497,13 +371,18 @@ pub(crate) async fn get_profile_key_route( .users .set_timezone(&body.user_id, response.tz.clone()); - if let Some(value) = response.custom_profile_fields.get(&body.key) { - profile_key_value.insert(body.key.clone(), value.clone()); - services - .users - .set_profile_key(&body.user_id, &body.key, Some(value.clone())); - } else { - return Err!(Request(NotFound("The requested profile key does not exist."))); + match response.custom_profile_fields.get(&body.key_name) { + | Some(value) => { + profile_key_value.insert(body.key_name.clone(), value.clone()); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(value.clone()), + ); + }, + | _ => { + return Err!(Request(NotFound("The requested profile key does not exist."))); + }, } if 
profile_key_value.is_empty() { @@ -520,10 +399,17 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("Profile was not found."))); } - if let Ok(value) = services.users.profile_key(&body.user_id, &body.key).await { - profile_key_value.insert(body.key.clone(), value); - } else { - return Err!(Request(NotFound("The requested profile key does not exist."))); + match services + .users + .profile_key(&body.user_id, &body.key_name) + .await + { + | Ok(value) => { + profile_key_value.insert(body.key_name.clone(), value); + }, + | _ => { + return Err!(Request(NotFound("The requested profile key does not exist."))); + }, } if profile_key_value.is_empty() { diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 904f1d2f..232d5b28 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -1,10 +1,11 @@ use std::collections::BTreeMap; -use axum::{extract::State, response::IntoResponse, Json}; +use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::Result; use futures::StreamExt; use ruma::api::client::discovery::get_supported_versions; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/versions` /// diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 182e30db..748fc049 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,15 +1,22 @@ use axum::extract::State; -use conduwuit::utils::TryFutureExtExt; -use futures::{pin_mut, StreamExt}; -use ruma::{ - api::client::user_directory::search_users, - events::{ - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, - StateEventType, +use conduwuit::{ + Result, + utils::{ + future::BoolExt, + stream::{BroadbandExt, ReadyExt}, }, }; +use futures::{FutureExt, StreamExt, pin_mut}; +use ruma::{ + api::client::user_directory::search_users::{self}, + events::room::join_rules::JoinRule, +}; -use crate::{Result, Ruma}; +use crate::Ruma; + +// conduwuit can 
handle a lot more results than synapse +const LIMIT_MAX: usize = 500; +const LIMIT_DEFAULT: usize = 10; /// # `POST /_matrix/client/r0/user_directory/search` /// @@ -21,78 +28,61 @@ pub(crate) async fn search_users_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = usize::try_from(body.limit).map_or(10, usize::from).min(100); // default limit is 10 + let sender_user = body.sender_user(); + let limit = usize::try_from(body.limit) + .map_or(LIMIT_DEFAULT, usize::from) + .min(LIMIT_MAX); - let users = services.users.stream().filter_map(|user_id| async { - // Filter out buggy users (they should not exist, but you never know...) - let user = search_users::v3::User { - user_id: user_id.to_owned(), - display_name: services.users.displayname(user_id).await.ok(), - avatar_url: services.users.avatar_url(user_id).await.ok(), - }; + let search_term = body.search_term.to_lowercase(); + let mut users = services + .users + .stream() + .ready_filter(|user_id| user_id.as_str().to_lowercase().contains(&search_term)) + .map(ToOwned::to_owned) + .broad_filter_map(async |user_id| { + let display_name = services.users.displayname(&user_id).await.ok(); - let user_id_matches = user - .user_id - .to_string() - .to_lowercase() - .contains(&body.search_term.to_lowercase()); + let display_name_matches = display_name + .as_deref() + .map(str::to_lowercase) + .is_some_and(|display_name| display_name.contains(&search_term)); - let user_displayname_matches = user - .display_name - .as_ref() - .filter(|name| { - name.to_lowercase() - .contains(&body.search_term.to_lowercase()) - }) - .is_some(); + if !display_name_matches { + return None; + } - if !user_id_matches && !user_displayname_matches { - return None; - } - - // It's a matching user, but is the sender allowed to see them? 
- let mut user_visible = false; - - let user_is_in_public_rooms = services - .rooms - .state_cache - .rooms_joined(&user.user_id) - .any(|room| { - services - .rooms - .state_accessor - .room_state_get_content::( - room, - &StateEventType::RoomJoinRules, - "", - ) - .map_ok_or(false, |content| content.join_rule == JoinRule::Public) - }) - .await; - - if user_is_in_public_rooms { - user_visible = true; - } else { - let user_is_in_shared_rooms = services + let user_in_public_room = services .rooms .state_cache - .user_sees_user(sender_user, &user.user_id) - .await; + .rooms_joined(&user_id) + .map(ToOwned::to_owned) + .broad_any(async |room_id| { + services + .rooms + .state_accessor + .get_join_rules(&room_id) + .map(|rule| matches!(rule, JoinRule::Public)) + .await + }); - if user_is_in_shared_rooms { - user_visible = true; - } - } + let user_sees_user = services + .rooms + .state_cache + .user_sees_user(sender_user, &user_id); - user_visible.then_some(user) - }); + pin_mut!(user_in_public_room, user_sees_user); + user_in_public_room + .or(user_sees_user) + .await + .then_some(search_users::v3::User { + user_id: user_id.clone(), + display_name, + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }) + }); - pin_mut!(users); - - let limited = users.by_ref().next().await.is_some(); - - let results = users.take(limit).collect().await; + let results = users.by_ref().take(limit).collect().await; + let limited = users.next().await.is_some(); Ok(search_users::v3::Response { results, limited }) } diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index 70ad4913..91991d24 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -1,13 +1,13 @@ use std::time::{Duration, SystemTime}; use axum::extract::State; -use base64::{engine::general_purpose, Engine as _}; -use conduwuit::{utils, Err}; +use base64::{Engine as _, engine::general_purpose}; +use conduwuit::{Err, Result, utils}; use hmac::{Hmac, Mac}; -use 
ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch, UserId}; +use ruma::{SecondsSinceUnixEpoch, UserId, api::client::voip::get_turn_server_info}; use sha1::Sha1; -use crate::{Result, Ruma}; +use crate::Ruma; const RANDOM_USER_ID_LENGTH: usize = 10; diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index 5c53d013..eedab981 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -1,4 +1,5 @@ -use axum::{extract::State, response::IntoResponse, Json}; +use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::{Error, Result}; use ruma::api::client::{ discovery::{ discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, @@ -7,7 +8,7 @@ use ruma::api::client::{ error::ErrorKind, }; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /.well-known/matrix/client` /// diff --git a/src/api/mod.rs b/src/api/mod.rs index 80e34f10..9ca24e72 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,3 +1,4 @@ +#![type_length_limit = "16384"] //TODO: reduce me #![allow(clippy::toplevel_ref_arg)] pub mod client; @@ -7,8 +8,6 @@ pub mod server; extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; -pub(crate) use conduwuit::{debug_info, pdu::PduEvent, utils, Error, Result}; - pub(crate) use self::router::{Ruma, RumaResponse, State}; conduwuit::mod_ctor! 
{} diff --git a/src/api/router.rs b/src/api/router.rs index 7855ddfa..3fbef275 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -8,12 +8,12 @@ pub mod state; use std::str::FromStr; use axum::{ + Router, response::{IntoResponse, Redirect}, routing::{any, get, post}, - Router, }; -use conduwuit::{err, Server}; -use http::{uri, Uri}; +use conduwuit::{Server, err}; +use http::{Uri, uri}; use self::handler::RouterExt; pub(super) use self::{args::Args as Ruma, response::RumaResponse, state::State}; diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 582f0c56..26713dcc 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -1,16 +1,17 @@ use std::{mem, ops::Deref}; -use axum::{async_trait, body::Body, extract::FromRequest}; +use async_trait::async_trait; +use axum::{body::Body, extract::FromRequest}; use bytes::{BufMut, Bytes, BytesMut}; -use conduwuit::{debug, debug_warn, err, trace, utils::string::EMPTY, Error, Result}; +use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY}; use ruma::{ - api::IncomingRequest, CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, - OwnedServerName, OwnedUserId, ServerName, UserId, + CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, + OwnedUserId, ServerName, UserId, api::IncomingRequest, }; use service::Services; use super::{auth, auth::Auth, request, request::Request}; -use crate::{service::appservice::RegistrationInfo, State}; +use crate::{State, service::appservice::RegistrationInfo}; /// Extractor for Ruma request structs pub(crate) struct Args { diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index ecea305b..01254c32 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -1,12 +1,14 @@ use axum::RequestPartsExt; use axum_extra::{ - headers::{authorization::Bearer, Authorization}, - typed_header::TypedHeaderRejectionReason, TypedHeader, + headers::{Authorization, authorization::Bearer}, + 
typed_header::TypedHeaderRejectionReason, }; -use conduwuit::{debug_error, err, warn, Err, Error, Result}; +use conduwuit::{Err, Error, Result, debug_error, err, warn}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, api::{ + AuthScheme, IncomingRequest, Metadata, client::{ directory::get_public_rooms, error::ErrorKind, @@ -15,15 +17,12 @@ use ruma::{ }, voip::get_turn_server_info, }, - federation::openid::get_openid_userinfo, - AuthScheme, IncomingRequest, Metadata, + federation::{authentication::XMatrix, openid::get_openid_userinfo}, }, - server_util::authorization::XMatrix, - CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, }; use service::{ - server_keys::{PubKeyMap, PubKeys}, Services, + server_keys::{PubKeyMap, PubKeys}, }; use super::request::Request; @@ -56,12 +55,12 @@ pub(super) async fn auth( }; let token = if let Some(token) = token { - if let Some(reg_info) = services.appservice.find_from_token(token).await { - Token::Appservice(Box::new(reg_info)) - } else if let Ok((user_id, device_id)) = services.users.find_from_token(token).await { - Token::User((user_id, device_id)) - } else { - Token::Invalid + match services.appservice.find_from_token(token).await { + | Some(reg_info) => Token::Appservice(Box::new(reg_info)), + | _ => match services.users.find_from_token(token).await { + | Ok((user_id, device_id)) => Token::User((user_id, device_id)), + | _ => Token::Invalid, + }, } } else { Token::None @@ -110,7 +109,7 @@ pub(super) async fn auth( } }, | _ => {}, - }; + } } match (metadata.authentication, token) { @@ -307,7 +306,7 @@ async fn auth_server( } fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { - if !services.server.config.allow_federation { + if !services.config.allow_federation { return Err!(Config("allow_federation", "Federation is disabled.")); } @@ -317,12 +316,7 @@ fn auth_server_checks(services: &Services, 
x_matrix: &XMatrix) -> Result<()> { } let origin = &x_matrix.origin; - if services - .server - .config - .forbidden_remote_server_names - .contains(origin) - { + if services.moderation.is_remote_server_forbidden(origin) { return Err!(Request(Forbidden(debug_warn!( "Federation requests from {origin} denied." )))); diff --git a/src/api/router/handler.rs b/src/api/router/handler.rs index cfb8fb6e..ab013945 100644 --- a/src/api/router/handler.rs +++ b/src/api/router/handler.rs @@ -1,8 +1,8 @@ use axum::{ + Router, extract::FromRequestParts, response::IntoResponse, - routing::{on, MethodFilter}, - Router, + routing::{MethodFilter, on}, }; use conduwuit::Result; use futures::{Future, TryFutureExt}; diff --git a/src/api/router/request.rs b/src/api/router/request.rs index 615a8bff..3cdc452b 100644 --- a/src/api/router/request.rs +++ b/src/api/router/request.rs @@ -1,8 +1,8 @@ use std::str; -use axum::{extract::Path, RequestExt, RequestPartsExt}; +use axum::{RequestExt, RequestPartsExt, extract::Path}; use bytes::Bytes; -use conduwuit::{err, Result}; +use conduwuit::{Result, err}; use http::request::Parts; use serde::Deserialize; use service::Services; diff --git a/src/api/router/response.rs b/src/api/router/response.rs index a10560f1..03c9060e 100644 --- a/src/api/router/response.rs +++ b/src/api/router/response.rs @@ -1,9 +1,9 @@ use axum::response::{IntoResponse, Response}; use bytes::BytesMut; -use conduwuit::{error, Error}; +use conduwuit::{Error, error}; use http::StatusCode; use http_body_util::Full; -use ruma::api::{client::uiaa::UiaaResponse, OutgoingResponse}; +use ruma::api::{OutgoingResponse, client::uiaa::UiaaResponse}; pub(crate) struct RumaResponse(pub(crate) T) where diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index b44db67c..3cfbcedc 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -2,15 +2,21 @@ use std::cmp; use axum::extract::State; use conduwuit::{ - utils::{stream::TryTools, IterStream, ReadyExt}, 
PduCount, Result, + utils::{IterStream, ReadyExt, stream::TryTools}, }; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{api::federation::backfill::get_backfill, uint, MilliSecondsSinceUnixEpoch}; +use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill}; use super::AccessCheck; use crate::Ruma; +/// arbitrary number but synapse's is 100 and we can handle lots of these +/// anyways +const LIMIT_MAX: usize = 150; +/// no spec defined number but we can handle a lot of these +const LIMIT_DEFAULT: usize = 50; + /// # `GET /_matrix/federation/v1/backfill/` /// /// Retrieves events from before the sender joined the room, if the room's @@ -30,9 +36,9 @@ pub(crate) async fn get_backfill_route( let limit = body .limit - .min(uint!(100)) .try_into() - .expect("UInt could not be converted to usize"); + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); let from = body .v diff --git a/src/api/server/event.rs b/src/api/server/event.rs index 629dd6a2..5846c6d7 100644 --- a/src/api/server/event.rs +++ b/src/api/server/event.rs @@ -1,6 +1,6 @@ use axum::extract::State; -use conduwuit::{err, Result}; -use ruma::{api::federation::event::get_event, MilliSecondsSinceUnixEpoch, RoomId}; +use conduwuit::{Result, err}; +use ruma::{MilliSecondsSinceUnixEpoch, RoomId, api::federation::event::get_event}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 93e867a0..c9e210f5 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -1,11 +1,11 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{Error, Result}; +use conduwuit::{Error, Result, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ - api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, RoomId, + api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, }; use super::AccessCheck; @@ -48,7 +48,7 @@ pub(crate) 
async fn get_event_authorization_route( .rooms .auth_chain .event_ids_iter(room_id, once(body.event_id.borrow())) - .await? + .ready_filter_map(Result::ok) .filter_map(|id| async move { services.rooms.timeline.get_pdu_json(&id).await.ok() }) .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) .collect() diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index ea06015a..04dc30ed 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,13 +1,15 @@ use axum::extract::State; -use conduwuit::{Error, Result}; -use ruma::{ - api::{client::error::ErrorKind, federation::event::get_missing_events}, - CanonicalJsonValue, EventId, RoomId, -}; +use conduwuit::{Result, debug, debug_error, utils::to_canonical_object}; +use ruma::api::federation::event::get_missing_events; use super::AccessCheck; use crate::Ruma; +/// arbitrary number but synapse's is 20 and we can handle lots of these anyways +const LIMIT_MAX: usize = 50; +/// spec says default is 10 +const LIMIT_DEFAULT: usize = 10; + /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. 
@@ -24,7 +26,11 @@ pub(crate) async fn get_missing_events_route( .check() .await?; - let limit = body.limit.try_into()?; + let limit = body + .limit + .try_into() + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); let mut queued_events = body.latest_events.clone(); // the vec will never have more entries the limit @@ -32,60 +38,52 @@ pub(crate) async fn get_missing_events_route( let mut i: usize = 0; while i < queued_events.len() && events.len() < limit { - if let Ok(pdu) = services + let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else { + debug!( + ?body.origin, + "Event {} does not exist locally, skipping", &queued_events[i] + ); + i = i.saturating_add(1); + continue; + }; + + if body.earliest_events.contains(&queued_events[i]) { + i = i.saturating_add(1); + continue; + } + + if !services .rooms - .timeline - .get_pdu_json(&queued_events[i]) + .state_accessor + .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) .await { - let room_id_str = pdu - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database."))?; - - let event_room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room_id in event in database."))?; - - if event_room_id != body.room_id { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event from wrong room.")); - } - - if body.earliest_events.contains(&queued_events[i]) { - i = i.saturating_add(1); - continue; - } - - if !services - .rooms - .state_accessor - .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) - .await - { - i = i.saturating_add(1); - continue; - } - - let prev_events = pdu - .get("prev_events") - .and_then(CanonicalJsonValue::as_array) - .unwrap_or_default(); - - queued_events.extend( - prev_events - .iter() - .map(<&EventId>::try_from) - .filter_map(Result::ok) - .map(ToOwned::to_owned), - ); - - events.push( - services - .sending - .convert_to_outgoing_federation_event(pdu) - 
.await, + debug!( + ?body.origin, + "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id ); + i = i.saturating_add(1); + continue; } - i = i.saturating_add(1); + + let Ok(event) = to_canonical_object(&pdu) else { + debug_error!( + ?body.origin, + "Failed to convert PDU in database to canonical JSON: {pdu:?}" + ); + i = i.saturating_add(1); + continue; + }; + + let prev_events = pdu.prev_events.iter().map(ToOwned::to_owned); + + let event = services + .sending + .convert_to_outgoing_federation_event(event) + .await; + + queued_events.extend(prev_events); + events.push(event); } Ok(get_missing_events::v1::Response { events }) diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index a10df6ac..42c348f9 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,7 +1,15 @@ use axum::extract::State; -use ruma::api::{client::error::ErrorKind, federation::space::get_hierarchy}; +use conduwuit::{ + Err, Result, + utils::stream::{BroadbandExt, IterStream}, +}; +use conduwuit_service::rooms::spaces::{ + Identifier, SummaryAccessibility, get_parent_children_via, +}; +use futures::{FutureExt, StreamExt}; +use ruma::api::federation::space::get_hierarchy; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/hierarchy/{roomId}` /// @@ -11,13 +19,56 @@ pub(crate) async fn get_hierarchy_route( State(services): State, body: Ruma, ) -> Result { - if services.rooms.metadata.exists(&body.room_id).await { - services - .rooms - .spaces - .get_federation_hierarchy(&body.room_id, body.origin(), body.suggested_only) - .await - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Room does not exist.")) + if !services.rooms.metadata.exists(&body.room_id).await { + return Err!(Request(NotFound("Room does not exist."))); + } + + let room_id = &body.room_id; + let suggested_only = body.suggested_only; + let ref identifier = Identifier::ServerName(body.origin()); + match services + .rooms + .spaces + 
.get_summary_and_children_local(room_id, identifier) + .await? + { + | None => Err!(Request(NotFound("The requested room was not found"))), + + | Some(SummaryAccessibility::Inaccessible) => { + Err!(Request(NotFound("The requested room is inaccessible"))) + }, + + | Some(SummaryAccessibility::Accessible(room)) => { + let (children, inaccessible_children) = + get_parent_children_via(&room, suggested_only) + .stream() + .broad_filter_map(|(child, _via)| async move { + match services + .rooms + .spaces + .get_summary_and_children_local(&child, identifier) + .await + .ok()? + { + | None => None, + + | Some(SummaryAccessibility::Inaccessible) => + Some((None, Some(child))), + + | Some(SummaryAccessibility::Accessible(summary)) => + Some((Some(summary), None)), + } + }) + .unzip() + .map(|(children, inaccessible_children): (Vec<_>, Vec<_>)| { + ( + children.into_iter().flatten().map(Into::into).collect(), + inaccessible_children.into_iter().flatten().collect(), + ) + }) + .await; + + Ok(get_hierarchy::v1::Response { room, children, inaccessible_children }) + }, } } diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 27a4485c..f53e1a15 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,14 +1,15 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use base64::{engine::general_purpose, Engine as _}; -use conduwuit::{err, utils, utils::hash::sha256, warn, Err, Error, PduEvent, Result}; +use base64::{Engine as _, engine::general_purpose}; +use conduwuit::{ + Err, Error, PduEvent, Result, err, pdu::gen_event_id, utils, utils::hash::sha256, warn, +}; use ruma::{ + CanonicalJsonValue, OwnedUserId, UserId, api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, - CanonicalJsonValue, OwnedUserId, UserId, }; -use service::pdu::gen_event_id; use crate::Ruma; @@ -36,21 +37,14 @@ pub(crate) async fn create_invite_route( } if let 
Some(server) = body.room_id.server_name() { - if services - .server - .config - .forbidden_remote_server_names - .contains(&server.to_owned()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } if services - .server - .config - .forbidden_remote_server_names - .contains(body.origin()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Received federated/remote invite from banned server {} for room ID {}. Rejecting.", @@ -103,8 +97,7 @@ pub(crate) async fn create_invite_route( return Err!(Request(Forbidden("This room is banned on this homeserver."))); } - if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user).await - { + if services.config.block_non_admin_invites && !services.users.is_admin(&invited_user).await { return Err!(Request(Forbidden("This server does not allow room invites."))); } diff --git a/src/api/server/key.rs b/src/api/server/key.rs index 75801a7a..f9bd0926 100644 --- a/src/api/server/key.rs +++ b/src/api/server/key.rs @@ -3,15 +3,15 @@ use std::{ time::{Duration, SystemTime}, }; -use axum::{extract::State, response::IntoResponse, Json}; -use conduwuit::{utils::timepoint_from_now, Result}; +use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::{Result, utils::timepoint_from_now}; use ruma::{ + MilliSecondsSinceUnixEpoch, Signatures, api::{ - federation::discovery::{get_server_keys, OldVerifyKey, ServerSigningKeys}, OutgoingResponse, + federation::discovery::{OldVerifyKey, ServerSigningKeys, get_server_keys}, }, serde::Raw, - MilliSecondsSinceUnixEpoch, Signatures, }; /// # `GET /_matrix/key/v2/server` diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index b753346c..3204c30c 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,23 +1,23 @@ use axum::extract::State; -use conduwuit::{debug_info, utils::IterStream, warn, Err}; +use conduwuit::{ 
+ Err, Error, Result, debug_info, matrix::pdu::PduBuilder, utils::IterStream, warn, +}; +use conduwuit_service::Services; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, api::{client::error::ErrorKind, federation::membership::prepare_join_event}, events::{ + StateEventType, room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - StateEventType, }, - CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; -use crate::{ - service::{pdu::PduBuilder, Services}, - Error, Result, Ruma, -}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` /// @@ -42,10 +42,8 @@ pub(crate) async fn create_join_event_template_route( .await?; if services - .server - .config - .forbidden_remote_server_names - .contains(body.origin()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} for remote user {} tried joining room ID {} which has a server name that \ @@ -58,12 +56,7 @@ pub(crate) async fn create_join_event_template_route( } if let Some(server) = body.room_id.server_name() { - if services - .server - .config - .forbidden_remote_server_names - .contains(&server.to_owned()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden(warn!( "Room ID server name {server} is banned on this homeserver." 
)))); diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 423e202d..423c8e81 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -1,15 +1,14 @@ +use RoomVersionId::*; use axum::extract::State; -use conduwuit::{debug_warn, Err}; +use conduwuit::{Err, Error, Result, debug_warn, matrix::pdu::PduBuilder, warn}; use ruma::{ + RoomVersionId, api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, events::room::member::{MembershipState, RoomMemberEventContent}, - RoomVersionId, }; use serde_json::value::to_raw_value; -use tracing::warn; -use RoomVersionId::*; -use crate::{service::pdu::PduBuilder, Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` /// @@ -34,10 +33,8 @@ pub(crate) async fn create_knock_event_template_route( .await?; if services - .server - .config - .forbidden_remote_server_names - .contains(body.origin()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} for remote user {} tried knocking room ID {} which has a server name \ @@ -50,12 +47,7 @@ pub(crate) async fn create_knock_event_template_route( } if let Some(server) = body.room_id.server_name() { - if services - .server - .config - .forbidden_remote_server_names - .contains(&server.to_owned()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 936e0fbb..cb6bd2fa 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{Err, Result}; +use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; use ruma::{ api::federation::membership::prepare_leave_event, events::room::member::{MembershipState, RoomMemberEventContent}, @@ -7,7 +7,7 @@ use ruma::{ use serde_json::value::to_raw_value; use 
super::make_join::maybe_strip_event_id; -use crate::{service::pdu::PduBuilder, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/media.rs b/src/api/server/media.rs index e56f5b9d..cbe8595b 100644 --- a/src/api/server/media.rs +++ b/src/api/server/media.rs @@ -1,12 +1,12 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{utils::content_disposition::make_content_disposition, Err, Result}; +use conduwuit::{Err, Result, utils::content_disposition::make_content_disposition}; use conduwuit_service::media::{Dim, FileMeta}; use ruma::{ - api::federation::authenticated_media::{ - get_content, get_content_thumbnail, Content, ContentMetadata, FileOrLocation, - }, Mxc, + api::federation::authenticated_media::{ + Content, ContentMetadata, FileOrLocation, get_content, get_content_thumbnail, + }, }; use crate::Ruma; diff --git a/src/api/server/openid.rs b/src/api/server/openid.rs index 4833fbe1..a09cd7ad 100644 --- a/src/api/server/openid.rs +++ b/src/api/server/openid.rs @@ -1,7 +1,8 @@ use axum::extract::State; +use conduwuit::Result; use ruma::api::federation::openid::get_openid_userinfo; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/openid/userinfo` /// diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index ff74574a..cf66ea71 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -1,5 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; +use conduwuit::{Error, Result}; use ruma::{ api::{ client::error::ErrorKind, @@ -8,7 +9,7 @@ use ruma::{ directory::Filter, }; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/federation/v1/publicRooms` /// diff --git a/src/api/server/query.rs b/src/api/server/query.rs index 69f62e94..9d4fcf73 100644 --- a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -1,16 +1,16 @@ use std::collections::BTreeMap; 
use axum::extract::State; -use conduwuit::{err, Error, Result}; +use conduwuit::{Error, Result, err}; use futures::StreamExt; use get_profile_information::v1::ProfileField; use rand::seq::SliceRandom; use ruma::{ + OwnedServerName, api::{ client::error::ErrorKind, federation::query::{get_profile_information, get_room_information}, }, - OwnedServerName, }; use crate::Ruma; diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 2e615a0c..9c5bfd2b 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -3,20 +3,25 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, + Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_warn, err, error, result::LogErr, trace, utils::{ - stream::{automatic_width, BroadbandExt, TryBroadbandExt}, - IterStream, ReadyExt, + IterStream, ReadyExt, millis_since_unix_epoch, + stream::{BroadbandExt, TryBroadbandExt, automatic_width}, }, - warn, Err, Error, Result, + warn, +}; +use conduwuit_service::{ + Services, + sending::{EDU_LIMIT, PDU_LIMIT}, }; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use itertools::Itertools; use ruma::{ + CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, api::{ client::error::ErrorKind, federation::transactions::{ @@ -31,18 +36,9 @@ use ruma::{ events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, serde::Raw, to_device::DeviceIdOrAllDevices, - CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, }; -use service::{ - sending::{EDU_LIMIT, PDU_LIMIT}, - Services, -}; -use utils::millis_since_unix_epoch; -use crate::{ - utils::{self}, - Ruma, -}; +use crate::Ruma; type ResolvedMap = BTreeMap; type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); @@ -585,12 +581,10 @@ async fn handle_edu_signing_key_update( return; } - if let Some(master_key) = master_key { - services - 
.users - .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) - .await - .log_err() - .ok(); - } + services + .users + .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) + .await + .log_err() + .ok(); } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 2b8a0eef..895eca81 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -4,23 +4,23 @@ use std::borrow::Borrow; use axum::extract::State; use conduwuit::{ - at, err, + Err, Result, at, err, pdu::gen_event_id_canonical_json, utils::stream::{IterStream, TryBroadbandExt}, - warn, Err, Result, + warn, }; +use conduwuit_service::Services; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - api::federation::membership::create_join_event, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, + api::federation::membership::create_join_event, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, }; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use service::Services; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use crate::Ruma; @@ -135,7 +135,7 @@ async fn create_join_event( if state_key != sender { return Err!(Request(BadJson("State key does not match sender user."))); - }; + } if let Some(authorising_user) = content.join_authorized_via_users_server { use ruma::RoomVersionId::*; @@ -238,8 +238,6 @@ async fn create_join_event( .rooms .auth_chain .event_ids_iter(room_id, starting_events) - .await? 
- .map(Ok) .broad_and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) @@ -270,10 +268,8 @@ pub(crate) async fn create_join_event_v1_route( body: Ruma, ) -> Result { if services - .server - .config - .forbidden_remote_server_names - .contains(body.origin()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} tried joining room ID {} through us who has a server name that is \ @@ -285,12 +281,7 @@ pub(crate) async fn create_join_event_v1_route( } if let Some(server) = body.room_id.server_name() { - if services - .server - .config - .forbidden_remote_server_names - .contains(&server.to_owned()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ globally forbidden. Rejecting.", @@ -318,21 +309,14 @@ pub(crate) async fn create_join_event_v2_route( body: Ruma, ) -> Result { if services - .server - .config - .forbidden_remote_server_names - .contains(body.origin()) + .moderation + .is_remote_server_forbidden(body.origin()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { - if services - .server - .config - .forbidden_remote_server_names - .contains(&server.to_owned()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ globally forbidden. 
Rejecting.", diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index b07620af..8d3697d2 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,15 +1,19 @@ use axum::extract::State; -use conduwuit::{err, pdu::gen_event_id_canonical_json, warn, Err, PduEvent, Result}; +use conduwuit::{ + Err, Result, err, + matrix::pdu::{PduEvent, gen_event_id_canonical_json}, + warn, +}; use futures::FutureExt; use ruma::{ - api::federation::knock::send_knock, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, - serde::JsonObject, OwnedServerName, OwnedUserId, RoomVersionId::*, + api::federation::knock::send_knock, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, + serde::JsonObject, }; use crate::Ruma; @@ -22,10 +26,8 @@ pub(crate) async fn create_knock_event_v1_route( body: Ruma, ) -> Result { if services - .server - .config - .forbidden_remote_server_names - .contains(body.origin()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} tried knocking room ID {} who has a server name that is globally \ @@ -37,12 +39,7 @@ pub(crate) async fn create_knock_event_v1_route( } if let Some(server) = body.room_id.server_name() { - if services - .server - .config - .forbidden_remote_server_names - .contains(&server.to_owned()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried knocking room ID {} which has a server name that is globally \ forbidden. 
Rejecting.", @@ -137,7 +134,7 @@ pub(crate) async fn create_knock_event_v1_route( if state_key != sender { return Err!(Request(InvalidParam("state_key does not match sender user of event."))); - }; + } let origin: OwnedServerName = serde_json::from_value( value diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index e955a267..d3dc994c 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,22 +1,20 @@ #![allow(deprecated)] use axum::extract::State; -use conduwuit::{err, Err, Result}; +use conduwuit::{Err, Result, err, matrix::pdu::gen_event_id_canonical_json}; +use conduwuit_service::Services; use futures::FutureExt; use ruma::{ + OwnedRoomId, OwnedUserId, RoomId, ServerName, api::federation::membership::create_leave_event, events::{ - room::member::{MembershipState, RoomMemberEventContent}, StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, - OwnedRoomId, OwnedUserId, RoomId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; -use crate::{ - service::{pdu::gen_event_id_canonical_json, Services}, - Ruma, -}; +use crate::Ruma; /// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/state.rs b/src/api/server/state.rs index eab1f138..8c786815 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,9 +1,9 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{at, err, utils::IterStream, Result}; +use conduwuit::{Result, at, err, utils::IterStream}; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{api::federation::event::get_room_state, OwnedEventId}; +use ruma::{OwnedEventId, api::federation::event::get_room_state}; use super::AccessCheck; use crate::Ruma; @@ -56,8 +56,6 @@ pub(crate) async fn get_room_state_route( .rooms .auth_chain .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .await? 
- .map(Ok) .and_then(|id| async move { services.rooms.timeline.get_pdu_json(&id).await }) .and_then(|pdu| { services diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 4973dd3a..648d4575 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,9 +1,9 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{at, err, Result}; -use futures::StreamExt; -use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; +use conduwuit::{Result, at, err}; +use futures::{StreamExt, TryStreamExt}; +use ruma::{OwnedEventId, api::federation::event::get_room_state_ids}; use super::AccessCheck; use crate::Ruma; @@ -44,10 +44,8 @@ pub(crate) async fn get_room_state_ids_route( .rooms .auth_chain .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .await? - .map(|id| (*id).to_owned()) - .collect() - .await; + .try_collect() + .await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids, pdu_ids }) } diff --git a/src/api/server/user.rs b/src/api/server/user.rs index 321d0b66..80c353ab 100644 --- a/src/api/server/user.rs +++ b/src/api/server/user.rs @@ -10,8 +10,8 @@ use ruma::api::{ }; use crate::{ - client::{claim_keys_helper, get_keys_helper}, Ruma, + client::{claim_keys_helper, get_keys_helper}, }; /// # `GET /_matrix/federation/v1/user/devices/{userId}` diff --git a/src/api/server/utils.rs b/src/api/server/utils.rs index 4f3fa245..5696e44b 100644 --- a/src/api/server/utils.rs +++ b/src/api/server/utils.rs @@ -1,6 +1,6 @@ -use conduwuit::{implement, is_false, Err, Result}; +use conduwuit::{Err, Result, implement, is_false}; use conduwuit_service::Services; -use futures::{future::OptionFuture, join, FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, future::OptionFuture, join}; use ruma::{EventId, RoomId, ServerName}; pub(super) struct AccessCheck<'a> { diff --git a/src/api/server/version.rs b/src/api/server/version.rs index 036b61f7..b08ff77a 100644 --- 
a/src/api/server/version.rs +++ b/src/api/server/version.rs @@ -1,6 +1,7 @@ +use conduwuit::Result; use ruma::api::federation::discovery::get_server_version; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/version` /// diff --git a/src/api/server/well_known.rs b/src/api/server/well_known.rs index 48caa7d6..75c7cf5d 100644 --- a/src/api/server/well_known.rs +++ b/src/api/server/well_known.rs @@ -1,7 +1,8 @@ use axum::extract::State; +use conduwuit::{Error, Result}; use ruma::api::{client::error::ErrorKind, federation::discovery::discover_homeserver}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /.well-known/matrix/server` /// diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index ef2df4ff..f42b049b 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -17,17 +17,24 @@ crate-type = [ ] [features] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", +brotli_compression = [ + "reqwest/brotli", +] +conduwuit_mods = [ + "dep:libloading" +] +gzip_compression = [ + "reqwest/gzip", +] +hardened_malloc = [ + "dep:hardened_malloc-rs" ] jemalloc = [ "dep:tikv-jemalloc-sys", "dep:tikv-jemalloc-ctl", "dep:tikv-jemallocator", ] +jemalloc_conf = [] jemalloc_prof = [ "tikv-jemalloc-sys/profiling", ] @@ -36,29 +43,23 @@ jemalloc_stats = [ "tikv-jemalloc-ctl/stats", "tikv-jemallocator/stats", ] -jemalloc_conf = [] -hardened_malloc = [ - "dep:hardened_malloc-rs" -] -gzip_compression = [ - "reqwest/gzip", -] -brotli_compression = [ - "reqwest/brotli", +perf_measurements = [] +release_max_log_level = [ + "tracing/max_level_trace", + "tracing/release_max_level_info", + "log/max_level_trace", + "log/release_max_level_info", ] +sentry_telemetry = [] zstd_compression = [ "reqwest/zstd", ] -perf_measurements = [] -sentry_telemetry = [] -conduwuit_mods = [ - "dep:libloading" -] [dependencies] argon2.workspace = true arrayvec.workspace 
= true axum.workspace = true +axum-extra.workspace = true bytes.workspace = true bytesize.workspace = true cargo_toml.workspace = true @@ -92,6 +93,8 @@ serde_json.workspace = true serde_regex.workspace = true serde_yaml.workspace = true serde.workspace = true +smallvec.workspace = true +smallstr.workspace = true thiserror.workspace = true tikv-jemallocator.optional = true tikv-jemallocator.workspace = true @@ -114,5 +117,8 @@ nix.workspace = true hardened_malloc-rs.workspace = true hardened_malloc-rs.optional = true +[dev-dependencies] +maplit.workspace = true + [lints] workspace = true diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 57143e85..2424e99c 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -2,26 +2,24 @@ use std::{ cell::OnceCell, - ffi::{c_char, c_void, CStr}, + ffi::{CStr, c_char, c_void}, fmt::Debug, sync::RwLock, }; use arrayvec::ArrayVec; -use const_str::concat_bytes; use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; use crate::{ - err, is_equal_to, is_nonzero, + Result, err, is_equal_to, is_nonzero, utils::{math, math::Tried}, - Result, }; #[cfg(feature = "jemalloc_conf")] #[unsafe(no_mangle)] -pub static malloc_conf: &[u8] = concat_bytes!( +pub static malloc_conf: &[u8] = const_str::concat_bytes!( "lg_extent_max_active_fit:4", ",oversize_threshold:16777216", ",tcache_max:2097152", @@ -128,7 +126,7 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { } macro_rules! mallctl { - ($name:expr) => {{ + ($name:expr_2021) => {{ thread_local! { static KEY: OnceCell = OnceCell::default(); }; @@ -141,7 +139,7 @@ macro_rules! mallctl { } pub mod this_thread { - use super::{is_nonzero, key, math, Debug, Key, OnceCell, Result}; + use super::{Debug, Key, OnceCell, Result, is_nonzero, key, math}; thread_local! 
{ static ALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; @@ -261,18 +259,18 @@ pub fn decay>>(arena: I) -> Result { } pub fn set_muzzy_decay>>(arena: I, decay_ms: isize) -> Result { - if let Some(arena) = arena.into() { - set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms) - } else { - set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms) + match arena.into() { + | Some(arena) => + set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms), + | _ => set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms), } } pub fn set_dirty_decay>>(arena: I, decay_ms: isize) -> Result { - if let Some(arena) = arena.into() { - set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms) - } else { - set(&mallctl!("arenas.dirty_decay_ms"), decay_ms) + match arena.into() { + | Some(arena) => + set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms), + | _ => set(&mallctl!("arenas.dirty_decay_ms"), decay_ms), } } @@ -337,6 +335,12 @@ where Ok(res) } +#[tracing::instrument( + name = "get", + level = "trace" + skip_all, + fields(?key) +)] fn get(key: &Key) -> Result where T: Copy + Debug, @@ -348,6 +352,12 @@ where unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err) } +#[tracing::instrument( + name = "xchg", + level = "trace" + skip_all, + fields(?key, ?val) +)] fn xchg(key: &Key, val: T) -> Result where T: Copy + Debug, diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 988d4143..f9d51eeb 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -4,7 +4,7 @@ use either::Either; use figment::Figment; use super::DEPRECATED_KEYS; -use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result, Server}; +use crate::{Config, Err, Result, Server, debug, debug_info, debug_warn, error, warn}; /// Performs check() with additional checks specific to reloading old config /// with new config. 
@@ -28,6 +28,10 @@ pub fn check(config: &Config) -> Result { warn!("Note: conduwuit was built without optimisations (i.e. debug build)"); } + if config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure { + warn!("\n\nWARNING: \n\nTLS CERTIFICATE VALIDATION IS DISABLED, THIS IS HIGHLY INSECURE AND SHOULD NOT BE USED IN PRODUCTION.\n\n"); + } + warn_deprecated(config); warn_unknown_key(config); @@ -38,7 +42,7 @@ pub fn check(config: &Config) -> Result { )); } - if cfg!(all(feature = "hardened_malloc", feature = "jemalloc")) { + if cfg!(all(feature = "hardened_malloc", feature = "jemalloc", not(target_env = "msvc"))) { debug_warn!( "hardened_malloc and jemalloc compile-time features are both enabled, this causes \ jemalloc to be used." @@ -126,6 +130,14 @@ pub fn check(config: &Config) -> Result { )); } + if config.emergency_password == Some(String::new()) { + return Err!(Config( + "emergency_password", + "Emergency password was set to an empty string, this is not valid. Unset \ + emergency_password to disable it or set it to a real password." + )); + } + // check if the user specified a registration token as `""` if config.registration_token == Some(String::new()) { return Err!(Config( diff --git a/src/core/config/manager.rs b/src/core/config/manager.rs index 0c95ca15..e55916ba 100644 --- a/src/core/config/manager.rs +++ b/src/core/config/manager.rs @@ -4,13 +4,13 @@ use std::{ ptr, ptr::null_mut, sync::{ - atomic::{AtomicPtr, Ordering}, Arc, + atomic::{AtomicPtr, Ordering}, }, }; use super::Config; -use crate::{implement, Result}; +use crate::{Result, implement}; /// The configuration manager is an indirection to reload the configuration for /// the server while it is running. 
In order to not burden or clutter the many diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 415c9ba9..5374c2c2 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -3,7 +3,7 @@ pub mod manager; pub mod proxy; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet}, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, path::{Path, PathBuf}, }; @@ -14,18 +14,18 @@ use either::{ Either::{Left, Right}, }; use figment::providers::{Env, Format, Toml}; -pub use figment::{value::Value as FigmentValue, Figment}; +pub use figment::{Figment, value::Value as FigmentValue}; use regex::RegexSet; use ruma::{ - api::client::discovery::discover_support::ContactRole, OwnedRoomOrAliasId, OwnedServerName, - OwnedUserId, RoomVersionId, + OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, RoomVersionId, + api::client::discovery::discover_support::ContactRole, }; -use serde::{de::IgnoredAny, Deserialize}; +use serde::{Deserialize, de::IgnoredAny}; use url::Url; use self::proxy::ProxyConfig; pub use self::{check::check, manager::Manager}; -use crate::{err, error::Error, utils::sys, Result}; +use crate::{Result, err, error::Error, utils::sys}; /// All the config options for conduwuit. #[allow(clippy::struct_excessive_bools)] @@ -52,7 +52,7 @@ use crate::{err, error::Error, utils::sys, Result}; ### For more information, see: ### https://conduwuit.puppyirl.gay/configuration.html "#, - ignore = "catchall well_known tls" + ignore = "catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure" )] pub struct Config { /// The server_name is the pretty name of this server. It is used as a @@ -161,14 +161,12 @@ pub struct Config { pub new_user_displayname_suffix: String, /// If enabled, conduwuit will send a simple GET request periodically to - /// `https://pupbrain.dev/check-for-updates/stable` for any new - /// announcements made. 
Despite the name, this is not an update check - /// endpoint, it is simply an announcement check endpoint. + /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new + /// announcements or major updates. This is not an update check endpoint. /// - /// This is disabled by default as this is rarely used except for security - /// updates or major updates. - #[serde(default, alias = "allow_announcements_check")] - pub allow_check_for_updates: bool, + /// default: true + #[serde(alias = "allow_check_for_updates", default = "true_fn")] + pub allow_announcements_check: bool, /// Set this to any float value to multiply conduwuit's in-memory LRU caches /// with such as "auth_chain_cache_capacity". @@ -252,14 +250,6 @@ pub struct Config { #[serde(default = "default_servernameevent_data_cache_capacity")] pub servernameevent_data_cache_capacity: u32, - /// default: varies by system - #[serde(default = "default_server_visibility_cache_capacity")] - pub server_visibility_cache_capacity: u32, - - /// default: varies by system - #[serde(default = "default_user_visibility_cache_capacity")] - pub user_visibility_cache_capacity: u32, - /// default: varies by system #[serde(default = "default_stateinfo_cache_capacity")] pub stateinfo_cache_capacity: u32, @@ -480,6 +470,36 @@ pub struct Config { #[serde(default = "default_pusher_idle_timeout")] pub pusher_idle_timeout: u64, + /// Maximum time to receive a request from a client (seconds). + /// + /// default: 75 + #[serde(default = "default_client_receive_timeout")] + pub client_receive_timeout: u64, + + /// Maximum time to process a request received from a client (seconds). 
+ /// + /// default: 180 + #[serde(default = "default_client_request_timeout")] + pub client_request_timeout: u64, + + /// Maximum time to transmit a response to a client (seconds) + /// + /// default: 120 + #[serde(default = "default_client_response_timeout")] + pub client_response_timeout: u64, + + /// Grace period for clean shutdown of client requests (seconds). + /// + /// default: 10 + #[serde(default = "default_client_shutdown_timeout")] + pub client_shutdown_timeout: u64, + + /// Grace period for clean shutdown of federation requests (seconds). + /// + /// default: 5 + #[serde(default = "default_sender_shutdown_timeout")] + pub sender_shutdown_timeout: u64, + /// Enables registration. If set to false, no users can register on this /// server. /// @@ -510,8 +530,9 @@ pub struct Config { /// display: sensitive pub registration_token: Option, - /// Path to a file on the system that gets read for the registration token. - /// this config option takes precedence/priority over "registration_token". + /// Path to a file on the system that gets read for additional registration + /// tokens. Multiple tokens can be added if you separate them with + /// whitespace /// /// conduwuit must be able to access the file, and it must not be empty /// @@ -527,9 +548,19 @@ pub struct Config { #[serde(default = "true_fn")] pub allow_federation: bool, + /// Allows federation requests to be made to itself + /// + /// This isn't intended and is very likely a bug if federation requests are + /// being sent to yourself. This currently mainly exists for development + /// purposes. #[serde(default)] pub federation_loopback: bool, + /// Always calls /forget on behalf of the user if leaving a room. This is a + /// part of MSC4267 "Automatically forgetting rooms on leave" + #[serde(default)] + pub forget_forced_upon_leave: bool, + /// Set this to true to require authentication on the normally /// unauthenticated profile retrieval endpoints (GET) /// "/_matrix/client/v3/profile/{userId}". 
@@ -607,9 +638,9 @@ pub struct Config { /// Default room version conduwuit will create rooms with. /// - /// Per spec, room version 10 is the default. + /// Per spec, room version 11 is the default. /// - /// default: 10 + /// default: 11 #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, @@ -682,7 +713,7 @@ pub struct Config { /// Currently, conduwuit doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers. /// - /// example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] + /// example: ["matrix.org", "tchncs.de"] /// /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] @@ -944,7 +975,7 @@ pub struct Config { /// Type of RocksDB database compression to use. /// - /// Available options are "zstd", "zlib", "bz2", "lz4", or "none". + /// Available options are "zstd", "bz2", "lz4", or "none". /// /// It is best to use ZSTD as an overall good balance between /// speed/performance, storage, IO amplification, and CPU usage. For more @@ -1058,6 +1089,13 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_checksums: bool, + /// Enables the "atomic flush" mode in rocksdb. This option is not intended + /// for users. It may be removed or ignored in future versions. Atomic flush + /// may be enabled by the paranoid to possibly improve database integrity at + /// the cost of performance. + #[serde(default)] + pub rocksdb_atomic_flush: bool, + /// Database repair mode (for RocksDB SST corruption). /// /// Use this option when the server reports corruption while running or @@ -1093,10 +1131,10 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_compaction_ioprio_idle: bool, - /// Disables RocksDB compaction. You should never ever have to set this - /// option to true. 
If you for some reason find yourself needing to use this - /// option as part of troubleshooting or a bug, please reach out to us in - /// the conduwuit Matrix room with information and details. + /// Enables RocksDB compaction. You should never ever have to set this + /// option to false. If you for some reason find yourself needing to use + /// this option as part of troubleshooting or a bug, please reach out to us + /// in the conduwuit Matrix room with information and details. /// /// Disabling compaction will lead to a significantly bloated and /// explosively large database, gradually poor performance, unnecessarily @@ -1321,33 +1359,81 @@ pub struct Config { #[serde(default)] pub prune_missing_media: bool, - /// Vector list of servers that conduwuit will refuse to download remote - /// media from. + /// List of forbidden server names via regex patterns that we will block + /// incoming AND outgoing federation with, and block client room joins / + /// remote user invites. /// - /// default: [] - #[serde(default)] - pub prevent_media_downloads_from: HashSet, - - /// List of forbidden server names that we will block incoming AND outgoing - /// federation with, and block client room joins / remote user invites. + /// Note that your messages can still make it to forbidden servers through + /// backfilling. Events we receive from forbidden servers via backfill + /// from servers we *do* federate with will be stored in the database. /// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and /// outbound federation handler. /// - /// Basically "global" ACLs. + /// You can set this to ["*"] to block all servers by default, and then + /// use `allowed_remote_server_names` to allow only specific servers. 
+ /// + /// example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] - #[serde(default)] - pub forbidden_remote_server_names: HashSet, + #[serde(default, with = "serde_regex")] + pub forbidden_remote_server_names: RegexSet, - /// List of forbidden server names that we will block all outgoing federated - /// room directory requests for. Useful for preventing our users from - /// wandering into bad servers or spaces. + /// List of allowed server names via regex patterns that we will allow, + /// regardless of if they match `forbidden_remote_server_names`. + /// + /// This option has no effect if `forbidden_remote_server_names` is empty. + /// + /// example: ["goodserver\\.tld$", "goodphrase"] /// /// default: [] - #[serde(default = "HashSet::new")] - pub forbidden_remote_room_directory_server_names: HashSet, + #[serde(default, with = "serde_regex")] + pub allowed_remote_server_names: RegexSet, + + /// Vector list of regex patterns of server names that conduwuit will refuse + /// to download remote media from. + /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub prevent_media_downloads_from: RegexSet, + + /// List of forbidden server names via regex patterns that we will block all + /// outgoing federated room directory requests for. Useful for preventing + /// our users from wandering into bad servers or spaces. + /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub forbidden_remote_room_directory_server_names: RegexSet, + + /// Vector list of regex patterns of server names that conduwuit will not + /// send messages to the client from. + /// + /// Note that there is no way for clients to receive messages once a server + /// has become unignored without doing a full sync. This is a protocol + /// limitation with the current sync protocols. 
This means this is somewhat + /// of a nuclear option. + /// + /// example: ["reallybadserver\.tld$", "reallybadphrase", + /// "69dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub ignore_messages_from_server_names: RegexSet, + + /// Send messages from users that the user has ignored to the client. + /// + /// There is no way for clients to receive messages sent while a user was + /// ignored without doing a full sync. This is a protocol limitation with + /// the current sync protocols. Disabling this option will move + /// responsibility of ignoring messages to the client, which can avoid this + /// limitation. + #[serde(default)] + pub send_messages_from_ignored_users_to_client: bool, /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you /// do not want conduwuit to send outbound requests to. Defaults to @@ -1468,11 +1554,10 @@ pub struct Config { /// used, and startup as warnings if any room aliases in your database have /// a forbidden room alias/ID. /// - /// example: ["19dollarfortnitecards", "b[4a]droom"] + /// example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] /// /// default: [] - #[serde(default)] - #[serde(with = "serde_regex")] + #[serde(default, with = "serde_regex")] pub forbidden_alias_names: RegexSet, /// List of forbidden username patterns/strings. @@ -1484,11 +1569,10 @@ pub struct Config { /// startup as warnings if any local users in your database have a forbidden /// username. /// - /// example: ["administrator", "b[a4]dusernam[3e]"] + /// example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] /// /// default: [] - #[serde(default)] - #[serde(with = "serde_regex")] + #[serde(default, with = "serde_regex")] pub forbidden_usernames: RegexSet, /// Retry failed and incomplete messages to remote servers immediately upon @@ -1591,7 +1675,7 @@ pub struct Config { /// Sentry reporting URL, if a custom one is desired. 
/// /// display: sensitive - /// default: "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" + /// default: "" #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, @@ -1758,6 +1842,19 @@ pub struct Config { #[serde(default = "true_fn")] pub config_reload_signal: bool, + /// Toggles ignore checking/validating TLS certificates + /// + /// This applies to everything, including URL previews, federation requests, + /// etc. This is a hidden argument that should NOT be used in production as + /// it is highly insecure and I will personally yell at you if I catch you + /// using this. + #[serde(default)] + pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure: + bool, + + // external structure; separate section + #[serde(default)] + pub blurhashing: BlurhashConfig, #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime @@ -1808,6 +1905,31 @@ pub struct WellKnownConfig { pub support_mxid: Option, } +#[derive(Clone, Copy, Debug, Deserialize, Default)] +#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] +#[config_example_generator(filename = "conduwuit-example.toml", section = "global.blurhashing")] +pub struct BlurhashConfig { + /// blurhashing x component, 4 is recommended by https://blurha.sh/ + /// + /// default: 4 + #[serde(default = "default_blurhash_x_component")] + pub components_x: u32, + /// blurhashing y component, 3 is recommended by https://blurha.sh/ + /// + /// default: 3 + #[serde(default = "default_blurhash_y_component")] + pub components_y: u32, + /// Max raw size that the server will blurhash, this is the size of the + /// image after converting it to raw data, it should be higher than the + /// upload limit but not too high. The higher it is the higher the + /// potential load will be for clients requesting blurhashes. The default + /// is 33.55MB. 
Setting it to 0 disables blurhashing. + /// + /// default: 33554432 + #[serde(default = "default_blurhash_max_raw_size")] + pub blurhash_max_raw_size: u64, +} + #[derive(Deserialize, Clone, Debug)] #[serde(transparent)] struct ListeningPort { @@ -1871,7 +1993,7 @@ impl Config { let mut addrs = Vec::with_capacity( self.get_bind_hosts() .len() - .saturating_add(self.get_bind_ports().len()), + .saturating_mul(self.get_bind_ports().len()), ); for host in &self.get_bind_hosts() { for port in &self.get_bind_ports() { @@ -1949,10 +2071,6 @@ fn default_servernameevent_data_cache_capacity() -> u32 { parallelism_scaled_u32(100_000).saturating_add(500_000) } -fn default_server_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(500) } - -fn default_user_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } - fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) } fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } @@ -2072,7 +2190,12 @@ fn default_rocksdb_max_log_file_size() -> usize { fn default_rocksdb_parallelism_threads() -> usize { 0 } -fn default_rocksdb_compression_algo() -> String { "zstd".to_owned() } +fn default_rocksdb_compression_algo() -> String { + cfg!(feature = "zstd_compression") + .then_some("zstd") + .unwrap_or("none") + .to_owned() +} /// Default RocksDB compression level is 32767, which is internally read by /// RocksDB as the default magic number and translated to the library's default @@ -2091,7 +2214,7 @@ fn default_rocksdb_stats_level() -> u8 { 1 } // I know, it's a great name #[must_use] #[inline] -pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V10 } +pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V11 } fn default_ip_range_denylist() -> Vec { vec![ @@ -2123,9 +2246,7 @@ fn default_url_preview_max_spider_size() -> usize { fn default_new_user_displayname_suffix() -> String { "🏳️‍⚧️".to_owned() } -fn 
default_sentry_endpoint() -> Option { - Url::parse("https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536").ok() -} +fn default_sentry_endpoint() -> Option { None } fn default_sentry_traces_sample_rate() -> f32 { 0.15 } @@ -2169,3 +2290,23 @@ fn default_stream_width_default() -> usize { 32 } fn default_stream_width_scale() -> f32 { 1.0 } fn default_stream_amplification() -> usize { 1024 } + +fn default_client_receive_timeout() -> u64 { 75 } + +fn default_client_request_timeout() -> u64 { 180 } + +fn default_client_response_timeout() -> u64 { 120 } + +fn default_client_shutdown_timeout() -> u64 { 15 } + +fn default_sender_shutdown_timeout() -> u64 { 5 } + +// blurhashing defaults recommended by https://blurha.sh/ +// 2^25 +pub(super) fn default_blurhash_max_raw_size() -> u64 { 33_554_432 } + +pub(super) fn default_blurhash_x_component() -> u32 { 4 } + +pub(super) fn default_blurhash_y_component() -> u32 { 3 } + +// end recommended & blurhashing defaults diff --git a/src/core/debug.rs b/src/core/debug.rs index ca0f2f2e..21a5ada4 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -1,6 +1,6 @@ #![allow(clippy::disallowed_macros)] -use std::{any::Any, panic}; +use std::{any::Any, env, panic, sync::LazyLock}; // Export debug proc_macros pub use conduwuit_macros::recursion_depth; @@ -12,8 +12,9 @@ pub use crate::{result::DebugInspect, utils::debug::*}; /// Log event at given level in debug-mode (when debug-assertions are enabled). /// In release-mode it becomes DEBUG level, and possibly subject to elision. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
debug_event { - ( $level:expr, $($x:tt)+ ) => { + ( $level:expr_2021, $($x:tt)+ ) => { if $crate::debug::logging() { ::tracing::event!( $level, _debug = true, $($x)+ ) } else { @@ -58,16 +59,26 @@ pub const INFO_SPAN_LEVEL: Level = if cfg!(debug_assertions) { Level::DEBUG }; -pub fn set_panic_trap() { +pub static DEBUGGER: LazyLock = + LazyLock::new(|| env::var("_").unwrap_or_default().ends_with("gdb")); + +#[cfg_attr(debug_assertions, crate::ctor)] +#[cfg_attr(not(debug_assertions), allow(dead_code))] +fn set_panic_trap() { + if !*DEBUGGER { + return; + } + let next = panic::take_hook(); panic::set_hook(Box::new(move |info| { panic_handler(info, &next); })); } -#[inline(always)] +#[cold] +#[inline(never)] #[allow(deprecated_in_future)] -fn panic_handler(info: &panic::PanicHookInfo<'_>, next: &dyn Fn(&panic::PanicHookInfo<'_>)) { +pub fn panic_handler(info: &panic::PanicHookInfo<'_>, next: &dyn Fn(&panic::PanicHookInfo<'_>)) { trap(); next(info); } diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 60fa5bff..2eb6823a 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -33,6 +33,7 @@ //! option of replacing `error!` with `debug_error!`. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! Err { ($($args:tt)*) => { Err($crate::err!($($args)*)) @@ -40,6 +41,7 @@ macro_rules! Err { } #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! err { (Request(Forbidden($level:ident!($($args:tt)+)))) => {{ let mut buf = String::new(); @@ -109,6 +111,7 @@ macro_rules! err { /// can share the same callsite metadata for the source of our Error and the /// associated logging and tracing event dispatches. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! err_log { ($out:ident, $level:ident, $($fields:tt)+) => {{ use $crate::tracing::{ @@ -136,6 +139,7 @@ macro_rules! err_log { } #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! err_lev { (debug_warn) => { if $crate::debug::logging() { @@ -165,10 +169,10 @@ macro_rules! 
err_lev { use std::{fmt, fmt::Write}; use tracing::{ - level_enabled, Callsite, Event, __macro_support, __tracing_log, + __macro_support, __tracing_log, Callsite, Event, Level, callsite::DefaultCallsite, field::{Field, ValueSet, Visit}, - Level, + level_enabled, }; struct Visitor<'a>(&'a mut String); diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 88ac6d09..e46edf09 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -81,6 +81,8 @@ pub enum Error { #[error("Tracing reload error: {0}")] TracingReload(#[from] tracing_subscriber::reload::Error), #[error(transparent)] + TypedHeader(#[from] axum_extra::typed_header::TypedHeaderRejection), + #[error(transparent)] Yaml(#[from] serde_yaml::Error), // ruma/conduwuit @@ -121,7 +123,7 @@ pub enum Error { #[error(transparent)] Signatures(#[from] ruma::signatures::Error), #[error(transparent)] - StateRes(#[from] ruma::state_res::Error), + StateRes(#[from] crate::state_res::Error), #[error("uiaa")] Uiaa(ruma::api::client::uiaa::UiaaInfo), @@ -152,8 +154,8 @@ impl Error { /// Generate the error message string. 
pub fn message(&self) -> String { match self { - | Self::Federation(ref origin, ref error) => format!("Answer from {origin}: {error}"), - | Self::Ruma(ref error) => response::ruma_error_message(error), + | Self::Federation(origin, error) => format!("Answer from {origin}: {error}"), + | Self::Ruma(error) => response::ruma_error_message(error), | _ => format!("{self}"), } } diff --git a/src/core/error/panic.rs b/src/core/error/panic.rs index c6a83ae0..2e63105b 100644 --- a/src/core/error/panic.rs +++ b/src/core/error/panic.rs @@ -1,6 +1,6 @@ use std::{ any::Any, - panic::{panic_any, RefUnwindSafe, UnwindSafe}, + panic::{RefUnwindSafe, UnwindSafe, panic_any}, }; use super::Error; diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 75e4050d..ae6fce62 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -2,11 +2,11 @@ use bytes::BytesMut; use http::StatusCode; use http_body_util::Full; use ruma::api::{ + OutgoingResponse, client::{ error::{ErrorBody, ErrorKind}, uiaa::UiaaResponse, }, - OutgoingResponse, }; use super::Error; @@ -86,7 +86,7 @@ pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode { pub(super) fn ruma_error_message(error: &ruma::api::client::error::Error) -> String { if let ErrorBody::Standard { message, .. } = &error.body { - return message.to_string(); + return message.clone(); } format!("{error}") diff --git a/src/core/info/cargo.rs b/src/core/info/cargo.rs index c5a1d167..28c6590e 100644 --- a/src/core/info/cargo.rs +++ b/src/core/info/cargo.rs @@ -31,7 +31,7 @@ const ROUTER_MANIFEST: &'static str = (); #[cargo_manifest(crate = "main")] const MAIN_MANIFEST: &'static str = (); -/// Processed list of features access all project crates. This is generated from +/// Processed list of features across all project crates. This is generated from /// the data in the MANIFEST strings and contains all possible project features. /// For *enabled* features see the info::rustc module instead. 
static FEATURES: OnceLock> = OnceLock::new(); diff --git a/src/core/info/room_version.rs b/src/core/info/room_version.rs index b33a8562..51d5d3c6 100644 --- a/src/core/info/room_version.rs +++ b/src/core/info/room_version.rs @@ -2,7 +2,7 @@ use std::iter::once; -use ruma::{api::client::discovery::get_capabilities::RoomVersionStability, RoomVersionId}; +use ruma::{RoomVersionId, api::client::discovery::get_capabilities::RoomVersionStability}; use crate::{at, is_equal_to}; diff --git a/src/core/info/version.rs b/src/core/info/version.rs index 37580210..6abb6e13 100644 --- a/src/core/info/version.rs +++ b/src/core/info/version.rs @@ -7,7 +7,7 @@ use std::sync::OnceLock; -static BRANDING: &str = "conduwuit"; +static BRANDING: &str = "continuwuity"; static SEMANTIC: &str = env!("CARGO_PKG_VERSION"); static VERSION: OnceLock = OnceLock::new(); diff --git a/src/core/log/capture/data.rs b/src/core/log/capture/data.rs index 0ad7a6c2..a4a1225b 100644 --- a/src/core/log/capture/data.rs +++ b/src/core/log/capture/data.rs @@ -1,7 +1,7 @@ use tracing::Level; -use tracing_core::{span::Current, Event}; +use tracing_core::{Event, span::Current}; -use super::{layer::Value, Layer}; +use super::{Layer, layer::Value}; use crate::{info, utils::string::EMPTY}; pub struct Data<'a> { diff --git a/src/core/log/capture/util.rs b/src/core/log/capture/util.rs index 8bad4ba0..65524be5 100644 --- a/src/core/log/capture/util.rs +++ b/src/core/log/capture/util.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, Mutex}; use super::{ - super::{fmt, Level}, + super::{Level, fmt}, Closure, Data, }; use crate::Result; diff --git a/src/core/log/console.rs b/src/core/log/console.rs index 0bc44fa7..d91239ac 100644 --- a/src/core/log/console.rs +++ b/src/core/log/console.rs @@ -1,18 +1,67 @@ +use std::{env, io, sync::LazyLock}; + use tracing::{ - field::{Field, Visit}, Event, Level, Subscriber, + field::{Field, Visit}, }; use tracing_subscriber::{ field::RecordFields, fmt, fmt::{ + FmtContext, FormatEvent, 
FormatFields, MakeWriter, format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, - FmtContext, FormatEvent, FormatFields, }, registry::LookupSpan, }; -use crate::{Config, Result}; +use crate::{Config, Result, apply}; + +static SYSTEMD_MODE: LazyLock = + LazyLock::new(|| env::var("SYSTEMD_EXEC_PID").is_ok() && env::var("JOURNAL_STREAM").is_ok()); + +pub struct ConsoleWriter { + stdout: io::Stdout, + stderr: io::Stderr, + _journal_stream: [u64; 2], + use_stderr: bool, +} + +impl ConsoleWriter { + #[must_use] + pub fn new(_config: &Config) -> Self { + let journal_stream = get_journal_stream(); + Self { + stdout: io::stdout(), + stderr: io::stderr(), + _journal_stream: journal_stream.into(), + use_stderr: journal_stream.0 != 0, + } + } +} + +impl<'a> MakeWriter<'a> for ConsoleWriter { + type Writer = &'a Self; + + fn make_writer(&'a self) -> Self::Writer { self } +} + +impl io::Write for &'_ ConsoleWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + if self.use_stderr { + self.stderr.lock().write(buf) + } else { + self.stdout.lock().write(buf) + } + } + + fn flush(&mut self) -> io::Result<()> { + if self.use_stderr { + self.stderr.lock().flush() + } else { + self.stdout.lock().flush() + } + } +} pub struct ConsoleFormat { _compact: Format, @@ -20,10 +69,6 @@ pub struct ConsoleFormat { pretty: Format, } -struct ConsoleVisitor<'a> { - visitor: DefaultVisitor<'a>, -} - impl ConsoleFormat { #[must_use] pub fn new(config: &Config) -> Self { @@ -68,6 +113,10 @@ where } } +struct ConsoleVisitor<'a> { + visitor: DefaultVisitor<'a>, +} + impl<'writer> FormatFields<'writer> for ConsoleFormat { fn format_fields(&self, writer: Writer<'writer>, fields: R) -> Result<(), std::fmt::Error> where @@ -92,3 +141,19 @@ impl Visit for ConsoleVisitor<'_> { self.visitor.record_debug(field, value); } } + +#[must_use] +fn get_journal_stream() -> (u64, u64) { + is_systemd_mode() + .then(|| env::var("JOURNAL_STREAM").ok()) + .flatten() + .as_deref() + .and_then(|s| 
s.split_once(':')) + .map(apply!(2, str::parse)) + .map(apply!(2, Result::unwrap_or_default)) + .unwrap_or((0, 0)) +} + +#[inline] +#[must_use] +pub fn is_systemd_mode() -> bool { *SYSTEMD_MODE } diff --git a/src/core/log/fmt.rs b/src/core/log/fmt.rs index 353d4442..b73d0c9b 100644 --- a/src/core/log/fmt.rs +++ b/src/core/log/fmt.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use super::{color, Level}; +use super::{Level, color}; use crate::Result; pub fn html(out: &mut S, level: &Level, span: &str, msg: &str) -> Result<()> diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 0c51a383..f7b2521a 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -2,14 +2,14 @@ pub mod capture; pub mod color; -mod console; +pub mod console; pub mod fmt; pub mod fmt_span; mod reload; mod suppress; pub use capture::Capture; -pub use console::ConsoleFormat; +pub use console::{ConsoleFormat, ConsoleWriter, is_systemd_mode}; pub use reload::{LogLevelReloadHandles, ReloadHandle}; pub use suppress::Suppress; pub use tracing::Level; @@ -33,8 +33,9 @@ pub struct Log { // the crate namespace like these. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! event { - ( $level:expr, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } + ( $level:expr_2021, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } } #[macro_export] diff --git a/src/core/log/reload.rs b/src/core/log/reload.rs index 12d14f48..e6a16c9f 100644 --- a/src/core/log/reload.rs +++ b/src/core/log/reload.rs @@ -3,9 +3,9 @@ use std::{ sync::{Arc, Mutex}, }; -use tracing_subscriber::{reload, EnvFilter}; +use tracing_subscriber::{EnvFilter, reload}; -use crate::{error, Result}; +use crate::{Result, error}; /// We need to store a reload::Handle value, but can't name it's type explicitly /// because the S type parameter depends on the subscriber's previous layers. 
In diff --git a/src/core/matrix/event.rs b/src/core/matrix/event.rs new file mode 100644 index 00000000..29153334 --- /dev/null +++ b/src/core/matrix/event.rs @@ -0,0 +1,73 @@ +use std::{ + borrow::Borrow, + fmt::{Debug, Display}, + hash::Hash, +}; + +use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; +use serde_json::value::RawValue as RawJsonValue; + +/// Abstraction of a PDU so users can have their own PDU types. +pub trait Event { + type Id: Clone + Debug + Display + Eq + Ord + Hash + Send + Borrow; + + /// The `EventId` of this event. + fn event_id(&self) -> &Self::Id; + + /// The `RoomId` of this event. + fn room_id(&self) -> &RoomId; + + /// The `UserId` of this event. + fn sender(&self) -> &UserId; + + /// The time of creation on the originating server. + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch; + + /// The event type. + fn event_type(&self) -> &TimelineEventType; + + /// The event's content. + fn content(&self) -> &RawJsonValue; + + /// The state key for this event. + fn state_key(&self) -> Option<&str>; + + /// The events before this event. + // Requires GATs to avoid boxing (and TAIT for making it convenient). + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// All the authenticating events for this event. + // Requires GATs to avoid boxing (and TAIT for making it convenient). + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// If this event is a redaction event this is the event it redacts. 
+ fn redacts(&self) -> Option<&Self::Id>; +} + +impl Event for &T { + type Id = T::Id; + + fn event_id(&self) -> &Self::Id { (*self).event_id() } + + fn room_id(&self) -> &RoomId { (*self).room_id() } + + fn sender(&self) -> &UserId { (*self).sender() } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (*self).origin_server_ts() } + + fn event_type(&self) -> &TimelineEventType { (*self).event_type() } + + fn content(&self) -> &RawJsonValue { (*self).content() } + + fn state_key(&self) -> Option<&str> { (*self).state_key() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (*self).prev_events() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (*self).auth_events() + } + + fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() } +} diff --git a/src/core/matrix/mod.rs b/src/core/matrix/mod.rs new file mode 100644 index 00000000..8c978173 --- /dev/null +++ b/src/core/matrix/mod.rs @@ -0,0 +1,9 @@ +//! Core Matrix Library + +pub mod event; +pub mod pdu; +pub mod state_res; + +pub use event::Event; +pub use pdu::{PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; +pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; diff --git a/src/core/pdu/mod.rs b/src/core/matrix/pdu.rs similarity index 68% rename from src/core/pdu/mod.rs rename to src/core/matrix/pdu.rs index 1a8f6a70..7e1ecfa8 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/matrix/pdu.rs @@ -1,13 +1,13 @@ mod builder; mod content; mod count; -mod event; mod event_id; mod filter; mod id; mod raw_id; mod redact; mod relation; +mod state_key; mod strip; #[cfg(test)] mod tests; @@ -16,21 +16,22 @@ mod unsigned; use std::cmp::Ordering; use ruma::{ - events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, - OwnedRoomId, OwnedUserId, UInt, + CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, UInt, UserId, 
events::TimelineEventType, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; pub use self::{ + Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, builder::{Builder, Builder as PduBuilder}, count::Count, - event::Event, event_id::*, id::*, raw_id::*, - Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, + state_key::{ShortStateKey, StateKey}, }; +use super::Event; use crate::Result; /// Persistent Data Unit (Event) @@ -40,13 +41,13 @@ pub struct Pdu { pub room_id: OwnedRoomId, pub sender: OwnedUserId, #[serde(skip_serializing_if = "Option::is_none")] - pub origin: Option, + pub origin: Option, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: TimelineEventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] - pub state_key: Option, + pub state_key: Option, pub prev_events: Vec, pub depth: UInt, pub auth_events: Vec, @@ -77,6 +78,36 @@ impl Pdu { } } +impl Event for Pdu { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { &self.room_id } + + fn sender(&self) -> &UserId { &self.sender } + + fn event_type(&self) -> &TimelineEventType { &self.kind } + + fn content(&self) -> &RawJsonValue { &self.content } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + MilliSecondsSinceUnixEpoch(self.origin_server_ts) + } + + fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.prev_events.iter() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.auth_events.iter() + } + + fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } +} + /// Prevent derived equality which wouldn't limit itself to event_id impl Eq for Pdu {} @@ -85,12 +116,12 @@ impl PartialEq for Pdu { fn eq(&self, other: &Self) -> bool { self.event_id == other.event_id } } -/// Ordering determined by the Pdu's ID, 
not the memory representations. -impl PartialOrd for Pdu { - fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } -} - /// Ordering determined by the Pdu's ID, not the memory representations. impl Ord for Pdu { fn cmp(&self, other: &Self) -> Ordering { self.event_id.cmp(&other.event_id) } } + +/// Ordering determined by the Pdu's ID, not the memory representations. +impl PartialOrd for Pdu { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } +} diff --git a/src/core/pdu/builder.rs b/src/core/matrix/pdu/builder.rs similarity index 86% rename from src/core/pdu/builder.rs rename to src/core/matrix/pdu/builder.rs index b25d4e9e..5aa0c9ca 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/matrix/pdu/builder.rs @@ -1,11 +1,13 @@ use std::collections::BTreeMap; use ruma::{ - events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, MilliSecondsSinceUnixEpoch, OwnedEventId, + events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, }; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; + +use super::StateKey; /// Build the start of a PDU in order to add it to the Database. 
#[derive(Debug, Deserialize)] @@ -17,7 +19,7 @@ pub struct Builder { pub unsigned: Option, - pub state_key: Option, + pub state_key: Option, pub redacts: Option, @@ -29,15 +31,16 @@ pub struct Builder { type Unsigned = BTreeMap; impl Builder { - pub fn state(state_key: String, content: &T) -> Self + pub fn state(state_key: S, content: &T) -> Self where T: EventContent, + S: Into, { Self { event_type: content.event_type().into(), content: to_raw_value(content) .expect("Builder failed to serialize state event content to RawValue"), - state_key: Some(state_key), + state_key: Some(state_key.into()), ..Self::default() } } diff --git a/src/core/pdu/content.rs b/src/core/matrix/pdu/content.rs similarity index 92% rename from src/core/pdu/content.rs rename to src/core/matrix/pdu/content.rs index fa724cb2..4e60ce6e 100644 --- a/src/core/pdu/content.rs +++ b/src/core/matrix/pdu/content.rs @@ -1,7 +1,7 @@ use serde::Deserialize; use serde_json::value::Value as JsonValue; -use crate::{err, implement, Result}; +use crate::{Result, err, implement}; #[must_use] #[implement(super::Pdu)] diff --git a/src/core/pdu/count.rs b/src/core/matrix/pdu/count.rs similarity index 99% rename from src/core/pdu/count.rs rename to src/core/matrix/pdu/count.rs index 0135cf28..b880278f 100644 --- a/src/core/pdu/count.rs +++ b/src/core/matrix/pdu/count.rs @@ -4,7 +4,7 @@ use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr}; use ruma::api::Direction; -use crate::{err, Error, Result}; +use crate::{Error, Result, err}; #[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] pub enum Count { diff --git a/src/core/pdu/event_id.rs b/src/core/matrix/pdu/event_id.rs similarity index 97% rename from src/core/pdu/event_id.rs rename to src/core/matrix/pdu/event_id.rs index 09b33edc..e9d868b1 100644 --- a/src/core/pdu/event_id.rs +++ b/src/core/matrix/pdu/event_id.rs @@ -1,7 +1,7 @@ use ruma::{CanonicalJsonObject, OwnedEventId, RoomVersionId}; use serde_json::value::RawValue as RawJsonValue; -use crate::{err, 
Result}; +use crate::{Result, err}; /// Generates a correct eventId for the incoming pdu. /// diff --git a/src/core/pdu/filter.rs b/src/core/matrix/pdu/filter.rs similarity index 100% rename from src/core/pdu/filter.rs rename to src/core/matrix/pdu/filter.rs diff --git a/src/core/pdu/id.rs b/src/core/matrix/pdu/id.rs similarity index 100% rename from src/core/pdu/id.rs rename to src/core/matrix/pdu/id.rs diff --git a/src/core/pdu/raw_id.rs b/src/core/matrix/pdu/raw_id.rs similarity index 97% rename from src/core/pdu/raw_id.rs rename to src/core/matrix/pdu/raw_id.rs index e1fd2381..318a0cd7 100644 --- a/src/core/pdu/raw_id.rs +++ b/src/core/matrix/pdu/raw_id.rs @@ -55,8 +55,8 @@ impl RawId { #[must_use] pub fn as_bytes(&self) -> &[u8] { match self { - | Self::Normal(ref raw) => raw, - | Self::Backfilled(ref raw) => raw, + | Self::Normal(raw) => raw, + | Self::Backfilled(raw) => raw, } } } diff --git a/src/core/pdu/redact.rs b/src/core/matrix/pdu/redact.rs similarity index 70% rename from src/core/pdu/redact.rs rename to src/core/matrix/pdu/redact.rs index 5d33eeca..409debfe 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/matrix/pdu/redact.rs @@ -1,15 +1,15 @@ use ruma::{ - canonical_json::redact_content_in_place, - events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, OwnedEventId, RoomVersionId, + canonical_json::redact_content_in_place, + events::{TimelineEventType, room::redaction::RoomRedactionEventContent}, }; use serde::Deserialize; use serde_json::{ json, - value::{to_raw_value, RawValue as RawJsonValue}, + value::{RawValue as RawJsonValue, to_raw_value}, }; -use crate::{implement, Error, Result}; +use crate::{Error, Result, implement}; #[derive(Deserialize)] struct ExtractRedactedBecause { @@ -76,17 +76,42 @@ pub fn copy_redacts(&self) -> (Option, Box) { if let Ok(mut content) = serde_json::from_str::(self.content.get()) { - if let Some(redacts) = content.redacts { - return (Some(redacts), self.content.clone()); - } else if let 
Some(redacts) = self.redacts.clone() { - content.redacts = Some(redacts); - return ( - self.redacts.clone(), - to_raw_value(&content).expect("Must be valid, we only added redacts field"), - ); + match content.redacts { + | Some(redacts) => { + return (Some(redacts), self.content.clone()); + }, + | _ => match self.redacts.clone() { + | Some(redacts) => { + content.redacts = Some(redacts); + return ( + self.redacts.clone(), + to_raw_value(&content) + .expect("Must be valid, we only added redacts field"), + ); + }, + | _ => {}, + }, } } } (self.redacts.clone(), self.content.clone()) } + +#[implement(super::Pdu)] +#[must_use] +pub fn redacts_id(&self, room_version: &RoomVersionId) -> Option { + use RoomVersionId::*; + + if self.kind != TimelineEventType::RoomRedaction { + return None; + } + + match *room_version { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => self.redacts.clone(), + | _ => + self.get_content::() + .ok()? + .redacts, + } +} diff --git a/src/core/pdu/relation.rs b/src/core/matrix/pdu/relation.rs similarity index 100% rename from src/core/pdu/relation.rs rename to src/core/matrix/pdu/relation.rs diff --git a/src/core/matrix/pdu/state_key.rs b/src/core/matrix/pdu/state_key.rs new file mode 100644 index 00000000..4af4fcf7 --- /dev/null +++ b/src/core/matrix/pdu/state_key.rs @@ -0,0 +1,8 @@ +use smallstr::SmallString; + +use super::ShortId; + +pub type StateKey = SmallString<[u8; INLINE_SIZE]>; +pub type ShortStateKey = ShortId; + +const INLINE_SIZE: usize = 48; diff --git a/src/core/pdu/strip.rs b/src/core/matrix/pdu/strip.rs similarity index 63% rename from src/core/pdu/strip.rs rename to src/core/matrix/pdu/strip.rs index 7d2fb1d6..3683caaa 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/matrix/pdu/strip.rs @@ -1,8 +1,8 @@ use ruma::{ events::{ - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, 
AnyTimelineEvent, StateEvent, + room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, }, serde::Raw, }; @@ -10,35 +10,18 @@ use serde_json::{json, value::Value as JsonValue}; use crate::implement; -#[must_use] -#[implement(super::Pdu)] -pub fn to_sync_room_event(&self) -> Raw { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value always works") -} - /// This only works for events that are also AnyRoomEvents. #[must_use] #[implement(super::Pdu)] -pub fn to_any_event(&self) -> Raw { +pub fn into_any_event(self) -> Raw { + serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works") +} + +/// This only works for events that are also AnyRoomEvents. 
+#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_any_event_value(self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -59,12 +42,24 @@ pub fn to_any_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_room_event(self) -> Raw { self.to_room_event() } + +#[implement(super::Pdu)] +#[must_use] pub fn to_room_event(&self) -> Raw { + serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_room_event_value(&self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -85,12 +80,25 @@ pub fn to_room_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_message_like_event(self) -> Raw { self.to_message_like_event() } + +#[implement(super::Pdu)] +#[must_use] pub fn to_message_like_event(&self) -> Raw { + serde_json::from_value(self.to_message_like_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_message_like_event_value(&self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -111,11 +119,55 @@ pub fn to_message_like_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_sync_room_event(self) -> Raw { self.to_sync_room_event() } + +#[implement(super::Pdu)] +#[must_use] +pub fn to_sync_room_event(&self) -> Raw { + 
serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_sync_room_event_value(&self) -> JsonValue { + let (redacts, content) = self.copy_redacts(); + let mut json = json!({ + "content": content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &redacts { + json["redacts"] = json!(redacts); + } + + json +} + +#[implement(super::Pdu)] +#[must_use] +pub fn into_state_event(self) -> Raw { + serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] pub fn into_state_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, @@ -134,15 +186,17 @@ pub fn into_state_event_value(self) -> JsonValue { json } -#[must_use] #[implement(super::Pdu)] -pub fn into_state_event(self) -> Raw { - serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") +#[must_use] +pub fn into_sync_state_event(self) -> Raw { + serde_json::from_value(self.into_sync_state_event_value()) + .expect("Raw::from_value always works") } -#[must_use] #[implement(super::Pdu)] -pub fn to_sync_state_event(&self) -> Raw { +#[must_use] +#[inline] +pub fn into_sync_state_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, "type": self.kind, @@ -156,39 +210,65 @@ pub fn to_sync_state_event(&self) -> Raw { json["unsigned"] = json!(unsigned); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_stripped_state_event(self) -> Raw { + self.to_stripped_state_event() +} + 
+#[implement(super::Pdu)] +#[must_use] pub fn to_stripped_state_event(&self) -> Raw { - let json = json!({ + serde_json::from_value(self.to_stripped_state_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_stripped_state_event_value(&self) -> JsonValue { + json!({ "content": self.content, "type": self.kind, "sender": self.sender, "state_key": self.state_key, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") + }) } -#[must_use] #[implement(super::Pdu)] -pub fn to_stripped_spacechild_state_event(&self) -> Raw { - let json = json!({ +#[must_use] +pub fn into_stripped_spacechild_state_event(self) -> Raw { + serde_json::from_value(self.into_stripped_spacechild_state_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue { + json!({ "content": self.content, "type": self.kind, "sender": self.sender, "state_key": self.state_key, "origin_server_ts": self.origin_server_ts, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") + }) } -#[must_use] #[implement(super::Pdu)] +#[must_use] pub fn into_member_event(self) -> Raw> { + serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_member_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, "type": self.kind, @@ -204,5 +284,5 @@ pub fn into_member_event(self) -> Raw> { json["unsigned"] = json!(unsigned); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } diff --git a/src/core/pdu/tests.rs b/src/core/matrix/pdu/tests.rs similarity index 100% rename from src/core/pdu/tests.rs rename to src/core/matrix/pdu/tests.rs diff --git a/src/core/pdu/unsigned.rs b/src/core/matrix/pdu/unsigned.rs similarity index 63% rename from 
src/core/pdu/unsigned.rs rename to src/core/matrix/pdu/unsigned.rs index fa305d71..23897519 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/matrix/pdu/unsigned.rs @@ -2,18 +2,20 @@ use std::collections::BTreeMap; use ruma::MilliSecondsSinceUnixEpoch; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue, Value as JsonValue}; +use serde_json::value::{RawValue as RawJsonValue, Value as JsonValue, to_raw_value}; use super::Pdu; -use crate::{err, implement, is_true, Result}; +use crate::{Result, err, implement, is_true}; #[implement(Pdu)] pub fn remove_transaction_id(&mut self) -> Result { + use BTreeMap as Map; + let Some(unsigned) = &self.unsigned else { return Ok(()); }; - let mut unsigned: BTreeMap> = serde_json::from_str(unsigned.get()) + let mut unsigned: Map<&str, Box> = serde_json::from_str(unsigned.get()) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; unsigned.remove("transaction_id"); @@ -26,10 +28,13 @@ pub fn remove_transaction_id(&mut self) -> Result { #[implement(Pdu)] pub fn add_age(&mut self) -> Result { - let mut unsigned: BTreeMap> = self + use BTreeMap as Map; + + let mut unsigned: Map<&str, Box> = self .unsigned - .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .as_deref() + .map(RawJsonValue::get) + .map_or_else(|| Ok(Map::new()), serde_json::from_str) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; // deliberately allowing for the possibility of negative age @@ -37,36 +42,35 @@ pub fn add_age(&mut self) -> Result { let then: i128 = self.origin_server_ts.into(); let this_age = now.saturating_sub(then); - unsigned.insert("age".to_owned(), to_raw_value(&this_age).expect("age is valid")); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); + unsigned.insert("age", to_raw_value(&this_age)?); + self.unsigned = Some(to_raw_value(&unsigned)?); Ok(()) } #[implement(Pdu)] -pub fn add_relation(&mut self, 
name: &str, pdu: &Pdu) -> Result { - let mut unsigned: BTreeMap = self +pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { + use serde_json::Map; + + let mut unsigned: Map = self .unsigned - .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .as_deref() + .map(RawJsonValue::get) + .map_or_else(|| Ok(Map::new()), serde_json::from_str) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; - let relations: &mut JsonValue = unsigned.entry("m.relations".into()).or_default(); - if relations.as_object_mut().is_none() { - let mut object = serde_json::Map::::new(); - _ = relations.as_object_mut().insert(&mut object); - } + let pdu = pdu + .map(serde_json::to_value) + .transpose()? + .unwrap_or_else(|| JsonValue::Object(Map::new())); - relations + unsigned + .entry("m.relations") + .or_insert(JsonValue::Object(Map::new())) .as_object_mut() - .expect("we just created it") - .insert(name.to_owned(), serde_json::to_value(pdu)?); + .map(|object| object.insert(name.to_owned(), pdu)); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); + self.unsigned = Some(to_raw_value(&unsigned)?); Ok(()) } diff --git a/src/core/matrix/state_res/LICENSE b/src/core/matrix/state_res/LICENSE new file mode 100644 index 00000000..c103a044 --- /dev/null +++ b/src/core/matrix/state_res/LICENSE @@ -0,0 +1,17 @@ +//! Permission is hereby granted, free of charge, to any person obtaining a copy +//! of this software and associated documentation files (the "Software"), to +//! deal in the Software without restriction, including without limitation the +//! rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +//! sell copies of the Software, and to permit persons to whom the Software is +//! furnished to do so, subject to the following conditions: + +//! The above copyright notice and this permission notice shall be included in +//! 
all copies or substantial portions of the Software. + +//! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +//! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +//! FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +//! IN THE SOFTWARE. diff --git a/src/core/matrix/state_res/benches.rs b/src/core/matrix/state_res/benches.rs new file mode 100644 index 00000000..01218b01 --- /dev/null +++ b/src/core/matrix/state_res/benches.rs @@ -0,0 +1,669 @@ +#[cfg(conduwuit_bench)] +extern crate test; + +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + sync::atomic::{AtomicU64, Ordering::SeqCst}, +}; + +use futures::{future, future::ready}; +use maplit::{btreemap, hashmap, hashset}; +use ruma::{ + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, Signatures, UserId, + events::{ + StateEventType, TimelineEventType, + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + }, + int, room_id, uint, user_id, +}; +use serde_json::{ + json, + value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, +}; + +use self::event::PduEvent; +use crate::state_res::{self as state_res, Error, Event, Result, StateMap}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn lexico_topo_sort(c: &mut test::Bencher) { + let graph = hashmap! 
{ + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + + c.iter(|| { + let _ = state_res::lexicographical_topological_sort(&graph, &|_| { + future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }); + }); +} + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn resolution_shallow_auth_chain(c: &mut test::Bencher) { + let parallel_fetches = 32; + let mut store = TestStore(hashmap! {}); + + // build up the DAG + let (state_at_bob, state_at_charlie, _) = store.set_up(); + + c.iter(|| async { + let ev_map = store.0.clone(); + let state_sets = [&state_at_bob, &state_at_charlie]; + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).clone()); + let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); + let auth_chain_sets: Vec> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + parallel_fetches, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + }); +} + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn resolve_deeper_event_set(c: &mut test::Bencher) { + let parallel_fetches = 32; + let mut inner = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + ( + 
(ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + c.iter(|| async { + let state_sets = [&state_set_a, &state_set_b]; + let auth_chain_sets: Vec> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let fetch = |id: OwnedEventId| ready(inner.get(&id).clone()); + let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + parallel_fetches, + ) + .await + { + | Ok(state) => state, + | Err(_) => panic!("resolution failed during benchmarking"), + }; + }); +} + +//*///////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION DETAILS AHEAD +// +/////////////////////////////////////////////////////////////////////*/ +struct TestStore(HashMap); + +#[allow(unused)] +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result { + self.0 + .get(event_id) + .cloned() + .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) + } + + /// Returns the events that correspond to the `event_ids` sorted in the same + /// order. 
+ fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result> { + let mut events = vec![]; + for id in event_ids { + events.push(self.get_event(room_id, id)?); + } + Ok(events) + } + + /// Returns a Vec of the related auth events to the given `event`. + fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } + + /// Returns a vector representing the difference in auth chains of the given + /// `events`. + fn auth_chain_diff( + &self, + room_id: &RoomId, + event_ids: Vec>, + ) -> Result> { + let mut auth_chain_sets = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self + .auth_event_ids(room_id, ids)? 
+ .into_iter() + .collect::>(); + auth_chain_sets.push(chain); + } + + if let Some(first) = auth_chain_sets.first().cloned() { + let common = auth_chain_sets + .iter() + .skip(1) + .fold(first, |a, b| a.intersection(b).cloned().collect::>()); + + Ok(auth_chain_sets + .into_iter() + .flatten() + .filter(|id| !common.contains(id.borrow())) + .collect()) + } else { + Ok(vec![]) + } + } +} + +impl TestStore { + #[allow(clippy::type_complexity)] + fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), create_event.clone()); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0 + .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0 + .insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + 
member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + format!("${}:foo", id).try_into().unwrap() +} + +fn alice() -> &'static UserId { user_id!("@alice:foo") } + +fn bob() -> &'static UserId { user_id!("@bob:foo") } + +fn charlie() -> &'static UserId { user_id!("@charlie:foo") } + +fn ella() -> &'static UserId { user_id!("@ella:foo") } + +fn room_id() -> &'static RoomId { room_id!("!test:foo") } + +fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> PduEvent +where + S: AsRef, +{ + // We don't care if the addition happens in order just that it is atomic + // (each event has its own value) + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = 
if id.contains('$') { + id.to_owned() + } else { + format!("${}:foo", id) + }; + let auth_events = auth_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + let prev_events = prev_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: btreemap! {}, + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new(String::new()), + signatures: Signatures::new(), + }), + } +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn INITIAL_EVENTS() -> HashMap { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + 
to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn BAN_STATE_SET() -> HashMap { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +/// Convenience trait for adding event type plus state key to state maps. 
+trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for &TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +mod event { + use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + events::{TimelineEventType, pdu::Pdu}, + }; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + use super::Event; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.room_id, + | Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.sender, + | Pdu::RoomV3Pdu(ev) => &ev.sender, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.kind, + | Pdu::RoomV3Pdu(ev) => &ev.kind, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.content, + | Pdu::RoomV3Pdu(ev) => &ev.content, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + | 
Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + pub(crate) struct PduEvent { + pub(crate) event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/core/matrix/state_res/error.rs b/src/core/matrix/state_res/error.rs new file mode 100644 index 00000000..7711d878 --- /dev/null +++ b/src/core/matrix/state_res/error.rs @@ -0,0 +1,23 @@ +use serde_json::Error as JsonError; +use thiserror::Error; + +/// Represents the various errors that arise when resolving state. +#[derive(Error, Debug)] +#[non_exhaustive] +pub enum Error { + /// A deserialization error. + #[error(transparent)] + SerdeJson(#[from] JsonError), + + /// The given option or version is unsupported. + #[error("Unsupported room version: {0}")] + Unsupported(String), + + /// The given event was not found. + #[error("Not found error: {0}")] + NotFound(String), + + /// Invalid fields in the given PDU. 
+ #[error("Invalid PDU: {0}")] + InvalidPdu(String), +} diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs new file mode 100644 index 00000000..c69db50e --- /dev/null +++ b/src/core/matrix/state_res/event_auth.rs @@ -0,0 +1,1453 @@ +use std::{borrow::Borrow, collections::BTreeSet}; + +use futures::{ + Future, + future::{OptionFuture, join3}, +}; +use ruma::{ + Int, OwnedUserId, RoomVersionId, UserId, + events::room::{ + create::RoomCreateEventContent, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, ThirdPartyInvite}, + power_levels::RoomPowerLevelsEventContent, + third_party_invite::RoomThirdPartyInviteEventContent, + }, + int, + serde::{Base64, Raw}, +}; +use serde::{ + Deserialize, + de::{Error as _, IgnoredAny}, +}; +use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; + +use super::{ + Error, Event, Result, StateEventType, StateKey, TimelineEventType, + power_levels::{ + deserialize_power_levels, deserialize_power_levels_content_fields, + deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, + }, + room_version::RoomVersion, +}; +use crate::{debug, error, trace, warn}; + +// FIXME: field extracting could be bundled for `content` +#[derive(Deserialize)] +struct GetMembership { + membership: MembershipState, +} + +#[derive(Deserialize, Debug)] +struct RoomMemberContentFields { + membership: Option>, + join_authorised_via_users_server: Option>, +} + +/// For the given event `kind` what are the relevant auth events that are needed +/// to authenticate this `content`. +/// +/// # Errors +/// +/// This function will return an error if the supplied `content` is not a JSON +/// object. 
+pub fn auth_types_for_event( + kind: &TimelineEventType, + sender: &UserId, + state_key: Option<&str>, + content: &RawJsonValue, +) -> serde_json::Result> { + if kind == &TimelineEventType::RoomCreate { + return Ok(vec![]); + } + + let mut auth_types = vec![ + (StateEventType::RoomPowerLevels, StateKey::new()), + (StateEventType::RoomMember, sender.as_str().into()), + (StateEventType::RoomCreate, StateKey::new()), + ]; + + if kind == &TimelineEventType::RoomMember { + #[derive(Deserialize)] + struct RoomMemberContentFields { + membership: Option>, + third_party_invite: Option>, + join_authorised_via_users_server: Option>, + } + + if let Some(state_key) = state_key { + let content: RoomMemberContentFields = from_json_str(content.get())?; + + if let Some(Ok(membership)) = content.membership.map(|m| m.deserialize()) { + if [MembershipState::Join, MembershipState::Invite, MembershipState::Knock] + .contains(&membership) + { + let key = (StateEventType::RoomJoinRules, StateKey::new()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + + if let Some(Ok(u)) = content + .join_authorised_via_users_server + .map(|m| m.deserialize()) + { + let key = (StateEventType::RoomMember, u.as_str().into()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + } + } + + let key = (StateEventType::RoomMember, state_key.into()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + + if membership == MembershipState::Invite { + if let Some(Ok(t_id)) = content.third_party_invite.map(|t| t.deserialize()) { + let key = + (StateEventType::RoomThirdPartyInvite, t_id.signed.token.into()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + } + } + } + } + } + + Ok(auth_types) +} + +/// Authenticate the incoming `event`. 
+/// +/// The steps of authentication are: +/// +/// * check that the event is being authenticated for the correct room +/// * then there are checks for specific event types +/// +/// The `fetch_state` closure should gather state from a state snapshot. We need +/// to know if the event passes auth against some state not a recursive +/// collection of auth_events fields. +#[tracing::instrument( + level = "debug", + skip_all, + fields( + event_id = incoming_event.event_id().borrow().as_str() + ) +)] +pub async fn auth_check( + room_version: &RoomVersion, + incoming_event: &Incoming, + current_third_party_invite: Option<&Incoming>, + fetch_state: F, +) -> Result +where + F: Fn(&StateEventType, &str) -> Fut + Send, + Fut: Future> + Send, + Fetched: Event + Send, + Incoming: Event + Send + Sync, +{ + debug!( + event_id = format!("{}", incoming_event.event_id()), + event_type = format!("{}", incoming_event.event_type()), + "auth_check beginning" + ); + + // [synapse] check that all the events are in the same room as `incoming_event` + + // [synapse] do_sig_check check the event has valid signatures for member events + + // TODO do_size_check is false when called by `iterative_auth_check` + // do_size_check is also mostly accomplished by ruma with the exception of + // checking event_type, state_key, and json are below a certain size (255 and + // 65_536 respectively) + + let sender = incoming_event.sender(); + + // Implementation of https://spec.matrix.org/latest/rooms/v1/#authorization-rules + // + // 1. 
If type is m.room.create: + if *incoming_event.event_type() == TimelineEventType::RoomCreate { + #[derive(Deserialize)] + struct RoomCreateContentFields { + room_version: Option>, + creator: Option>, + } + + debug!("start m.room.create check"); + + // If it has any previous events, reject + if incoming_event.prev_events().next().is_some() { + warn!("the room creation event had previous events"); + return Ok(false); + } + + // If the domain of the room_id does not match the domain of the sender, reject + let Some(room_id_server_name) = incoming_event.room_id().server_name() else { + warn!("room ID has no servername"); + return Ok(false); + }; + + if room_id_server_name != sender.server_name() { + warn!("servername of room ID does not match servername of sender"); + return Ok(false); + } + + // If content.room_version is present and is not a recognized version, reject + let content: RoomCreateContentFields = from_json_str(incoming_event.content().get())?; + if content + .room_version + .is_some_and(|v| v.deserialize().is_err()) + { + warn!("invalid room version found in m.room.create event"); + return Ok(false); + } + + if !room_version.use_room_create_sender { + // If content has no creator field, reject + if content.creator.is_none() { + warn!("no creator field found in m.room.create content"); + return Ok(false); + } + } + + debug!("m.room.create event was allowed"); + return Ok(true); + } + + /* + // TODO: In the past this code caused problems federating with synapse, maybe this has been + // resolved already. Needs testing. + // + // 2. Reject if auth_events + // a. auth_events cannot have duplicate keys since it's a BTree + // b. 
All entries are valid auth events according to spec + let expected_auth = auth_types_for_event( + incoming_event.kind, + sender, + incoming_event.state_key, + incoming_event.content().clone(), + ); + + dbg!(&expected_auth); + + for ev_key in auth_events.keys() { + // (b) + if !expected_auth.contains(ev_key) { + warn!("auth_events contained invalid auth event"); + return Ok(false); + } + } + */ + + let (room_create_event, power_levels_event, sender_member_event) = join3( + fetch_state(&StateEventType::RoomCreate, ""), + fetch_state(&StateEventType::RoomPowerLevels, ""), + fetch_state(&StateEventType::RoomMember, sender.as_str()), + ) + .await; + + let room_create_event = match room_create_event { + | None => { + warn!("no m.room.create event in auth chain"); + return Ok(false); + }, + | Some(e) => e, + }; + + // 3. If event does not have m.room.create in auth_events reject + if !incoming_event + .auth_events() + .any(|id| id.borrow() == room_create_event.event_id().borrow()) + { + warn!("no m.room.create event in auth events"); + return Ok(false); + } + + // If the create event content has the field m.federate set to false and the + // sender domain of the event does not match the sender domain of the create + // event, reject. + #[derive(Deserialize)] + #[allow(clippy::items_after_statements)] + struct RoomCreateContentFederate { + #[serde(rename = "m.federate", default = "ruma::serde::default_true")] + federate: bool, + } + let room_create_content: RoomCreateContentFederate = + from_json_str(room_create_event.content().get())?; + if !room_create_content.federate + && room_create_event.sender().server_name() != incoming_event.sender().server_name() + { + warn!( + "room is not federated and event's sender domain does not match create event's \ + sender domain" + ); + return Ok(false); + } + + // Only in some room versions 6 and below + if room_version.special_case_aliases_auth { + // 4. 
If type is m.room.aliases + if *incoming_event.event_type() == TimelineEventType::RoomAliases { + debug!("starting m.room.aliases check"); + + // If sender's domain doesn't matches state_key, reject + if incoming_event.state_key() != Some(sender.server_name().as_str()) { + warn!("state_key does not match sender"); + return Ok(false); + } + + debug!("m.room.aliases event was allowed"); + return Ok(true); + } + } + + // If type is m.room.member + if *incoming_event.event_type() == TimelineEventType::RoomMember { + debug!("starting m.room.member check"); + let state_key = match incoming_event.state_key() { + | None => { + warn!("no statekey in member event"); + return Ok(false); + }, + | Some(s) => s, + }; + + let content: RoomMemberContentFields = from_json_str(incoming_event.content().get())?; + if content + .membership + .as_ref() + .and_then(|m| m.deserialize().ok()) + .is_none() + { + warn!("no valid membership field found for m.room.member event content"); + return Ok(false); + } + + let target_user = + <&UserId>::try_from(state_key).map_err(|e| Error::InvalidPdu(format!("{e}")))?; + + let user_for_join_auth = content + .join_authorised_via_users_server + .as_ref() + .and_then(|u| u.deserialize().ok()); + + let user_for_join_auth_event: OptionFuture<_> = user_for_join_auth + .as_ref() + .map(|auth_user| fetch_state(&StateEventType::RoomMember, auth_user.as_str())) + .into(); + + let target_user_member_event = + fetch_state(&StateEventType::RoomMember, target_user.as_str()); + + let join_rules_event = fetch_state(&StateEventType::RoomJoinRules, ""); + + let (join_rules_event, target_user_member_event, user_for_join_auth_event) = + join3(join_rules_event, target_user_member_event, user_for_join_auth_event).await; + + let user_for_join_auth_membership = user_for_join_auth_event + .and_then(|mem| from_json_str::(mem?.content().get()).ok()) + .map_or(MembershipState::Leave, |mem| mem.membership); + + if !valid_membership_change( + room_version, + target_user, + 
target_user_member_event.as_ref(), + sender, + sender_member_event.as_ref(), + incoming_event, + current_third_party_invite, + power_levels_event.as_ref(), + join_rules_event.as_ref(), + user_for_join_auth.as_deref(), + &user_for_join_auth_membership, + &room_create_event, + )? { + return Ok(false); + } + + debug!("m.room.member event was allowed"); + return Ok(true); + } + + // If the sender's current membership state is not join, reject + #[allow(clippy::manual_let_else)] + let sender_member_event = match sender_member_event { + | Some(mem) => mem, + | None => { + warn!("sender not found in room"); + return Ok(false); + }, + }; + + let sender_membership_event_content: RoomMemberContentFields = + from_json_str(sender_member_event.content().get())?; + let Some(membership_state) = sender_membership_event_content.membership else { + warn!( + sender_membership_event_content = format!("{sender_membership_event_content:?}"), + event_id = format!("{}", incoming_event.event_id()), + "Sender membership event content missing membership field" + ); + return Err(Error::InvalidPdu("Missing membership field".to_owned())); + }; + let membership_state = membership_state.deserialize()?; + + if !matches!(membership_state, MembershipState::Join) { + warn!("sender's membership is not join"); + return Ok(false); + } + + // If type is m.room.third_party_invite + let sender_power_level = match &power_levels_event { + | Some(pl) => { + let content = + deserialize_power_levels_content_fields(pl.content().get(), room_version)?; + match content.get_user_power(sender) { + | Some(level) => *level, + | _ => content.users_default, + } + }, + | _ => { + // If no power level event found the creator gets 100 everyone else gets 0 + let is_creator = if room_version.use_room_create_sender { + room_create_event.sender() == sender + } else { + #[allow(deprecated)] + from_json_str::(room_create_event.content().get()) + .is_ok_and(|create| create.creator.unwrap() == *sender) + }; + + if is_creator { 
int!(100) } else { int!(0) } + }, + }; + + // Allow if and only if sender's current power level is greater than + // or equal to the invite level + if *incoming_event.event_type() == TimelineEventType::RoomThirdPartyInvite { + let invite_level = match &power_levels_event { + | Some(power_levels) => + deserialize_power_levels_content_invite( + power_levels.content().get(), + room_version, + )? + .invite, + | None => int!(0), + }; + + if sender_power_level < invite_level { + warn!("sender's cannot send invites in this room"); + return Ok(false); + } + + debug!("m.room.third_party_invite event was allowed"); + return Ok(true); + } + + // If the event type's required power level is greater than the sender's power + // level, reject If the event has a state_key that starts with an @ and does + // not match the sender, reject. + if !can_send_event(incoming_event, power_levels_event.as_ref(), sender_power_level) { + warn!("user cannot send event"); + return Ok(false); + } + + // If type is m.room.power_levels + if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels { + debug!("starting m.room.power_levels check"); + + match check_power_levels( + room_version, + incoming_event, + power_levels_event.as_ref(), + sender_power_level, + ) { + | Some(required_pwr_lvl) => + if !required_pwr_lvl { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + }, + | _ => { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + }, + } + debug!("m.room.power_levels event allowed"); + } + + // Room version 3: Redaction events are always accepted (provided the event is + // allowed by `events` and `events_default` in the power levels). However, + // servers should not apply or send redaction's to clients until both the + // redaction event and original event have been seen, and are valid. 
Servers + // should only apply redaction's to events where the sender's domains match, or + // the sender of the redaction has the appropriate permissions per the + // power levels. + + if room_version.extra_redaction_checks + && *incoming_event.event_type() == TimelineEventType::RoomRedaction + { + let redact_level = match power_levels_event { + | Some(pl) => + deserialize_power_levels_content_redact(pl.content().get(), room_version)?.redact, + | None => int!(50), + }; + + if !check_redaction(room_version, incoming_event, sender_power_level, redact_level)? { + return Ok(false); + } + } + + debug!("allowing event passed all checks"); + Ok(true) +} + +// TODO deserializing the member, power, join_rules event contents is done in +// conduit just before this is called. Could they be passed in? +/// Does the user who sent this member event have required power levels to do +/// so. +/// +/// * `user` - Information about the membership event and user making the +/// request. +/// * `auth_events` - The set of auth events that relate to a membership event. +/// +/// This is generated by calling `auth_types_for_event` with the membership +/// event and the current State. 
+#[allow(clippy::too_many_arguments)] +#[allow(clippy::cognitive_complexity)] +fn valid_membership_change( + room_version: &RoomVersion, + target_user: &UserId, + target_user_membership_event: Option<&impl Event>, + sender: &UserId, + sender_membership_event: Option<&impl Event>, + current_event: impl Event, + current_third_party_invite: Option<&impl Event>, + power_levels_event: Option<&impl Event>, + join_rules_event: Option<&impl Event>, + user_for_join_auth: Option<&UserId>, + user_for_join_auth_membership: &MembershipState, + create_room: &impl Event, +) -> Result { + #[derive(Deserialize)] + struct GetThirdPartyInvite { + third_party_invite: Option>, + } + let content = current_event.content(); + + let target_membership = from_json_str::(content.get())?.membership; + let third_party_invite = + from_json_str::(content.get())?.third_party_invite; + + let sender_membership = match &sender_membership_event { + | Some(pdu) => from_json_str::(pdu.content().get())?.membership, + | None => MembershipState::Leave, + }; + let sender_is_joined = sender_membership == MembershipState::Join; + + let target_user_current_membership = match &target_user_membership_event { + | Some(pdu) => from_json_str::(pdu.content().get())?.membership, + | None => MembershipState::Leave, + }; + + let power_levels: RoomPowerLevelsEventContent = match &power_levels_event { + | Some(ev) => from_json_str(ev.content().get())?, + | None => RoomPowerLevelsEventContent::default(), + }; + + let sender_power = power_levels + .users + .get(sender) + .or_else(|| sender_is_joined.then_some(&power_levels.users_default)); + + let target_power = power_levels.users.get(target_user).or_else(|| { + (target_membership == MembershipState::Join).then_some(&power_levels.users_default) + }); + + let mut join_rules = JoinRule::Invite; + if let Some(jr) = &join_rules_event { + join_rules = from_json_str::(jr.content().get())?.join_rule; + } + + let power_levels_event_id = 
power_levels_event.as_ref().map(Event::event_id); + let sender_membership_event_id = sender_membership_event.as_ref().map(Event::event_id); + let target_user_membership_event_id = + target_user_membership_event.as_ref().map(Event::event_id); + + let user_for_join_auth_is_valid = if let Some(user_for_join_auth) = user_for_join_auth { + // Is the authorised user allowed to invite users into this room + let (auth_user_pl, invite_level) = if let Some(pl) = &power_levels_event { + // TODO Refactor all powerlevel parsing + let invite = + deserialize_power_levels_content_invite(pl.content().get(), room_version)?.invite; + + let content = + deserialize_power_levels_content_fields(pl.content().get(), room_version)?; + let user_pl = match content.get_user_power(user_for_join_auth) { + | Some(level) => *level, + | _ => content.users_default, + }; + + (user_pl, invite) + } else { + (int!(0), int!(0)) + }; + (user_for_join_auth_membership == &MembershipState::Join) + && (auth_user_pl >= invite_level) + } else { + // No auth user was given + false + }; + + Ok(match target_membership { + | MembershipState::Join => { + // 1. If the only previous event is an m.room.create and the state_key is the + // creator, + // allow + let mut prev_events = current_event.prev_events(); + + let prev_event_is_create_event = prev_events + .next() + .is_some_and(|event_id| event_id.borrow() == create_room.event_id().borrow()); + let no_more_prev_events = prev_events.next().is_none(); + + if prev_event_is_create_event && no_more_prev_events { + let is_creator = if room_version.use_room_create_sender { + let creator = create_room.sender(); + + creator == sender && creator == target_user + } else { + #[allow(deprecated)] + let creator = from_json_str::(create_room.content().get())? 
+ .creator + .ok_or_else(|| serde_json::Error::missing_field("creator"))?; + + creator == sender && creator == target_user + }; + + if is_creator { + return Ok(true); + } + } + + if sender != target_user { + // If the sender does not match state_key, reject. + warn!("Can't make other user join"); + false + } else if target_user_current_membership == MembershipState::Ban { + // If the sender is banned, reject. + warn!(?target_user_membership_event_id, "Banned user can't join"); + false + } else if (join_rules == JoinRule::Invite + || room_version.allow_knocking && join_rules == JoinRule::Knock) + // If the join_rule is invite then allow if membership state is invite or join + && (target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Invite) + { + true + } else if room_version.restricted_join_rules + && matches!(join_rules, JoinRule::Restricted(_)) + || room_version.knock_restricted_join_rule + && matches!(join_rules, JoinRule::KnockRestricted(_)) + { + // If the join_rule is restricted or knock_restricted + if matches!( + target_user_current_membership, + MembershipState::Invite | MembershipState::Join + ) { + // If membership state is join or invite, allow. + true + } else { + // If the join_authorised_via_users_server key in content is not a user with + // sufficient permission to invite other users, reject. + // Otherwise, allow. + user_for_join_auth_is_valid + } + } else { + // If the join_rule is public, allow. + // Otherwise, reject. 
+ join_rules == JoinRule::Public + } + }, + | MembershipState::Invite => { + // If content has third_party_invite key + match third_party_invite.and_then(|i| i.deserialize().ok()) { + | Some(tp_id) => + if target_user_current_membership == MembershipState::Ban { + warn!(?target_user_membership_event_id, "Can't invite banned user"); + false + } else { + let allow = verify_third_party_invite( + Some(target_user), + sender, + &tp_id, + current_third_party_invite, + ); + if !allow { + warn!("Third party invite invalid"); + } + allow + }, + | _ => { + if !sender_is_joined + || target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Ban + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't invite user if sender not joined or the user is currently \ + joined or banned", + ); + false + } else { + let allow = sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some(); + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to invite", + ); + } + allow + } + }, + } + }, + | MembershipState::Leave => + if sender == target_user { + let allow = target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Invite + || target_user_current_membership == MembershipState::Knock; + if !allow { + warn!( + ?target_user_membership_event_id, + ?target_user_current_membership, + "Can't leave if sender is not already invited, knocked, or joined" + ); + } + allow + } else if !sender_is_joined + || target_user_current_membership == MembershipState::Ban + && sender_power.filter(|&p| p < &power_levels.ban).is_some() + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't kick if sender not joined or user is already banned", + ); + false + } else { + let allow = sender_power.filter(|&p| p >= &power_levels.kick).is_some() + && target_power < 
sender_power; + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to kick", + ); + } + allow + }, + | MembershipState::Ban => + if !sender_is_joined { + warn!(?sender_membership_event_id, "Can't ban user if sender is not joined"); + false + } else { + let allow = sender_power.filter(|&p| p >= &power_levels.ban).is_some() + && target_power < sender_power; + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to ban", + ); + } + allow + }, + | MembershipState::Knock if room_version.allow_knocking => { + // 1. If the `join_rule` is anything other than `knock` or `knock_restricted`, + // reject. + if !matches!(join_rules, JoinRule::KnockRestricted(_) | JoinRule::Knock) { + warn!( + "Join rule is not set to knock or knock_restricted, knocking is not allowed" + ); + false + } else if matches!(join_rules, JoinRule::KnockRestricted(_)) + && !room_version.knock_restricted_join_rule + { + // 2. If the `join_rule` is `knock_restricted`, but the room does not support + // `knock_restricted`, reject. + warn!( + "Join rule is set to knock_restricted but room version does not support \ + knock_restricted, knocking is not allowed" + ); + false + } else if sender != target_user { + // 3. If `sender` does not match `state_key`, reject. + warn!( + ?sender, + ?target_user, + "Can't make another user knock, sender did not match target" + ); + false + } else if matches!( + sender_membership, + MembershipState::Ban | MembershipState::Invite | MembershipState::Join + ) { + // 4. If the `sender`'s current membership is not `ban`, `invite`, or `join`, + // allow. + // 5. Otherwise, reject. 
+ warn!( + ?target_user_membership_event_id, + "Knocking with a membership state of ban, invite or join is invalid", + ); + false + } else { + true + } + }, + | _ => { + warn!("Unknown membership transition"); + false + }, + }) +} + +/// Is the user allowed to send a specific event based on the rooms power +/// levels. +/// +/// Does the event have the correct userId as its state_key if it's not the "" +/// state_key. +fn can_send_event(event: impl Event, ple: Option, user_level: Int) -> bool { + let event_type_power_level = get_send_level(event.event_type(), event.state_key(), ple); + + debug!( + required_level = i64::from(event_type_power_level), + user_level = i64::from(user_level), + state_key = ?event.state_key(), + "permissions factors", + ); + + if user_level < event_type_power_level { + return false; + } + + if event.state_key().is_some_and(|k| k.starts_with('@')) + && event.state_key() != Some(event.sender().as_str()) + { + return false; // permission required to post in this room + } + + true +} + +/// Confirm that the event sender has the required power levels. +fn check_power_levels( + room_version: &RoomVersion, + power_event: impl Event, + previous_power_event: Option, + user_level: Int, +) -> Option { + match power_event.state_key() { + | Some("") => {}, + | Some(key) => { + error!(state_key = key, "m.room.power_levels event has non-empty state key"); + return None; + }, + | None => { + error!("check_power_levels requires an m.room.power_levels *state* event argument"); + return None; + }, + } + + // - If any of the keys users_default, events_default, state_default, ban, + // redact, kick, or invite in content are present and not an integer, reject. + // - If either of the keys events or notifications in content are present and + // not a dictionary with values that are integers, reject. + // - If users key in content is not a dictionary with keys that are valid user + // IDs with values that are integers, reject. 
+ let user_content: RoomPowerLevelsEventContent = + deserialize_power_levels(power_event.content().get(), room_version)?; + + // Validation of users is done in Ruma, synapse for loops validating user_ids + // and integers here + debug!("validation of power event finished"); + + #[allow(clippy::manual_let_else)] + let current_state = match previous_power_event { + | Some(current_state) => current_state, + // If there is no previous m.room.power_levels event in the room, allow + | None => return Some(true), + }; + + let current_content: RoomPowerLevelsEventContent = + deserialize_power_levels(current_state.content().get(), room_version)?; + + let mut user_levels_to_check = BTreeSet::new(); + let old_list = ¤t_content.users; + let user_list = &user_content.users; + for user in old_list.keys().chain(user_list.keys()) { + let user: &UserId = user; + user_levels_to_check.insert(user); + } + + trace!(set = ?user_levels_to_check, "user levels to check"); + + let mut event_levels_to_check = BTreeSet::new(); + let old_list = ¤t_content.events; + let new_list = &user_content.events; + for ev_id in old_list.keys().chain(new_list.keys()) { + event_levels_to_check.insert(ev_id); + } + + trace!(set = ?event_levels_to_check, "event levels to check"); + + let old_state = ¤t_content; + let new_state = &user_content; + + // synapse does not have to split up these checks since we can't combine UserIds + // and EventTypes we do 2 loops + + // UserId loop + for user in user_levels_to_check { + let old_level = old_state.users.get(user); + let new_level = new_state.users.get(user); + if old_level.is_some() && new_level.is_some() && old_level == new_level { + continue; + } + + // If the current value is equal to the sender's current power level, reject + if user != power_event.sender() && old_level == Some(&user_level) { + warn!("m.room.power_level cannot remove ops == to own"); + return Some(false); // cannot remove ops level == to own + } + + // If the current value is higher than the 
sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > Some(&user_level); + let new_level_too_big = new_level > Some(&user_level); + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + + // EventType loop + for ev_type in event_levels_to_check { + let old_level = old_state.events.get(ev_type); + let new_level = new_state.events.get(ev_type); + if old_level.is_some() && new_level.is_some() && old_level == new_level { + continue; + } + + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > Some(&user_level); + let new_level_too_big = new_level > Some(&user_level); + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + + // Notifications, currently there is only @room + if room_version.limit_notifications_power_levels { + let old_level = old_state.notifications.room; + let new_level = new_state.notifications.room; + if old_level != new_level { + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > user_level; + let new_level_too_big = new_level > user_level; + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + } + + let levels = [ + "users_default", + "events_default", + "state_default", + "ban", + "redact", + "kick", + "invite", + ]; + let old_state = serde_json::to_value(old_state).unwrap(); + let new_state = 
serde_json::to_value(new_state).unwrap(); + for lvl_name in &levels { + if let Some((old_lvl, new_lvl)) = get_deserialize_levels(&old_state, &new_state, lvl_name) + { + let old_level_too_big = old_lvl > user_level; + let new_level_too_big = new_lvl > user_level; + + if old_level_too_big || new_level_too_big { + warn!("cannot add ops > than own"); + return Some(false); + } + } + } + + Some(true) +} + +fn get_deserialize_levels( + old: &serde_json::Value, + new: &serde_json::Value, + name: &str, +) -> Option<(Int, Int)> { + Some(( + serde_json::from_value(old.get(name)?.clone()).ok()?, + serde_json::from_value(new.get(name)?.clone()).ok()?, + )) +} + +/// Does the event redacting come from a user with enough power to redact the +/// given event. +fn check_redaction( + _room_version: &RoomVersion, + redaction_event: impl Event, + user_level: Int, + redact_level: Int, +) -> Result { + if user_level >= redact_level { + debug!("redaction allowed via power levels"); + return Ok(true); + } + + // If the domain of the event_id of the event being redacted is the same as the + // domain of the event_id of the m.room.redaction, allow + if redaction_event.event_id().borrow().server_name() + == redaction_event + .redacts() + .as_ref() + .and_then(|&id| id.borrow().server_name()) + { + debug!("redaction event allowed via room version 1 rules"); + return Ok(true); + } + + Ok(false) +} + +/// Helper function to fetch the power level needed to send an event of type +/// `e_type` based on the rooms "m.room.power_level" event. 
+fn get_send_level( + e_type: &TimelineEventType, + state_key: Option<&str>, + power_lvl: Option, +) -> Int { + power_lvl + .and_then(|ple| { + from_json_str::(ple.content().get()) + .map(|content| { + content.events.get(e_type).copied().unwrap_or_else(|| { + if state_key.is_some() { + content.state_default + } else { + content.events_default + } + }) + }) + .ok() + }) + .unwrap_or_else(|| if state_key.is_some() { int!(50) } else { int!(0) }) +} + +fn verify_third_party_invite( + target_user: Option<&UserId>, + sender: &UserId, + tp_id: &ThirdPartyInvite, + current_third_party_invite: Option, +) -> bool { + // 1. Check for user being banned happens before this is called + // checking for mxid and token keys is done by ruma when deserializing + + // The state key must match the invitee + if target_user != Some(&tp_id.signed.mxid) { + return false; + } + + // If there is no m.room.third_party_invite event in the current room state with + // state_key matching token, reject + #[allow(clippy::manual_let_else)] + let current_tpid = match current_third_party_invite { + | Some(id) => id, + | None => return false, + }; + + if current_tpid.state_key() != Some(&tp_id.signed.token) { + return false; + } + + if sender != current_tpid.sender() { + return false; + } + + // If any signature in signed matches any public key in the + // m.room.third_party_invite event, allow + #[allow(clippy::manual_let_else)] + let tpid_ev = + match from_json_str::(current_tpid.content().get()) { + | Ok(ev) => ev, + | Err(_) => return false, + }; + + #[allow(clippy::manual_let_else)] + let decoded_invite_token = match Base64::parse(&tp_id.signed.token) { + | Ok(tok) => tok, + // FIXME: Log a warning? 
+ | Err(_) => return false, + }; + + // A list of public keys in the public_keys field + for key in tpid_ev.public_keys.unwrap_or_default() { + if key.public_key == decoded_invite_token { + return true; + } + } + + // A single public key in the public_key field + tpid_ev.public_key == decoded_invite_token +} + +#[cfg(test)] +mod tests { + use ruma::events::{ + StateEventType, TimelineEventType, + room::{ + join_rules::{ + AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, RoomMembership, + }, + member::{MembershipState, RoomMemberEventContent}, + }, + }; + use serde_json::value::to_raw_value as to_raw_json_value; + + use crate::state_res::{ + Event, EventTypeExt, RoomVersion, StateMap, + event_auth::valid_membership_change, + test_utils::{ + INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, PduEvent, alice, charlie, ella, event_id, + member_content_ban, member_content_join, room_id, to_pdu_event, + }, + }; + + #[test] + fn test_ban_pass() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + alice(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_ban(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = charlie(); + let sender = alice(); + + assert!( + valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + 
&fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); + } + + #[test] + fn test_join_non_creator() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS_CREATE_ROOM(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = charlie(); + let sender = charlie(); + + assert!( + !valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); + } + + #[test] + fn test_join_creator() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS_CREATE_ROOM(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = alice(); + let sender = alice(); + + assert!( + valid_membership_change( + &RoomVersion::V6, + target_user, + 
fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); + } + + #[test] + fn test_ban_fail() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + charlie(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_ban(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = alice(); + let sender = charlie(); + + assert!( + !valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); + } + + #[test] + fn test_restricted_join_rule() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let mut events = INITIAL_EVENTS(); + *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Restricted( + 
Restricted::new(vec![AllowRule::RoomMembership(RoomMembership::new( + room_id().to_owned(), + ))]), + ))) + .unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ); + + let mut member = RoomMemberEventContent::new(MembershipState::Join); + member.join_authorized_via_users_server = Some(alice().to_owned()); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap(), + &["CREATE", "IJR", "IPOWER", "new"], + &["new"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = ella(); + let sender = ella(); + + assert!( + valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + Some(alice()), + &MembershipState::Join, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); + + assert!( + !valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + Some(ella()), + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); + } + + #[test] + fn test_knock() { + let _ = tracing::subscriber::set_default( + 
tracing_subscriber::fmt().with_test_writer().finish(), + ); + let mut events = INITIAL_EVENTS(); + *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Knock)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Knock)).unwrap(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = ella(); + let sender = ella(); + + assert!( + valid_membership_change( + &RoomVersion::V7, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); + } +} diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs new file mode 100644 index 00000000..2ab7cb64 --- /dev/null +++ b/src/core/matrix/state_res/mod.rs @@ -0,0 +1,1654 @@ +#![cfg_attr(test, allow(warnings))] + +pub(crate) mod error; +pub mod event_auth; +mod power_levels; +mod room_version; + +#[cfg(test)] +mod test_utils; + +#[cfg(test)] +mod benches; + +use std::{ + borrow::Borrow, + cmp::{Ordering, Reverse}, + collections::{BinaryHeap, HashMap, HashSet}, + hash::{BuildHasher, Hash}, +}; + +use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future}; +use 
ruma::{ + EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, + events::{ + StateEventType, TimelineEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, + int, +}; +use serde_json::from_str as from_json_str; + +pub(crate) use self::error::Error; +use self::power_levels::PowerLevelsContentFields; +pub use self::{ + event_auth::{auth_check, auth_types_for_event}, + room_version::RoomVersion, +}; +use crate::{ + debug, debug_error, + matrix::{event::Event, pdu::StateKey}, + trace, + utils::stream::{ + BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryReadyExt, WidebandExt, + }, + warn, +}; + +/// A mapping of event type and state_key to some value `T`, usually an +/// `EventId`. +pub type StateMap = HashMap; +pub type StateMapItem = (TypeStateKey, T); +pub type TypeStateKey = (StateEventType, StateKey); + +type Result = crate::Result; + +/// Resolve sets of state events as they come in. +/// +/// Internally `StateResolution` builds a graph and an auth chain to allow for +/// state conflict resolution. +/// +/// ## Arguments +/// +/// * `state_sets` - The incoming state to resolve. Each `StateMap` represents a +/// possible fork in the state of a room. +/// +/// * `auth_chain_sets` - The full recursive set of `auth_events` for each event +/// in the `state_sets`. +/// +/// * `event_fetch` - Any event not found in the `event_map` will defer to this +/// closure to find the event. +/// +/// * `parallel_fetches` - The number of asynchronous fetch requests in-flight +/// for any given operation. +/// +/// ## Invariants +/// +/// The caller of `resolve` must ensure that all the events are from the same +/// room. Although this function takes a `RoomId` it does not check that each +/// event is part of the same room. 
+//#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, +//#[tracing::instrument(level event_fetch))] +pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( + room_version: &RoomVersionId, + state_sets: Sets, + auth_chain_sets: &'a [HashSet], + event_fetch: &Fetch, + event_exists: &Exists, + parallel_fetches: usize, +) -> Result> +where + Fetch: Fn(E::Id) -> FetchFut + Sync, + FetchFut: Future> + Send, + Exists: Fn(E::Id) -> ExistsFut + Sync, + ExistsFut: Future + Send, + Sets: IntoIterator + Send, + SetIter: Iterator> + Clone + Send, + Hasher: BuildHasher + Send + Sync, + E: Event + Clone + Send + Sync, + E::Id: Borrow + Send + Sync, + for<'b> &'b E: Send, +{ + debug!("State resolution starting"); + + // Split non-conflicting and conflicting state + let (clean, conflicting) = separate(state_sets.into_iter()); + + debug!(count = clean.len(), "non-conflicting events"); + trace!(map = ?clean, "non-conflicting events"); + + if conflicting.is_empty() { + debug!("no conflicting state found"); + return Ok(clean); + } + + debug!(count = conflicting.len(), "conflicting events"); + trace!(map = ?conflicting, "conflicting events"); + + let conflicting_values = conflicting.into_values().flatten().stream(); + + // `all_conflicted` contains unique items + // synapse says `full_set = {eid for eid in full_conflicted_set if eid in + // event_map}` + let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets) + .chain(conflicting_values) + .broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id)) + .collect() + .await; + + debug!(count = all_conflicted.len(), "full conflicted set"); + trace!(set = ?all_conflicted, "full conflicted set"); + + // We used to check that all events are events from the correct room + // this is now a check the caller of `resolve` must make. 
+ + // Get only the control events with a state_key: "" or ban/kick event (sender != + // state_key) + let control_events: Vec<_> = all_conflicted + .iter() + .stream() + .wide_filter_map(async |id| { + is_power_event_id(id, &event_fetch) + .await + .then_some(id.clone()) + }) + .collect() + .await; + + // Sort the control events based on power_level/clock/event_id and + // outgoing/incoming edges + let sorted_control_levels = reverse_topological_power_sort( + control_events, + &all_conflicted, + &event_fetch, + parallel_fetches, + ) + .await?; + + debug!(count = sorted_control_levels.len(), "power events"); + trace!(list = ?sorted_control_levels, "sorted power events"); + + let room_version = RoomVersion::new(room_version)?; + // Sequentially auth check each control event. + let resolved_control = iterative_auth_check( + &room_version, + sorted_control_levels.iter().stream(), + clean.clone(), + &event_fetch, + ) + .await?; + + debug!(count = resolved_control.len(), "resolved power events"); + trace!(map = ?resolved_control, "resolved power events"); + + // At this point the control_events have been resolved we now have to + // sort the remaining events using the mainline of the resolved power level. 
+ let deduped_power_ev: HashSet<_> = sorted_control_levels.into_iter().collect(); + + // This removes the control events that passed auth and more importantly those + // that failed auth + let events_to_resolve: Vec<_> = all_conflicted + .iter() + .filter(|&id| !deduped_power_ev.contains(id.borrow())) + .cloned() + .collect(); + + debug!(count = events_to_resolve.len(), "events left to resolve"); + trace!(list = ?events_to_resolve, "events left to resolve"); + + // This "epochs" power level event + let power_levels_ty_sk = (StateEventType::RoomPowerLevels, StateKey::new()); + let power_event = resolved_control.get(&power_levels_ty_sk); + + debug!(event_id = ?power_event, "power event"); + + let sorted_left_events = + mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?; + + trace!(list = ?sorted_left_events, "events left, sorted"); + + let mut resolved_state = iterative_auth_check( + &room_version, + sorted_left_events.iter().stream(), + resolved_control, // The control events are added to the final resolved state + &event_fetch, + ) + .await?; + + // Add unconflicted state to the resolved state + // We priorities the unconflicting state + resolved_state.extend(clean); + + debug!("state resolution finished"); + + Ok(resolved_state) +} + +/// Split the events that have no conflicts from those that are conflicting. +/// +/// The return tuple looks like `(unconflicted, conflicted)`. +/// +/// State is determined to be conflicting if for the given key (StateEventType, +/// StateKey) there is not exactly one event ID. This includes missing events, +/// if one state_set includes an event that none of the other have this is a +/// conflicting event. 
+fn separate<'a, Id>( + state_sets_iter: impl Iterator>, +) -> (StateMap, StateMap>) +where + Id: Clone + Eq + Hash + 'a, +{ + let mut state_set_count: usize = 0; + let mut occurrences = HashMap::<_, HashMap<_, _>>::new(); + + let state_sets_iter = + state_sets_iter.inspect(|_| state_set_count = state_set_count.saturating_add(1)); + for (k, v) in state_sets_iter.flatten() { + occurrences + .entry(k) + .or_default() + .entry(v) + .and_modify(|x: &mut usize| *x = x.saturating_add(1)) + .or_insert(1); + } + + let mut unconflicted_state = StateMap::new(); + let mut conflicted_state = StateMap::new(); + + for (k, v) in occurrences { + for (id, occurrence_count) in v { + if occurrence_count == state_set_count { + unconflicted_state.insert((k.0.clone(), k.1.clone()), id.clone()); + } else { + conflicted_state + .entry((k.0.clone(), k.1.clone())) + .and_modify(|x: &mut Vec<_>| x.push(id.clone())) + .or_insert_with(|| vec![id.clone()]); + } + } + } + + (unconflicted_state, conflicted_state) +} + +/// Returns a Vec of deduped EventIds that appear in some chains but not others. +#[allow(clippy::arithmetic_side_effects)] +fn get_auth_chain_diff( + auth_chain_sets: &[HashSet], +) -> impl Stream + Send + use +where + Id: Clone + Eq + Hash + Send, + Hasher: BuildHasher + Send + Sync, +{ + let num_sets = auth_chain_sets.len(); + let mut id_counts: HashMap = HashMap::new(); + for id in auth_chain_sets.iter().flatten() { + *id_counts.entry(id.clone()).or_default() += 1; + } + + id_counts + .into_iter() + .filter_map(move |(id, count)| (count < num_sets).then_some(id)) + .stream() +} + +/// Events are sorted from "earliest" to "latest". +/// +/// They are compared using the negative power level (reverse topological +/// ordering), the origin server timestamp and in case of a tie the `EventId`s +/// are compared lexicographically. +/// +/// The power level is negative because a higher power level is equated to an +/// earlier (further back in time) origin server timestamp. 
+#[tracing::instrument(level = "debug", skip_all)] +async fn reverse_topological_power_sort( + events_to_sort: Vec, + auth_diff: &HashSet, + fetch_event: &F, + parallel_fetches: usize, +) -> Result> +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send + Sync, + E::Id: Borrow + Send + Sync, +{ + debug!("reverse topological sort of power events"); + + let mut graph = HashMap::new(); + for event_id in events_to_sort { + add_event_and_auth_chain_to_graph(&mut graph, event_id, auth_diff, fetch_event).await; + } + + // This is used in the `key_fn` passed to the lexico_topo_sort fn + let event_to_pl = graph + .keys() + .stream() + .map(|event_id| { + get_power_level_for_sender(event_id.clone(), fetch_event) + .map(move |res| res.map(|pl| (event_id, pl))) + }) + .buffer_unordered(parallel_fetches) + .ready_try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { + debug!( + event_id = event_id.borrow().as_str(), + power_level = i64::from(pl), + "found the power level of an event's sender", + ); + + event_to_pl.insert(event_id.clone(), pl); + Ok(event_to_pl) + }) + .boxed() + .await?; + + let event_to_pl = &event_to_pl; + let fetcher = |event_id: E::Id| async move { + let pl = *event_to_pl + .get(event_id.borrow()) + .ok_or_else(|| Error::NotFound(String::new()))?; + let ev = fetch_event(event_id) + .await + .ok_or_else(|| Error::NotFound(String::new()))?; + Ok((pl, ev.origin_server_ts())) + }; + + lexicographical_topological_sort(&graph, &fetcher).await +} + +/// Sorts the event graph based on number of outgoing/incoming edges. +/// +/// `key_fn` is used as to obtain the power level and age of an event for +/// breaking ties (together with the event ID). 
+#[tracing::instrument(level = "debug", skip_all)] +pub async fn lexicographical_topological_sort( + graph: &HashMap>, + key_fn: &F, +) -> Result> +where + F: Fn(Id) -> Fut + Sync, + Fut: Future> + Send, + Id: Borrow + Clone + Eq + Hash + Ord + Send + Sync, + Hasher: BuildHasher + Default + Clone + Send + Sync, +{ + #[derive(PartialEq, Eq)] + struct TieBreaker<'a, Id> { + power_level: Int, + origin_server_ts: MilliSecondsSinceUnixEpoch, + event_id: &'a Id, + } + + impl Ord for TieBreaker<'_, Id> + where + Id: Ord, + { + fn cmp(&self, other: &Self) -> Ordering { + // NOTE: the power level comparison is "backwards" intentionally. + // See the "Mainline ordering" section of the Matrix specification + // around where it says the following: + // + // > for events `x` and `y`, `x < y` if [...] + // + // + other + .power_level + .cmp(&self.power_level) + .then(self.origin_server_ts.cmp(&other.origin_server_ts)) + .then(self.event_id.cmp(other.event_id)) + } + } + + impl PartialOrd for TieBreaker<'_, Id> + where + Id: Ord, + { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } + } + + debug!("starting lexicographical topological sort"); + + // NOTE: an event that has no incoming edges happened most recently, + // and an event that has no outgoing edges happened least recently. + + // NOTE: this is basically Kahn's algorithm except we look at nodes with no + // outgoing edges, c.f. + // https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm + + // outdegree_map is an event referring to the events before it, the + // more outdegree's the more recent the event. + let mut outdegree_map = graph.clone(); + + // The number of events that depend on the given event (the EventId key) + // How many events reference this event in the DAG as a parent + let mut reverse_graph: HashMap<_, HashSet<_, Hasher>> = HashMap::new(); + + // Vec of nodes that have zero out degree, least recent events. 
+ let mut zero_outdegree = Vec::new(); + + for (node, edges) in graph { + if edges.is_empty() { + let (power_level, origin_server_ts) = key_fn(node.clone()).await?; + // The `Reverse` is because rusts `BinaryHeap` sorts largest -> smallest we need + // smallest -> largest + zero_outdegree.push(Reverse(TieBreaker { + power_level, + origin_server_ts, + event_id: node, + })); + } + + reverse_graph.entry(node).or_default(); + for edge in edges { + reverse_graph.entry(edge).or_default().insert(node); + } + } + + let mut heap = BinaryHeap::from(zero_outdegree); + + // We remove the oldest node (most incoming edges) and check against all other + let mut sorted = vec![]; + // Destructure the `Reverse` and take the smallest `node` each time + while let Some(Reverse(item)) = heap.pop() { + let node = item.event_id; + + for &parent in reverse_graph + .get(node) + .expect("EventId in heap is also in reverse_graph") + { + // The number of outgoing edges this node has + let out = outdegree_map + .get_mut(parent.borrow()) + .expect("outdegree_map knows of all referenced EventIds"); + + // Only push on the heap once older events have been cleared + out.remove(node.borrow()); + if out.is_empty() { + let (power_level, origin_server_ts) = key_fn(parent.clone()).await?; + heap.push(Reverse(TieBreaker { + power_level, + origin_server_ts, + event_id: parent, + })); + } + } + + // synapse yields we push then return the vec + sorted.push(node.clone()); + } + + Ok(sorted) +} + +/// Find the power level for the sender of `event_id` or return a default value +/// of zero. +/// +/// Do NOT use this any where but topological sort, we find the power level for +/// the eventId at the eventId's generation (we walk backwards to `EventId`s +/// most recent previous power level event). 
+async fn get_power_level_for_sender( + event_id: E::Id, + fetch_event: &F, +) -> serde_json::Result +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send, +{ + debug!("fetch event ({event_id}) senders power level"); + + let event = fetch_event(event_id).await; + + let auth_events = event.as_ref().map(Event::auth_events); + + let pl = auth_events + .into_iter() + .flatten() + .stream() + .broadn_filter_map(5, |aid| fetch_event(aid.clone())) + .ready_find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")) + .await; + + let content: PowerLevelsContentFields = match pl { + | None => return Ok(int!(0)), + | Some(ev) => from_json_str(ev.content().get())?, + }; + + if let Some(ev) = event { + if let Some(&user_level) = content.get_user_power(ev.sender()) { + debug!("found {} at power_level {user_level}", ev.sender()); + return Ok(user_level); + } + } + + Ok(content.users_default) +} + +/// Check the that each event is authenticated based on the events before it. +/// +/// ## Returns +/// +/// The `unconflicted_state` combined with the newly auth'ed events. So any +/// event that fails the `event_auth::auth_check` will be excluded from the +/// returned state map. +/// +/// For each `events_to_check` event we gather the events needed to auth it from +/// the the `fetch_event` closure and verify each event using the +/// `event_auth::auth_check` function. 
+async fn iterative_auth_check<'a, E, F, Fut, S>( + room_version: &RoomVersion, + events_to_check: S, + unconflicted_state: StateMap, + fetch_event: &F, +) -> Result> +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E::Id: Borrow + Clone + Eq + Ord + Send + Sync + 'a, + S: Stream + Send + 'a, + E: Event + Clone + Send + Sync, +{ + debug!("starting iterative auth check"); + + let events_to_check: Vec<_> = events_to_check + .map(Result::Ok) + .broad_and_then(async |event_id| { + fetch_event(event_id.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) + }) + .try_collect() + .boxed() + .await?; + + let auth_event_ids: HashSet = events_to_check + .iter() + .flat_map(|event: &E| event.auth_events().map(Clone::clone)) + .collect(); + + let auth_events: HashMap = auth_event_ids + .into_iter() + .stream() + .broad_filter_map(fetch_event) + .map(|auth_event| (auth_event.event_id().clone(), auth_event)) + .collect() + .boxed() + .await; + + let auth_events = &auth_events; + let mut resolved_state = unconflicted_state; + for event in &events_to_check { + let state_key = event + .state_key() + .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?; + + let auth_types = auth_types_for_event( + event.event_type(), + event.sender(), + Some(state_key), + event.content(), + )?; + + let mut auth_state = StateMap::new(); + for aid in event.auth_events() { + if let Some(ev) = auth_events.get(aid.borrow()) { + //TODO: synapse checks "rejected_reason" which is most likely related to + // soft-failing + auth_state.insert( + ev.event_type() + .with_state_key(ev.state_key().ok_or_else(|| { + Error::InvalidPdu("State event had no state key".to_owned()) + })?), + ev.clone(), + ); + } else { + warn!(event_id = aid.borrow().as_str(), "missing auth event"); + } + } + + auth_types + .iter() + .stream() + .ready_filter_map(|key| Some((key, resolved_state.get(key)?))) + .filter_map(|(key, ev_id)| async move { + if let 
Some(event) = auth_events.get(ev_id.borrow()) { + Some((key, event.clone())) + } else { + Some((key, fetch_event(ev_id.clone()).await?)) + } + }) + .ready_for_each(|(key, event)| { + //TODO: synapse checks "rejected_reason" is None here + auth_state.insert(key.to_owned(), event); + }) + .await; + + debug!("event to check {:?}", event.event_id()); + + // The key for this is (eventType + a state_key of the signed token not sender) + // so search for it + let current_third_party = auth_state.iter().find_map(|(_, pdu)| { + (*pdu.event_type() == TimelineEventType::RoomThirdPartyInvite).then_some(pdu) + }); + + let fetch_state = |ty: &StateEventType, key: &str| { + future::ready(auth_state.get(&ty.with_state_key(key))) + }; + + let auth_result = + auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await; + + match auth_result { + | Ok(true) => { + // add event to resolved state map + resolved_state.insert( + event.event_type().with_state_key(state_key), + event.event_id().clone(), + ); + }, + | Ok(false) => { + // synapse passes here on AuthError. We do not add this event to resolved_state. + warn!("event {} failed the authentication check", event.event_id()); + }, + | Err(e) => { + debug_error!("event {} failed the authentication check: {e}", event.event_id()); + return Err(e); + }, + } + } + + Ok(resolved_state) +} + +/// Returns the sorted `to_sort` list of `EventId`s based on a mainline sort +/// using the depth of `resolved_power_level`, the server timestamp, and the +/// eventId. +/// +/// The depth of the given event is calculated based on the depth of it's +/// closest "parent" power_level event. If there have been two power events the +/// after the most recent are depth 0, the events before (with the first power +/// level as a parent) will be marked as depth 1. depth 1 is "older" than depth +/// 0. 
+async fn mainline_sort( + to_sort: &[E::Id], + resolved_power_level: Option, + fetch_event: &F, +) -> Result> +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Clone + Send + Sync, + E::Id: Borrow + Clone + Send + Sync, +{ + debug!("mainline sort of events"); + + // There are no EventId's to sort, bail. + if to_sort.is_empty() { + return Ok(vec![]); + } + + let mut mainline = vec![]; + let mut pl = resolved_power_level; + while let Some(p) = pl { + mainline.push(p.clone()); + + let event = fetch_event(p.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?; + + pl = None; + for aid in event.auth_events() { + let ev = fetch_event(aid.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + + if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") { + pl = Some(aid.to_owned()); + break; + } + } + } + + let mainline_map: HashMap<_, _> = mainline + .iter() + .rev() + .enumerate() + .map(|(idx, eid)| ((*eid).clone(), idx)) + .collect(); + + let order_map: HashMap<_, _> = to_sort + .iter() + .stream() + .broad_filter_map(async |ev_id| { + fetch_event(ev_id.clone()).await.map(|event| (event, ev_id)) + }) + .broad_filter_map(|(event, ev_id)| { + get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event) + .map_ok(move |depth| (ev_id, (depth, event.origin_server_ts(), ev_id))) + .map(Result::ok) + }) + .collect() + .boxed() + .await; + + // Sort the event_ids by their depth, timestamp and EventId + // unwrap is OK order map and sort_event_ids are from to_sort (the same Vec) + let mut sort_event_ids: Vec<_> = order_map.keys().map(|&k| k.clone()).collect(); + + sort_event_ids.sort_by_key(|sort_id| &order_map[sort_id]); + + Ok(sort_event_ids) +} + +/// Get the mainline depth from the `mainline_map` or finds a power_level event +/// that has an associated mainline depth. 
+async fn get_mainline_depth( + mut event: Option, + mainline_map: &HashMap, + fetch_event: &F, +) -> Result +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send + Sync, + E::Id: Borrow + Send + Sync, +{ + while let Some(sort_ev) = event { + debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); + + let id = sort_ev.event_id(); + if let Some(depth) = mainline_map.get(id.borrow()) { + return Ok(*depth); + } + + event = None; + for aid in sort_ev.auth_events() { + let aev = fetch_event(aid.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + + if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") { + event = Some(aev); + break; + } + } + } + // Did not find a power level event so we default to zero + Ok(0) +} + +async fn add_event_and_auth_chain_to_graph( + graph: &mut HashMap>, + event_id: E::Id, + auth_diff: &HashSet, + fetch_event: &F, +) where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send + Sync, + E::Id: Borrow + Clone + Send + Sync, +{ + let mut state = vec![event_id]; + while let Some(eid) = state.pop() { + graph.entry(eid.clone()).or_default(); + let event = fetch_event(eid.clone()).await; + let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); + + // Prefer the store to event as the store filters dedups the events + for aid in auth_events { + if auth_diff.contains(aid.borrow()) { + if !graph.contains_key(aid.borrow()) { + state.push(aid.to_owned()); + } + + // We just inserted this at the start of the while loop + graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned()); + } + } + } +} + +async fn is_power_event_id(event_id: &E::Id, fetch: &F) -> bool +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send + Sync, +{ + match fetch(event_id.clone()).await.as_ref() { + | Some(state) => is_power_event(state), + | _ => false, + } +} + +fn is_type_and_key(ev: impl Event, ev_type: 
&TimelineEventType, state_key: &str) -> bool { + ev.event_type() == ev_type && ev.state_key() == Some(state_key) +} + +fn is_power_event(event: impl Event) -> bool { + match event.event_type() { + | TimelineEventType::RoomPowerLevels + | TimelineEventType::RoomJoinRules + | TimelineEventType::RoomCreate => event.state_key() == Some(""), + | TimelineEventType::RoomMember => { + if let Ok(content) = from_json_str::(event.content().get()) { + if [MembershipState::Leave, MembershipState::Ban].contains(&content.membership) { + return Some(event.sender().as_str()) != event.state_key(); + } + } + + false + }, + | _ => false, + } +} + +/// Convenience trait for adding event type plus state key to state maps. +pub trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey); +} + +impl EventTypeExt for StateEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + (self, state_key.into()) + } +} + +impl EventTypeExt for TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + (self.into(), state_key.into()) + } +} + +impl EventTypeExt for &T +where + T: EventTypeExt + Clone, +{ + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + self.to_owned().with_state_key(state_key) + } +} + +#[cfg(test)] +mod tests { + use std::collections::{HashMap, HashSet}; + + use maplit::{hashmap, hashset}; + use rand::seq::SliceRandom; + use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, + events::{ + StateEventType, TimelineEventType, + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, + }, + int, uint, + }; + use serde_json::{json, value::to_raw_value as to_raw_json_value}; + + use super::{ + Event, EventTypeExt, StateMap, is_power_event, + room_version::RoomVersion, + test_utils::{ + INITIAL_EVENTS, PduEvent, TestStore, alice, bob, charlie, do_check, ella, event_id, + member_content_ban, member_content_join, room_id, 
to_init_pdu_event, to_pdu_event, + zara, + }, + }; + use crate::{debug, utils::stream::IterStream}; + + async fn test_event_sort() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let event_map = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let auth_chain: HashSet = HashSet::new(); + + let power_events = event_map + .values() + .filter(|&pdu| is_power_event(&*pdu)) + .map(|pdu| pdu.event_id.clone()) + .collect::>(); + + let fetcher = |id| ready(events.get(&id).cloned()); + let sorted_power_events = + super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) + .await + .unwrap(); + + let resolved_power = super::iterative_auth_check( + &RoomVersion::V6, + sorted_power_events.iter().stream(), + HashMap::new(), // unconflicted events + &fetcher, + ) + .await + .expect("iterative auth check failed on resolved events"); + + // don't remove any events so we know it sorts them all correctly + let mut events_to_sort = events.keys().cloned().collect::>(); + + events_to_sort.shuffle(&mut rand::thread_rng()); + + let power_level = resolved_power + .get(&(StateEventType::RoomPowerLevels, "".into())) + .cloned(); + + let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher) + .await + .unwrap(); + + assert_eq!( + vec![ + "$CREATE:foo", + "$IMA:foo", + "$IPOWER:foo", + "$IJR:foo", + "$IMB:foo", + "$IMC:foo", + "$START:foo", + "$END:foo" + ], + sorted_event_ids + .iter() + .map(|id| id.to_string()) + .collect::>() + ); + } + + #[tokio::test] + async fn test_sort() { + for _ in 0..20 { + // since we shuffle the eventIds before we sort them introducing randomness + // seems like we should test this a few times + test_event_sort().await; + } + } + + #[tokio::test] + async fn ban_vs_power_level() { + let _ = 
tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "MA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + ), + to_init_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_ban(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + ]; + + let edges = vec![vec!["END", "MB", "MA", "PA", "START"], vec!["END", "PA", "PB"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA", "MA", "MB"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_basic() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA1", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA2", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { 
alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T3", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + ]; + + let edges = + vec![vec!["END", "PA2", "T2", "PA1", "T1", "START"], vec!["END", "T3", "PB", "PA1"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA2", "T2"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_reset() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_ban(), + ), + ]; + + let edges = vec![vec!["END", "MB", "T2", "PA", "T1", "START"], vec!["END", "T1"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["T1", "MB", "PA"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn join_rule_evasion() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "JR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Private)).unwrap(), + ), + to_init_pdu_event( + "ME", + 
ella(), + TimelineEventType::RoomMember, + Some(ella().to_string().as_str()), + member_content_join(), + ), + ]; + + let edges = vec![vec!["END", "JR", "START"], vec!["END", "ME", "START"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec![event_id("JR")]; + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn offtopic_power_level() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value( + &json!({ "users": { alice(): 100, bob(): 50, charlie(): 50 } }), + ) + .unwrap(), + ), + to_init_pdu_event( + "PC", + charlie(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50, charlie(): 0 } })) + .unwrap(), + ), + ]; + + let edges = vec![vec!["END", "PC", "PB", "PA", "START"], vec!["END", "PA"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PC"].into_iter().map(event_id).collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_setting() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA1", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + alice(), + 
TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA2", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T3", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "MZ1", + zara(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "T4", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + ]; + + let edges = vec![vec!["END", "T4", "MZ1", "PA2", "T2", "PA1", "T1", "START"], vec![ + "END", "MZ1", "T3", "PB", "PA1", + ]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["T4", "PA2"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn test_event_map_none() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let mut store = TestStore::(hashmap! 
{}); + + // build up the DAG + let (state_at_bob, state_at_charlie, expected) = store.set_up(); + + let ev_map = store.0.clone(); + let fetcher = |id| ready(ev_map.get(&id).cloned()); + + let exists = |id: ::Id| ready(ev_map.get(&*id).is_some()); + + let state_sets = [state_at_bob, state_at_charlie]; + let auth_chain: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let resolved = match super::resolve( + &RoomVersionId::V2, + &state_sets, + &auth_chain, + &fetcher, + &exists, + 1, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + + assert_eq!(expected, resolved); + } + + #[tokio::test] + async fn test_lexicographical_sort() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let graph = hashmap! { + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + + let res = super::lexicographical_topological_sort(&graph, &|_id| async { + Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }) + .await + .unwrap(); + + assert_eq!( + vec!["o", "l", "n", "m", "p"], + res.iter() + .map(ToString::to_string) + .map(|s| s.replace('$', "").replace(":foo", "")) + .collect::>() + ); + } + + #[tokio::test] + async fn ban_with_auth_chains() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let ban = BAN_STATE_SET(); + + let edges = vec![vec!["END", "MB", "PA", "START"], vec!["END", "IME", "MB"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA", "MB"] + .into_iter() + .map(event_id) + .collect::>(); + + 
do_check(&ban.values().cloned().collect::>(), edges, expected_state_ids).await; + } + + #[tokio::test] + async fn ban_with_auth_chains2() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let init = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + let mut inner = init.clone(); + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) + .collect::>(); + + let ev_map = &store.0; + let state_sets = [state_set_a, state_set_b]; + let auth_chain: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); + let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); + let resolved = match super::resolve( + &RoomVersionId::V6, + &state_sets, + &auth_chain, + &fetcher, + &exists, + 1, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + + debug!( + resolved = ?resolved + .iter() + .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) + .collect::>(), + "resolved state", + ); + + let 
expected = [ + "$CREATE:foo", + "$IJR:foo", + "$PA:foo", + "$IMA:foo", + "$IMB:foo", + "$IMC:foo", + "$MB:foo", + ]; + + for id in expected.iter().map(|i| event_id(i)) { + // make sure our resolved events are equal to the expected list + assert!(resolved.values().any(|eid| eid == &id) || init.contains_key(&id), "{id}"); + } + assert_eq!(expected.len(), resolved.len()); + } + + #[tokio::test] + async fn join_rule_with_auth_chain() { + let join_rule = JOIN_RULE(); + + let edges = vec![vec!["END", "JR", "START"], vec!["END", "IMZ", "START"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["JR"].into_iter().map(event_id).collect::>(); + + do_check(&join_rule.values().cloned().collect::>(), edges, expected_state_ids) + .await; + } + + #[allow(non_snake_case)] + fn BAN_STATE_SET() -> HashMap { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id.clone(), ev)) + .collect() + } + + #[allow(non_snake_case)] + fn JOIN_RULE() -> HashMap { + vec![ + to_pdu_event( + "JR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&json!({ "join_rule": "invite" })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + 
&["START"], + ), + to_pdu_event( + "IMZ", + zara(), + TimelineEventType::RoomPowerLevels, + Some(zara().as_str()), + member_content_join(), + &["CREATE", "JR", "IPOWER"], + &["START"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id.clone(), ev)) + .collect() + } + + macro_rules! state_set { + ($($kind:expr_2021 => $key:expr_2021 => $id:expr_2021),* $(,)?) => {{ + #[allow(unused_mut)] + let mut x = StateMap::new(); + $( + x.insert(($kind, $key.into()), $id); + )* + x + }}; + } + + #[test] + fn separate_unique_conflicted() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@b:hs1" => 1], + state_set![StateEventType::RoomMember => "@c:hs1" => 2], + ] + .iter(), + ); + + assert_eq!(unconflicted, StateMap::new()); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => vec![0], + StateEventType::RoomMember => "@b:hs1" => vec![1], + StateEventType::RoomMember => "@c:hs1" => vec![2], + ],); + } + + #[test] + fn separate_conflicted() { + let (unconflicted, mut conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 1], + state_set![StateEventType::RoomMember => "@a:hs1" => 2], + ] + .iter(), + ); + + // HashMap iteration order is random, so sort this before asserting on it + for v in conflicted.values_mut() { + v.sort_unstable(); + } + + assert_eq!(unconflicted, StateMap::new()); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => vec![0, 1, 2], + ],); + } + + #[test] + fn separate_unconflicted() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + ] + .iter(), + ); + + assert_eq!(unconflicted, state_set![ + StateEventType::RoomMember => 
"@a:hs1" => 0, + ],); + assert_eq!(conflicted, StateMap::new()); + } + + #[test] + fn separate_mixed() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + StateEventType::RoomMember => "@b:hs1" => 1, + ], + state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + StateEventType::RoomMember => "@c:hs1" => 2, + ], + ] + .iter(), + ); + + assert_eq!(unconflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + ],); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@b:hs1" => vec![1], + StateEventType::RoomMember => "@c:hs1" => vec![2], + ],); + } +} diff --git a/src/core/matrix/state_res/outcomes.txt b/src/core/matrix/state_res/outcomes.txt new file mode 100644 index 00000000..0fa1c734 --- /dev/null +++ b/src/core/matrix/state_res/outcomes.txt @@ -0,0 +1,104 @@ +11/29/2020 BRANCH: timo-spec-comp REV: d2a85669cc6056679ce6ca0fde4658a879ad2b08 +lexicographical topological sort + time: [1.7123 us 1.7157 us 1.7199 us] + change: [-1.7584% -1.5433% -1.3205%] (p = 0.00 < 0.05) + Performance has improved. +Found 8 outliers among 100 measurements (8.00%) + 2 (2.00%) low mild + 5 (5.00%) high mild + 1 (1.00%) high severe + +resolve state of 5 events one fork + time: [10.981 us 10.998 us 11.020 us] +Found 3 outliers among 100 measurements (3.00%) + 3 (3.00%) high mild + +resolve state of 10 events 3 conflicting + time: [26.858 us 26.946 us 27.037 us] + +11/29/2020 BRANCH: event-trait REV: f0eb1310efd49d722979f57f20bd1ac3592b0479 +lexicographical topological sort + time: [1.7686 us 1.7738 us 1.7810 us] + change: [-3.2752% -2.4634% -1.7635%] (p = 0.00 < 0.05) + Performance has improved. +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe + +resolve state of 5 events one fork + time: [10.643 us 10.656 us 10.669 us] + change: [-4.9990% -3.8078% -2.8319%] (p = 0.00 < 0.05) + Performance has improved. 
+Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe + +resolve state of 10 events 3 conflicting + time: [29.149 us 29.252 us 29.375 us] + change: [-0.8433% -0.3270% +0.2656%] (p = 0.25 > 0.05) + No change in performance detected. +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high mild + +4/26/2020 BRANCH: fix-test-serde REV: +lexicographical topological sort + time: [1.6793 us 1.6823 us 1.6857 us] +Found 9 outliers among 100 measurements (9.00%) + 1 (1.00%) low mild + 4 (4.00%) high mild + 4 (4.00%) high severe + +resolve state of 5 events one fork + time: [9.9993 us 10.062 us 10.159 us] +Found 9 outliers among 100 measurements (9.00%) + 7 (7.00%) high mild + 2 (2.00%) high severe + +resolve state of 10 events 3 conflicting + time: [26.004 us 26.092 us 26.195 us] +Found 16 outliers among 100 measurements (16.00%) + 11 (11.00%) high mild + 5 (5.00%) high severe + +6/30/2021 BRANCH: state-closure REV: 174c3e2a72232ad75b3fb14b3551f5f746f4fe84 +lexicographical topological sort + time: [1.5496 us 1.5536 us 1.5586 us] +Found 9 outliers among 100 measurements (9.00%) + 1 (1.00%) low mild + 1 (1.00%) high mild + 7 (7.00%) high severe + +resolve state of 5 events one fork + time: [10.319 us 10.333 us 10.347 us] +Found 2 outliers among 100 measurements (2.00%) + 2 (2.00%) high severe + +resolve state of 10 events 3 conflicting + time: [25.770 us 25.805 us 25.839 us] +Found 7 outliers among 100 measurements (7.00%) + 5 (5.00%) high mild + 2 (2.00%) high severe + +7/20/2021 BRANCH stateres-result REV: +This marks the switch to HashSet/Map +lexicographical topological sort + time: [1.8122 us 1.8177 us 1.8233 us] + change: [+15.205% +15.919% +16.502%] (p = 0.00 < 0.05) + Performance has regressed. 
+Found 7 outliers among 100 measurements (7.00%) + 5 (5.00%) high mild + 2 (2.00%) high severe + +resolve state of 5 events one fork + time: [11.966 us 12.010 us 12.059 us] + change: [+16.089% +16.730% +17.469%] (p = 0.00 < 0.05) + Performance has regressed. +Found 7 outliers among 100 measurements (7.00%) + 3 (3.00%) high mild + 4 (4.00%) high severe + +resolve state of 10 events 3 conflicting + time: [29.092 us 29.201 us 29.311 us] + change: [+12.447% +12.847% +13.280%] (p = 0.00 < 0.05) + Performance has regressed. +Found 9 outliers among 100 measurements (9.00%) + 6 (6.00%) high mild + 3 (3.00%) high severe diff --git a/src/core/matrix/state_res/power_levels.rs b/src/core/matrix/state_res/power_levels.rs new file mode 100644 index 00000000..19ba8fb9 --- /dev/null +++ b/src/core/matrix/state_res/power_levels.rs @@ -0,0 +1,256 @@ +use std::collections::BTreeMap; + +use ruma::{ + Int, OwnedUserId, UserId, + events::{TimelineEventType, room::power_levels::RoomPowerLevelsEventContent}, + power_levels::{NotificationPowerLevels, default_power_level}, + serde::{ + deserialize_v1_powerlevel, vec_deserialize_int_powerlevel_values, + vec_deserialize_v1_powerlevel_values, + }, +}; +use serde::Deserialize; +use serde_json::{Error, from_str as from_json_str}; + +use super::{Result, RoomVersion}; +use crate::error; + +#[derive(Deserialize)] +struct IntRoomPowerLevelsEventContent { + #[serde(default = "default_power_level")] + ban: Int, + + #[serde(default)] + events: BTreeMap, + + #[serde(default)] + events_default: Int, + + #[serde(default)] + invite: Int, + + #[serde(default = "default_power_level")] + kick: Int, + + #[serde(default = "default_power_level")] + redact: Int, + + #[serde(default = "default_power_level")] + state_default: Int, + + #[serde(default)] + users: BTreeMap, + + #[serde(default)] + users_default: Int, + + #[serde(default)] + notifications: IntNotificationPowerLevels, +} + +impl From for RoomPowerLevelsEventContent { + fn from(int_pl: 
IntRoomPowerLevelsEventContent) -> Self { + let IntRoomPowerLevelsEventContent { + ban, + events, + events_default, + invite, + kick, + redact, + state_default, + users, + users_default, + notifications, + } = int_pl; + + let mut pl = Self::new(); + pl.ban = ban; + pl.events = events; + pl.events_default = events_default; + pl.invite = invite; + pl.kick = kick; + pl.redact = redact; + pl.state_default = state_default; + pl.users = users; + pl.users_default = users_default; + pl.notifications = notifications.into(); + + pl + } +} + +#[derive(Deserialize)] +struct IntNotificationPowerLevels { + #[serde(default = "default_power_level")] + room: Int, +} + +impl Default for IntNotificationPowerLevels { + fn default() -> Self { Self { room: default_power_level() } } +} + +impl From for NotificationPowerLevels { + fn from(int_notif: IntNotificationPowerLevels) -> Self { + let mut notif = Self::new(); + notif.room = int_notif.room; + + notif + } +} + +#[inline] +pub(crate) fn deserialize_power_levels( + content: &str, + room_version: &RoomVersion, +) -> Option { + if room_version.integer_power_levels { + deserialize_integer_power_levels(content) + } else { + deserialize_legacy_power_levels(content) + } +} + +fn deserialize_integer_power_levels(content: &str) -> Option { + match from_json_str::(content) { + | Ok(content) => Some(content.into()), + | Err(_) => { + error!("m.room.power_levels event is not valid with integer values"); + None + }, + } +} + +fn deserialize_legacy_power_levels(content: &str) -> Option { + match from_json_str(content) { + | Ok(content) => Some(content), + | Err(_) => { + error!( + "m.room.power_levels event is not valid with integer or string integer values" + ); + None + }, + } +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentFields { + #[serde(default, deserialize_with = "vec_deserialize_v1_powerlevel_values")] + pub(crate) users: Vec<(OwnedUserId, Int)>, + + #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] + 
pub(crate) users_default: Int, +} + +impl PowerLevelsContentFields { + pub(crate) fn get_user_power(&self, user_id: &UserId) -> Option<&Int> { + let comparator = |item: &(OwnedUserId, Int)| { + let item: &UserId = &item.0; + item.cmp(user_id) + }; + + self.users + .binary_search_by(comparator) + .ok() + .and_then(|idx| self.users.get(idx).map(|item| &item.1)) + } +} + +#[derive(Deserialize)] +struct IntPowerLevelsContentFields { + #[serde(default, deserialize_with = "vec_deserialize_int_powerlevel_values")] + users: Vec<(OwnedUserId, Int)>, + + #[serde(default)] + users_default: Int, +} + +impl From for PowerLevelsContentFields { + fn from(pl: IntPowerLevelsContentFields) -> Self { + let IntPowerLevelsContentFields { users, users_default } = pl; + Self { users, users_default } + } +} + +#[inline] +pub(crate) fn deserialize_power_levels_content_fields( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + deserialize_integer_power_levels_content_fields(content) + } else { + deserialize_legacy_power_levels_content_fields(content) + } +} + +fn deserialize_integer_power_levels_content_fields( + content: &str, +) -> Result { + from_json_str::(content).map(Into::into) +} + +fn deserialize_legacy_power_levels_content_fields( + content: &str, +) -> Result { + from_json_str(content) +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentInvite { + #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) invite: Int, +} + +#[derive(Deserialize)] +struct IntPowerLevelsContentInvite { + #[serde(default)] + invite: Int, +} + +impl From for PowerLevelsContentInvite { + fn from(pl: IntPowerLevelsContentInvite) -> Self { + let IntPowerLevelsContentInvite { invite } = pl; + Self { invite } + } +} + +pub(crate) fn deserialize_power_levels_content_invite( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + from_json_str::(content).map(Into::into) + 
} else { + from_json_str(content) + } +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentRedact { + #[serde(default = "default_power_level", deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) redact: Int, +} + +#[derive(Deserialize)] +pub(crate) struct IntPowerLevelsContentRedact { + #[serde(default = "default_power_level")] + redact: Int, +} + +impl From for PowerLevelsContentRedact { + fn from(pl: IntPowerLevelsContentRedact) -> Self { + let IntPowerLevelsContentRedact { redact } = pl; + Self { redact } + } +} + +pub(crate) fn deserialize_power_levels_content_redact( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + from_json_str::(content).map(Into::into) + } else { + from_json_str(content) + } +} diff --git a/src/core/matrix/state_res/room_version.rs b/src/core/matrix/state_res/room_version.rs new file mode 100644 index 00000000..8dfd6cde --- /dev/null +++ b/src/core/matrix/state_res/room_version.rs @@ -0,0 +1,150 @@ +use ruma::RoomVersionId; + +use super::{Error, Result}; + +#[derive(Debug)] +#[allow(clippy::exhaustive_enums)] +pub enum RoomDisposition { + /// A room version that has a stable specification. + Stable, + /// A room version that is not yet fully specified. + Unstable, +} + +#[derive(Debug)] +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub enum EventFormatVersion { + /// $id:server event id format + V1, + /// MSC1659-style $hash event id format: introduced for room v3 + V2, + /// MSC1884-style $hash format: introduced for room v4 + V3, +} + +#[derive(Debug)] +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub enum StateResolutionVersion { + /// State resolution for rooms at version 1. + V1, + /// State resolution for room at version 2 or later. 
+ V2, +} + +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +#[allow(clippy::struct_excessive_bools)] +pub struct RoomVersion { + /// The stability of this room. + pub disposition: RoomDisposition, + /// The format of the EventId. + pub event_format: EventFormatVersion, + /// Which state resolution algorithm is used. + pub state_res: StateResolutionVersion, + // FIXME: not sure what this one means? + pub enforce_key_validity: bool, + + /// `m.room.aliases` had special auth rules and redaction rules + /// before room version 6. + /// + /// before MSC2261/MSC2432, + pub special_case_aliases_auth: bool, + /// Strictly enforce canonical json, do not allow: + /// * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1] + /// * Floats + /// * NaN, Infinity, -Infinity + pub strict_canonicaljson: bool, + /// Verify notifications key while checking m.room.power_levels. + /// + /// bool: MSC2209: Check 'notifications' + pub limit_notifications_power_levels: bool, + /// Extra rules when verifying redaction events. + pub extra_redaction_checks: bool, + /// Allow knocking in event authentication. + /// + /// See [room v7 specification](https://spec.matrix.org/latest/rooms/v7/) for more information. + pub allow_knocking: bool, + /// Adds support for the restricted join rule. + /// + /// See: [MSC3289](https://github.com/matrix-org/matrix-spec-proposals/pull/3289) for more information. + pub restricted_join_rules: bool, + /// Adds support for the knock_restricted join rule. + /// + /// See: [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) for more information. + pub knock_restricted_join_rule: bool, + /// Enforces integer power levels. + /// + /// See: [MSC3667](https://github.com/matrix-org/matrix-spec-proposals/pull/3667) for more information. + pub integer_power_levels: bool, + /// Determine the room creator using the `m.room.create` event's `sender`, + /// instead of the event content's `creator` field. 
+ /// + /// See: [MSC2175](https://github.com/matrix-org/matrix-spec-proposals/pull/2175) for more information. + pub use_room_create_sender: bool, +} + +impl RoomVersion { + pub const V1: Self = Self { + disposition: RoomDisposition::Stable, + event_format: EventFormatVersion::V1, + state_res: StateResolutionVersion::V1, + enforce_key_validity: false, + special_case_aliases_auth: true, + strict_canonicaljson: false, + limit_notifications_power_levels: false, + extra_redaction_checks: true, + allow_knocking: false, + restricted_join_rules: false, + knock_restricted_join_rule: false, + integer_power_levels: false, + use_room_create_sender: false, + }; + pub const V10: Self = Self { + knock_restricted_join_rule: true, + integer_power_levels: true, + ..Self::V9 + }; + pub const V11: Self = Self { + use_room_create_sender: true, + ..Self::V10 + }; + pub const V2: Self = Self { + state_res: StateResolutionVersion::V2, + ..Self::V1 + }; + pub const V3: Self = Self { + event_format: EventFormatVersion::V2, + extra_redaction_checks: false, + ..Self::V2 + }; + pub const V4: Self = Self { + event_format: EventFormatVersion::V3, + ..Self::V3 + }; + pub const V5: Self = Self { enforce_key_validity: true, ..Self::V4 }; + pub const V6: Self = Self { + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + ..Self::V5 + }; + pub const V7: Self = Self { allow_knocking: true, ..Self::V6 }; + pub const V8: Self = Self { restricted_join_rules: true, ..Self::V7 }; + pub const V9: Self = Self::V8; + + pub fn new(version: &RoomVersionId) -> Result { + Ok(match version { + | RoomVersionId::V1 => Self::V1, + | RoomVersionId::V2 => Self::V2, + | RoomVersionId::V3 => Self::V3, + | RoomVersionId::V4 => Self::V4, + | RoomVersionId::V5 => Self::V5, + | RoomVersionId::V6 => Self::V6, + | RoomVersionId::V7 => Self::V7, + | RoomVersionId::V8 => Self::V8, + | RoomVersionId::V9 => Self::V9, + | RoomVersionId::V10 => Self::V10, + | 
RoomVersionId::V11 => Self::V11, + | ver => return Err(Error::Unsupported(format!("found version `{ver}`"))), + }) + } +} diff --git a/src/core/matrix/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs new file mode 100644 index 00000000..a666748a --- /dev/null +++ b/src/core/matrix/state_res/test_utils.rs @@ -0,0 +1,691 @@ +use std::{ + borrow::Borrow, + collections::{BTreeMap, HashMap, HashSet}, + sync::atomic::{AtomicU64, Ordering::SeqCst}, +}; + +use futures::future::ready; +use ruma::{ + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, ServerSignatures, + UserId, event_id, + events::{ + TimelineEventType, + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + }, + int, room_id, uint, user_id, +}; +use serde_json::{ + json, + value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, +}; + +pub(crate) use self::event::PduEvent; +use super::auth_types_for_event; +use crate::{ + Result, info, + matrix::{Event, EventTypeExt, StateMap}, +}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +pub(crate) async fn do_check( + events: &[PduEvent], + edges: Vec>, + expected_state_ids: Vec, +) { + // To activate logging use `RUST_LOG=debug cargo t` + + let init_events = INITIAL_EVENTS(); + + let mut store = TestStore( + init_events + .values() + .chain(events) + .map(|ev| (ev.event_id().to_owned(), ev.clone())) + .collect(), + ); + + // This will be lexi_topo_sorted for resolution + let mut graph = HashMap::new(); + // This is the same as in `resolve` event_id -> OriginalStateEvent + let mut fake_event_map = HashMap::new(); + + // Create the DB of events that led up to this point + // TODO maybe clean up some of these clones it is just tests but... 
+ for ev in init_events.values().chain(events) { + graph.insert(ev.event_id().to_owned(), HashSet::new()); + fake_event_map.insert(ev.event_id().to_owned(), ev.clone()); + } + + for pair in INITIAL_EDGES().windows(2) { + if let [a, b] = &pair { + graph + .entry(a.to_owned()) + .or_insert_with(HashSet::new) + .insert(b.clone()); + } + } + + for edge_list in edges { + for pair in edge_list.windows(2) { + if let [a, b] = &pair { + graph + .entry(a.to_owned()) + .or_insert_with(HashSet::new) + .insert(b.clone()); + } + } + } + + // event_id -> PduEvent + let mut event_map: HashMap = HashMap::new(); + // event_id -> StateMap + let mut state_at_event: HashMap> = HashMap::new(); + + // Resolve the current state and add it to the state_at_event map then continue + // on in "time" + for node in super::lexicographical_topological_sort(&graph, &|_id| async { + Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }) + .await + .unwrap() + { + let fake_event = fake_event_map.get(&node).unwrap(); + let event_id = fake_event.event_id().to_owned(); + + let prev_events = graph.get(&node).unwrap(); + + let state_before: StateMap = if prev_events.is_empty() { + HashMap::new() + } else if prev_events.len() == 1 { + state_at_event + .get(prev_events.iter().next().unwrap()) + .unwrap() + .clone() + } else { + let state_sets = prev_events + .iter() + .filter_map(|k| state_at_event.get(k)) + .collect::>(); + + info!( + "{:#?}", + state_sets + .iter() + .map(|map| map + .iter() + .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) + .collect::>()) + .collect::>() + ); + + let auth_chain_sets: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let event_map = &event_map; + let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); + let exists = |id: ::Id| ready(event_map.get(&id).is_some()); + let resolved = super::resolve( + &RoomVersionId::V6, + state_sets, + &auth_chain_sets, + 
&fetch, + &exists, + 1, + ) + .await; + + match resolved { + | Ok(state) => state, + | Err(e) => panic!("resolution for {node} failed: {e}"), + } + }; + + let mut state_after = state_before.clone(); + + let ty = fake_event.event_type(); + let key = fake_event.state_key().unwrap(); + state_after.insert(ty.with_state_key(key), event_id.to_owned()); + + let auth_types = auth_types_for_event( + fake_event.event_type(), + fake_event.sender(), + fake_event.state_key(), + fake_event.content(), + ) + .unwrap(); + + let mut auth_events = vec![]; + for key in auth_types { + if state_before.contains_key(&key) { + auth_events.push(state_before[&key].clone()); + } + } + + // TODO The event is just remade, adding the auth_events and prev_events here + // the `to_pdu_event` was split into `init` and the fn below, could be better + let e = fake_event; + let ev_id = e.event_id(); + let event = to_pdu_event( + e.event_id().as_str(), + e.sender(), + e.event_type().clone(), + e.state_key(), + e.content().to_owned(), + &auth_events, + &prev_events.iter().cloned().collect::>(), + ); + + // We have to update our store, an actual user of this lib would + // be giving us state from a DB. 
+ store.0.insert(ev_id.to_owned(), event.clone()); + + state_at_event.insert(node, state_after); + event_map.insert(event_id.to_owned(), store.0.get(ev_id).unwrap().clone()); + } + + let mut expected_state = StateMap::new(); + for node in expected_state_ids { + let ev = event_map.get(&node).unwrap_or_else(|| { + panic!( + "{node} not found in {:?}", + event_map + .keys() + .map(ToString::to_string) + .collect::>() + ) + }); + + let key = ev.event_type().with_state_key(ev.state_key().unwrap()); + + expected_state.insert(key, node); + } + + let start_state = state_at_event.get(event_id!("$START:foo")).unwrap(); + + let end_state = state_at_event + .get(event_id!("$END:foo")) + .unwrap() + .iter() + .filter(|(k, v)| { + expected_state.contains_key(k) + || start_state.get(k) != Some(*v) + // Filter out the dummy messages events. + // These act as points in time where there should be a known state to + // test against. + && **k != ("m.room.message".into(), "dummy".into()) + }) + .map(|(k, v)| (k.clone(), v.clone())) + .collect::>(); + + assert_eq!(expected_state, end_state); +} + +#[allow(clippy::exhaustive_structs)] +pub(crate) struct TestStore(pub(crate) HashMap); + +impl TestStore { + pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result { + self.0 + .get(event_id) + .cloned() + .ok_or_else(|| super::Error::NotFound(format!("{event_id} not found"))) + .map_err(Into::into) + } + + /// Returns a Vec of the related auth events to the given `event`. 
+ pub(crate) fn auth_event_ids( + &self, + room_id: &RoomId, + event_ids: Vec, + ) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while let Some(ev_id) = stack.pop() { + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } +} + +// A StateStore implementation for testing +#[allow(clippy::type_complexity)] +impl TestStore { + pub(crate) fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), create_event.clone()); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0 + .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0 + .insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + 
"IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +pub(crate) fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + + format!("${id}:foo").try_into().unwrap() +} + +pub(crate) fn alice() -> &'static UserId { user_id!("@alice:foo") } + +pub(crate) fn bob() -> &'static UserId { user_id!("@bob:foo") } + +pub(crate) fn charlie() -> &'static UserId { user_id!("@charlie:foo") } + +pub(crate) fn ella() -> &'static UserId { user_id!("@ella:foo") } + +pub(crate) fn zara() -> &'static UserId { user_id!("@zara:foo") } + +pub(crate) fn room_id() -> &'static RoomId { room_id!("!test:foo") } + +pub(crate) fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +pub(crate) fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +pub(crate) fn to_init_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, +) -> PduEvent { + let ts = SERVER_TIMESTAMP.fetch_add(1, 
SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${id}:foo") + }; + + let state_key = state_key.map(ToOwned::to_owned); + PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: BTreeMap::new(), + auth_events: vec![], + prev_events: vec![], + depth: uint!(0), + hashes: EventHash::new("".to_owned()), + signatures: ServerSignatures::default(), + }), + } +} + +pub(crate) fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> PduEvent +where + S: AsRef, +{ + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${id}:foo") + }; + let auth_events = auth_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + let prev_events = prev_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: BTreeMap::new(), + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new("".to_owned()), + signatures: ServerSignatures::default(), + }), + } +} + +// all graphs start with these input events +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EVENTS() -> HashMap { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", 
+ alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomMessage, + Some("dummy"), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), + TimelineEventType::RoomMessage, + Some("dummy"), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap { + vec![to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + )] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EDGES() -> Vec { + vec!["START", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] + .into_iter() + .map(event_id) + .collect::>() +} + +pub(crate) mod event { + use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + events::{TimelineEventType, pdu::Pdu}, + 
}; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + use crate::Event; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.room_id, + | Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.sender, + | Pdu::RoomV3Pdu(ev) => &ev.sender, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.kind, + | Pdu::RoomV3Pdu(ev) => &ev.kind, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.content, + | Pdu::RoomV3Pdu(ev) => &ev.content, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + #[allow(refining_impl_trait)] + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + #[allow(refining_impl_trait)] + fn auth_events(&self) -> Box 
+ Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + #[allow(clippy::exhaustive_structs)] + pub(crate) struct PduEvent { + pub(crate) event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/core/metrics/mod.rs b/src/core/metrics/mod.rs index f2022166..8f7a5571 100644 --- a/src/core/metrics/mod.rs +++ b/src/core/metrics/mod.rs @@ -19,8 +19,6 @@ pub struct Metrics { runtime_intervals: std::sync::Mutex>, // TODO: move stats - pub requests_spawn_active: AtomicU32, - pub requests_spawn_finished: AtomicU32, pub requests_handle_active: AtomicU32, pub requests_handle_finished: AtomicU32, pub requests_panic: AtomicU32, @@ -48,8 +46,6 @@ impl Metrics { #[cfg(tokio_unstable)] runtime_intervals: std::sync::Mutex::new(runtime_intervals), - requests_spawn_active: AtomicU32::new(0), - requests_spawn_finished: AtomicU32::new(0), requests_handle_active: AtomicU32::new(0), requests_handle_finished: AtomicU32::new(0), requests_panic: AtomicU32::new(0), diff --git a/src/core/mod.rs b/src/core/mod.rs index 1416ed9e..b91cdf0b 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1,23 +1,28 @@ +#![type_length_limit = "12288"] + pub mod alloc; pub mod config; pub mod debug; pub mod error; pub mod info; pub mod log; +pub mod matrix; pub mod metrics; pub mod mods; -pub mod pdu; pub mod server; pub mod utils; +pub use ::arrayvec; pub use ::http; pub use ::ruma; +pub use ::smallstr; +pub use ::smallvec; pub use ::toml; pub use ::tracing; pub use config::Config; 
pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; -pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId}; +pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res}; pub use server::Server; pub use utils::{ctor, dtor, implement, result, result::Result}; diff --git a/src/core/mods/module.rs b/src/core/mods/module.rs index ff181e4f..b65bbca2 100644 --- a/src/core/mods/module.rs +++ b/src/core/mods/module.rs @@ -3,8 +3,8 @@ use std::{ time::SystemTime, }; -use super::{canary, new, path, Library, Symbol}; -use crate::{error, Result}; +use super::{Library, Symbol, canary, new, path}; +use crate::{Result, error}; pub struct Module { handle: Option, diff --git a/src/core/mods/new.rs b/src/core/mods/new.rs index 77d89af4..258fdedc 100644 --- a/src/core/mods/new.rs +++ b/src/core/mods/new.rs @@ -1,6 +1,6 @@ use std::ffi::OsStr; -use super::{path, Library}; +use super::{Library, path}; use crate::{Err, Result}; const OPEN_FLAGS: i32 = libloading::os::unix::RTLD_LAZY | libloading::os::unix::RTLD_GLOBAL; diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs deleted file mode 100644 index 6a92afe8..00000000 --- a/src/core/pdu/event.rs +++ /dev/null @@ -1,35 +0,0 @@ -pub use ruma::state_res::Event; -use ruma::{events::TimelineEventType, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; -use serde_json::value::RawValue as RawJsonValue; - -use super::Pdu; - -impl Event for Pdu { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } - - fn room_id(&self) -> &RoomId { &self.room_id } - - fn sender(&self) -> &UserId { &self.sender } - - fn event_type(&self) -> &TimelineEventType { &self.kind } - - fn content(&self) -> &RawJsonValue { &self.content } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - MilliSecondsSinceUnixEpoch(self.origin_server_ts) - } - - fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - - fn 
prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.prev_events.iter() - } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.auth_events.iter() - } - - fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } -} diff --git a/src/core/server.rs b/src/core/server.rs index 45ba7420..4b673f32 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -1,7 +1,7 @@ use std::{ sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }, time::SystemTime, }; @@ -9,7 +9,7 @@ use std::{ use ruma::OwnedServerName; use tokio::{runtime, sync::broadcast}; -use crate::{config, config::Config, log::Log, metrics::Metrics, Err, Result}; +use crate::{Err, Result, config, config::Config, log::Log, metrics::Metrics}; /// Server runtime state; public portion pub struct Server { @@ -83,7 +83,7 @@ impl Server { }) } - pub fn restart(&self) -> Result<()> { + pub fn restart(&self) -> Result { if self.restarting.swap(true, Ordering::AcqRel) { return Err!("Restart already in progress"); } @@ -93,7 +93,7 @@ impl Server { }) } - pub fn shutdown(&self) -> Result<()> { + pub fn shutdown(&self) -> Result { if self.stopping.swap(true, Ordering::AcqRel) { return Err!("Shutdown already in progress"); } @@ -136,7 +136,16 @@ impl Server { } #[inline] - pub fn running(&self) -> bool { !self.stopping.load(Ordering::Acquire) } + pub fn running(&self) -> bool { !self.is_stopping() } + + #[inline] + pub fn is_stopping(&self) -> bool { self.stopping.load(Ordering::Relaxed) } + + #[inline] + pub fn is_reloading(&self) -> bool { self.reloading.load(Ordering::Relaxed) } + + #[inline] + pub fn is_restarting(&self) -> bool { self.restarting.load(Ordering::Relaxed) } #[inline] pub fn is_ours(&self, name: &str) -> bool { name == self.config.server_name } diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs index 40316440..507b9b9a 100644 --- a/src/core/utils/bytes.rs +++ b/src/core/utils/bytes.rs @@ -1,6 +1,6 @@ use 
bytesize::ByteSize; -use crate::{err, Result}; +use crate::{Result, err}; /// Parse a human-writable size string w/ si-unit suffix into integer #[inline] @@ -17,15 +17,13 @@ pub fn from_str(str: &str) -> Result { Ok(bytes) } -/// Output a human-readable size string w/ si-unit suffix +/// Output a human-readable size string w/ iec-unit suffix #[inline] #[must_use] pub fn pretty(bytes: usize) -> String { - const SI_UNITS: bool = true; - let bytes: u64 = bytes.try_into().expect("failed to convert usize to u64"); - bytesize::to_string(bytes, SI_UNITS) + ByteSize::b(bytes).display().iec().to_string() } #[inline] diff --git a/src/core/utils/defer.rs b/src/core/utils/defer.rs index 60243e97..4887d164 100644 --- a/src/core/utils/defer.rs +++ b/src/core/utils/defer.rs @@ -12,14 +12,14 @@ macro_rules! defer { let _defer_ = _Defer_ { closure: || $body }; }; - ($body:expr) => { + ($body:expr_2021) => { $crate::defer! {{ $body }} }; } #[macro_export] macro_rules! scope_restore { - ($val:ident, $ours:expr) => { + ($val:ident, $ours:expr_2021) => { let theirs = $crate::utils::exchange($val, $ours); $crate::defer! {{ *$val = theirs; }}; }; diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs new file mode 100644 index 00000000..24f239ff --- /dev/null +++ b/src/core/utils/future/bool_ext.rs @@ -0,0 +1,82 @@ +//! 
Extended external extensions to futures::FutureExt + +use std::marker::Unpin; + +use futures::{ + Future, FutureExt, + future::{select_ok, try_join, try_join_all, try_select}, +}; + +pub trait BoolExt +where + Self: Future + Send, +{ + fn and(self, b: B) -> impl Future + Send + where + B: Future + Send, + Self: Sized; + + fn or(self, b: B) -> impl Future + Send + where + B: Future + Send + Unpin, + Self: Sized + Unpin; +} + +impl BoolExt for Fut +where + Fut: Future + Send, +{ + #[inline] + fn and(self, b: B) -> impl Future + Send + where + B: Future + Send, + Self: Sized, + { + type Result = crate::Result<(), ()>; + + let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); + + let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); + + try_join(a, b).map(|result| result.is_ok()) + } + + #[inline] + fn or(self, b: B) -> impl Future + Send + where + B: Future + Send + Unpin, + Self: Sized + Unpin, + { + type Result = crate::Result<(), ()>; + + let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); + + let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); + + try_select(a, b).map(|result| result.is_ok()) + } +} + +pub async fn and(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + try_join_all(args).map(|result| result.is_ok()) +} + +pub async fn or(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send + Unpin, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + select_ok(args).map(|result| result.is_ok()) +} diff --git a/src/core/utils/future/ext_ext.rs b/src/core/utils/future/ext_ext.rs index 38decaae..219bb664 100644 --- a/src/core/utils/future/ext_ext.rs +++ b/src/core/utils/future/ext_ext.rs @@ -2,7 +2,7 @@ use std::marker::Unpin; -use futures::{future, future::Select, Future}; +use 
futures::{Future, future, future::Select}; /// This interface is not necessarily complete; feel free to add as-needed. pub trait ExtExt diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 153dcfe1..d896e66d 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -1,7 +1,13 @@ +mod bool_ext; mod ext_ext; mod option_ext; +mod option_stream; +mod ready_eq_ext; mod try_ext_ext; +pub use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; +pub use option_stream::OptionStream; +pub use ready_eq_ext::ReadyEqExt; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/option_ext.rs b/src/core/utils/future/option_ext.rs index ed61de56..920dd044 100644 --- a/src/core/utils/future/option_ext.rs +++ b/src/core/utils/future/option_ext.rs @@ -1,6 +1,6 @@ #![allow(clippy::wrong_self_convention)] -use futures::{future::OptionFuture, Future, FutureExt}; +use futures::{Future, FutureExt, future::OptionFuture}; pub trait OptionExt { fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send; @@ -11,11 +11,14 @@ pub trait OptionExt { impl OptionExt for OptionFuture where Fut: Future + Send, + T: Send, { + #[inline] fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { self.map(|o| o.as_ref().is_none_or(f)) } + #[inline] fn is_some_and(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { self.map(|o| o.as_ref().is_some_and(f)) } diff --git a/src/core/utils/future/option_stream.rs b/src/core/utils/future/option_stream.rs new file mode 100644 index 00000000..81130c87 --- /dev/null +++ b/src/core/utils/future/option_stream.rs @@ -0,0 +1,25 @@ +use futures::{Future, FutureExt, Stream, StreamExt, future::OptionFuture}; + +use super::super::IterStream; + +pub trait OptionStream { + fn stream(self) -> impl Stream + Send; +} + +impl OptionStream for OptionFuture +where + Fut: Future + Send, + S: Stream + Send, + O: IntoIterator + Send, + 
::IntoIter: Send, + T: Send, +{ + #[inline] + fn stream(self) -> impl Stream + Send { + self.map(|opt| opt.map(|(curr, next)| curr.into_iter().stream().chain(next))) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten() + } +} diff --git a/src/core/utils/future/ready_eq_ext.rs b/src/core/utils/future/ready_eq_ext.rs new file mode 100644 index 00000000..1625adae --- /dev/null +++ b/src/core/utils/future/ready_eq_ext.rs @@ -0,0 +1,25 @@ +//! Future extension for Partial Equality against present value + +use futures::{Future, FutureExt}; + +pub trait ReadyEqExt +where + Self: Future + Send + Sized, + T: PartialEq + Send + Sync, +{ + fn eq(self, t: &T) -> impl Future + Send; + + fn ne(self, t: &T) -> impl Future + Send; +} + +impl ReadyEqExt for Fut +where + Fut: Future + Send + Sized, + T: PartialEq + Send + Sync, +{ + #[inline] + fn eq(self, t: &T) -> impl Future + Send { self.map(move |r| r.eq(t)) } + + #[inline] + fn ne(self, t: &T) -> impl Future + Send { self.map(move |r| r.ne(t)) } +} diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index aa3d72e4..b2114e56 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -7,9 +7,8 @@ use std::marker::Unpin; use futures::{ - future, + TryFuture, TryFutureExt, future, future::{MapOkOrElse, TrySelect, UnwrapOrElse}, - TryFuture, TryFutureExt, }; /// This interface is not necessarily complete; feel free to add as-needed. 
diff --git a/src/core/utils/hash/argon.rs b/src/core/utils/hash/argon.rs index 18146b47..66dfab75 100644 --- a/src/core/utils/hash/argon.rs +++ b/src/core/utils/hash/argon.rs @@ -1,11 +1,11 @@ use std::sync::OnceLock; use argon2::{ - password_hash, password_hash::SaltString, Algorithm, Argon2, Params, PasswordHash, - PasswordHasher, PasswordVerifier, Version, + Algorithm, Argon2, Params, PasswordHash, PasswordHasher, PasswordVerifier, Version, + password_hash, password_hash::SaltString, }; -use crate::{err, Error, Result}; +use crate::{Error, Result, err}; const M_COST: u32 = Params::DEFAULT_M_COST; // memory size in 1 KiB blocks const T_COST: u32 = Params::DEFAULT_T_COST; // nr of iterations diff --git a/src/core/utils/json.rs b/src/core/utils/json.rs index 4a3fec8f..3f2f225e 100644 --- a/src/core/utils/json.rs +++ b/src/core/utils/json.rs @@ -1,6 +1,6 @@ use std::{fmt, str::FromStr}; -use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; +use ruma::{CanonicalJsonError, CanonicalJsonObject, canonical_json::try_from_json_map}; use crate::Result; diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index ed157daf..9316731c 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -6,10 +6,11 @@ use std::{cmp, convert::TryFrom}; pub use checked_ops::checked_ops; pub use self::{expected::Expected, tried::Tried}; -use crate::{debug::type_name, err, Err, Error, Result}; +use crate::{Err, Error, Result, debug::type_name, err}; /// Checked arithmetic expression. Returns a Result #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! checked { ($($input:tt)+) => { $crate::utils::math::checked_ops!($($input)+) @@ -22,6 +23,7 @@ macro_rules! checked { /// has no realistic expectation for error and no interest in cluttering the /// callsite with result handling from checked!. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
expected { ($msg:literal, $($input:tt)+) => { $crate::checked!($($input)+).expect($msg) @@ -37,6 +39,7 @@ macro_rules! expected { /// regression analysis. #[cfg(not(debug_assertions))] #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! validated { ($($input:tt)+) => { //#[allow(clippy::arithmetic_side_effects)] { @@ -53,6 +56,7 @@ macro_rules! validated { /// the expression is obviously safe. The check is elided in release-mode. #[cfg(debug_assertions)] #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! validated { ($($input:tt)+) => { $crate::expected!($($input)+) } } diff --git a/src/core/utils/math/tried.rs b/src/core/utils/math/tried.rs index 2006d2d5..09de731f 100644 --- a/src/core/utils/math/tried.rs +++ b/src/core/utils/math/tried.rs @@ -1,6 +1,6 @@ use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub}; -use crate::{checked, Result}; +use crate::{Result, checked}; pub trait Tried { #[inline] diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 1a4b52da..54404e4c 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -28,7 +28,7 @@ pub use self::{ bool::BoolExt, bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}, debug::slice_truncated as debug_slice_truncated, - future::TryExtExt as TryFutureExtExt, + future::{BoolExt as FutureBoolExt, OptionStream, TryExtExt as TryFutureExtExt}, hash::sha256::delimited as calculate_hash, html::Escape as HtmlEscape, json::{deserialize_from_str, to_canonical_object}, @@ -49,10 +49,10 @@ pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, sou #[macro_export] macro_rules! extract_variant { - ($e:expr, $variant:path) => { + ( $e:expr_2021, $( $variant:path )|* ) => { match $e { - | $variant(value) => Some(value), - | _ => None, + $( $variant(value) => Some(value), )* + _ => None, } }; } @@ -84,6 +84,17 @@ macro_rules! apply { }; } +#[macro_export] +macro_rules! 
pair_of { + ($decl:ty) => { + ($decl, $decl) + }; + + ($init:expr_2021) => { + ($init, $init) + }; +} + /// Functor for truthy #[macro_export] macro_rules! is_true { @@ -123,7 +134,7 @@ macro_rules! is_equal_to { |x| x == $val }; - ($val:expr) => { + ($val:expr_2021) => { |x| x == $val }; } @@ -135,7 +146,7 @@ macro_rules! is_less_than { |x| x < $val }; - ($val:expr) => { + ($val:expr_2021) => { |x| x < $val }; } diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs index 03a4adf1..01504ce6 100644 --- a/src/core/utils/mutex_map.rs +++ b/src/core/utils/mutex_map.rs @@ -6,7 +6,7 @@ use std::{ use tokio::sync::OwnedMutexGuard as Omg; -use crate::{err, Result}; +use crate::{Result, err}; /// Map of Mutexes pub struct MutexMap { diff --git a/src/core/utils/rand.rs b/src/core/utils/rand.rs index 1d289c6e..72487633 100644 --- a/src/core/utils/rand.rs +++ b/src/core/utils/rand.rs @@ -4,7 +4,7 @@ use std::{ }; use arrayvec::ArrayString; -use rand::{seq::SliceRandom, thread_rng, Rng}; +use rand::{Rng, seq::SliceRandom, thread_rng}; pub fn shuffle(vec: &mut [T]) { let mut rng = thread_rng(); diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs index 282008e7..832f2638 100644 --- a/src/core/utils/stream/broadband.rs +++ b/src/core/utils/stream/broadband.rs @@ -3,11 +3,11 @@ use std::convert::identity; use futures::{ - stream::{Stream, StreamExt}, Future, + stream::{Stream, StreamExt}, }; -use super::{automatic_width, ReadyExt}; +use super::{ReadyExt, automatic_width}; /// Concurrency extensions to augment futures::StreamExt. 
broad_ combinators /// produce out-of-order diff --git a/src/core/utils/stream/cloned.rs b/src/core/utils/stream/cloned.rs index d6a0e647..b89e4695 100644 --- a/src/core/utils/stream/cloned.rs +++ b/src/core/utils/stream/cloned.rs @@ -1,6 +1,6 @@ use std::clone::Clone; -use futures::{stream::Map, Stream, StreamExt}; +use futures::{Stream, StreamExt, stream::Map}; pub trait Cloned<'a, T, S> where diff --git a/src/core/utils/stream/expect.rs b/src/core/utils/stream/expect.rs index 3509bb83..ec572714 100644 --- a/src/core/utils/stream/expect.rs +++ b/src/core/utils/stream/expect.rs @@ -10,7 +10,7 @@ pub trait TryExpect<'a, Item> { impl<'a, T, Item> TryExpect<'a, Item> for T where - T: Stream> + TryStream + Send + 'a, + T: Stream> + Send + TryStream + 'a, Item: 'a, { #[inline] diff --git a/src/core/utils/stream/ignore.rs b/src/core/utils/stream/ignore.rs index 9baa00f3..37c89d9a 100644 --- a/src/core/utils/stream/ignore.rs +++ b/src/core/utils/stream/ignore.rs @@ -1,4 +1,4 @@ -use futures::{future::ready, Stream, StreamExt, TryStream}; +use futures::{Stream, StreamExt, TryStream, future::ready}; use crate::{Error, Result}; diff --git a/src/core/utils/stream/iter_stream.rs b/src/core/utils/stream/iter_stream.rs index 9077deac..e9a91b1c 100644 --- a/src/core/utils/stream/iter_stream.rs +++ b/src/core/utils/stream/iter_stream.rs @@ -1,7 +1,6 @@ use futures::{ - stream, + StreamExt, stream, stream::{Stream, TryStream}, - StreamExt, }; use crate::{Error, Result}; diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 23455322..a356f05f 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -14,8 +14,8 @@ mod try_wideband; mod wideband; pub use band::{ - automatic_amplification, automatic_width, set_amplification, set_width, AMPLIFICATION_LIMIT, - WIDTH_LIMIT, + AMPLIFICATION_LIMIT, WIDTH_LIMIT, automatic_amplification, automatic_width, + set_amplification, set_width, }; pub use broadband::BroadbandExt; pub use cloned::Cloned; 
diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index d93187e9..be4d1b25 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{ready, Ready}, + future::{FutureExt, Ready, ready}, stream::{ All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile, }, @@ -16,7 +16,7 @@ use futures::{ /// This interface is not necessarily complete; feel free to add as-needed. pub trait ReadyExt where - Self: Stream + Send + Sized, + Self: Stream + Sized, { fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> where @@ -26,6 +26,12 @@ where where F: Fn(Item) -> bool; + fn ready_find<'a, F>(self, f: F) -> impl Future> + Send + where + Self: Send + Unpin + 'a, + F: Fn(&Item) -> bool + Send + 'a, + Item: Send; + fn ready_filter<'a, F>( self, f: F, @@ -93,7 +99,7 @@ where impl ReadyExt for S where - S: Stream + Send + Sized, + S: Stream + Sized, { #[inline] fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> @@ -111,6 +117,19 @@ where self.any(move |t| ready(f(t))) } + #[inline] + fn ready_find<'a, F>(self, f: F) -> impl Future> + Send + where + Self: Send + Unpin + 'a, + F: Fn(&Item) -> bool + Send + 'a, + Item: Send, + { + self.ready_filter(f) + .take(1) + .into_future() + .map(|(curr, _next)| curr) + } + #[inline] fn ready_filter<'a, F>( self, diff --git a/src/core/utils/stream/try_parallel.rs b/src/core/utils/stream/try_parallel.rs index 7f8a63b1..60fef0ae 100644 --- a/src/core/utils/stream/try_parallel.rs +++ b/src/core/utils/stream/try_parallel.rs @@ -1,10 +1,10 @@ //! 
Parallelism stream combinator extensions to futures::Stream -use futures::{stream::TryStream, TryFutureExt}; +use futures::{TryFutureExt, stream::TryStream}; use tokio::{runtime, task::JoinError}; use super::TryBroadbandExt; -use crate::{utils::sys::available_parallelism, Error, Result}; +use crate::{Error, Result, utils::sys::available_parallelism}; /// Parallelism extensions to augment futures::StreamExt. These combinators are /// for computation-oriented workloads, unlike -band combinators for I/O diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index 3261acb6..287fa1e1 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{ready, Ready}, + future::{Ready, ready}, stream::{AndThen, TryFilterMap, TryFold, TryForEach, TryStream, TryStreamExt, TryTakeWhile}, }; @@ -13,8 +13,8 @@ use crate::Result; /// This interface is not necessarily complete; feel free to add as-needed. pub trait TryReadyExt where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { fn ready_and_then( self, @@ -67,8 +67,8 @@ where impl TryReadyExt for S where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { #[inline] fn ready_and_then( diff --git a/src/core/utils/stream/try_tools.rs b/src/core/utils/stream/try_tools.rs index 3ddce6ad..417806fc 100644 --- a/src/core/utils/stream/try_tools.rs +++ b/src/core/utils/stream/try_tools.rs @@ -1,15 +1,15 @@ //! 
TryStreamTools for futures::TryStream #![allow(clippy::type_complexity)] -use futures::{future, future::Ready, stream::TryTakeWhile, TryStream, TryStreamExt}; +use futures::{TryStream, TryStreamExt, future, future::Ready, stream::TryTakeWhile}; use crate::Result; /// TryStreamTools pub trait TryTools where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { fn try_take( self, @@ -23,8 +23,8 @@ where impl TryTools for S where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { #[inline] fn try_take( diff --git a/src/core/utils/stream/wideband.rs b/src/core/utils/stream/wideband.rs index a8560bb4..cbebf610 100644 --- a/src/core/utils/stream/wideband.rs +++ b/src/core/utils/stream/wideband.rs @@ -3,11 +3,11 @@ use std::convert::identity; use futures::{ - stream::{Stream, StreamExt}, Future, + stream::{Stream, StreamExt}, }; -use super::{automatic_width, ReadyExt}; +use super::{ReadyExt, automatic_width}; /// Concurrency extensions to augment futures::StreamExt. wideband_ combinators /// produce in-order. diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index cc692c14..7d81903d 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -5,7 +5,7 @@ mod unquote; mod unquoted; pub use self::{between::Between, split::SplitInfallible, unquote::Unquote, unquoted::Unquoted}; -use crate::{utils::exchange, Result}; +use crate::{Result, utils::exchange}; pub const EMPTY: &str = ""; @@ -14,6 +14,7 @@ pub const EMPTY: &str = ""; /// returned otherwise the input (i.e. &'static str) is returned. If multiple /// arguments are provided the first is assumed to be a format string. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! format_maybe { ($s:literal $(,)?) => { if $crate::is_format!($s) { std::format!($s).into() } else { $s.into() } @@ -27,6 +28,7 @@ macro_rules! 
format_maybe { /// Constant expression to decide if a literal is a format string. Note: could /// use some improvement. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! is_format { ($s:literal) => { ::const_str::contains!($s, "{") && ::const_str::contains!($s, "}") @@ -60,6 +62,7 @@ pub fn camel_to_snake_string(s: &str) -> String { } #[inline] +#[allow(clippy::unbuffered_bytes)] // these are allocated string utilities, not file I/O utils pub fn camel_to_snake_case(output: &mut O, input: I) -> Result<()> where I: std::io::Read, diff --git a/src/core/utils/string/unquoted.rs b/src/core/utils/string/unquoted.rs index 5b002d99..88fa011f 100644 --- a/src/core/utils/string/unquoted.rs +++ b/src/core/utils/string/unquoted.rs @@ -1,9 +1,9 @@ use std::ops::Deref; -use serde::{de, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, de}; use super::Unquote; -use crate::{err, Result}; +use crate::{Result, err}; /// Unquoted string which deserialized from a quoted string. Construction from a /// &str is infallible such that the input can already be unquoted. 
Construction diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs index a0d5be52..f795ccb8 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -5,7 +5,7 @@ use std::path::PathBuf; pub use compute::available_parallelism; -use crate::{debug, Result}; +use crate::{Result, debug}; /// This is needed for opening lots of file descriptors, which tends to /// happen more often when using RocksDB and making lots of federation @@ -16,7 +16,7 @@ use crate::{debug, Result}; /// * #[cfg(unix)] pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { - use nix::sys::resource::{getrlimit, setrlimit, Resource::RLIMIT_NOFILE as NOFILE}; + use nix::sys::resource::{Resource::RLIMIT_NOFILE as NOFILE, getrlimit, setrlimit}; let (soft_limit, hard_limit) = getrlimit(NOFILE)?; if soft_limit < hard_limit { diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs index ce2aa504..5274cd66 100644 --- a/src/core/utils/sys/compute.rs +++ b/src/core/utils/sys/compute.rs @@ -2,7 +2,7 @@ use std::{cell::Cell, fmt::Debug, path::PathBuf, sync::LazyLock}; -use crate::{is_equal_to, Result}; +use crate::{Result, is_equal_to}; type Id = usize; @@ -45,7 +45,7 @@ pub fn set_affinity(mut ids: I) where I: Iterator + Clone + Debug, { - use core_affinity::{set_each_for_current, set_for_current, CoreId}; + use core_affinity::{CoreId, set_each_for_current, set_for_current}; let n = ids.clone().count(); let mask: Mask = ids.clone().fold(0, |mask, id| { @@ -118,7 +118,7 @@ pub fn cores_available() -> impl Iterator { from_mask(*CORES_AVAILABL #[cfg(target_os = "linux")] #[inline] pub fn getcpu() -> Result { - use crate::{utils::math, Error}; + use crate::{Error, utils::math}; // SAFETY: This is part of an interface with many low-level calls taking many // raw params, but it's unclear why this specific call is unsafe. 
Nevertheless diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index 25b17904..b71c3437 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -3,7 +3,7 @@ use std::{ ffi::OsStr, fs, - fs::{read_to_string, FileType}, + fs::{FileType, read_to_string}, iter::IntoIterator, path::{Path, PathBuf}, }; @@ -11,9 +11,9 @@ use std::{ use libc::dev_t; use crate::{ + Result, result::FlatOk, utils::{result::LogDebugErr, string::SplitInfallible}, - Result, }; /// Device characteristics useful for random access throughput @@ -117,16 +117,13 @@ pub fn name_from_path(path: &Path) -> Result { /// Get the (major, minor) of the block device on which Path is mounted. #[allow(clippy::useless_conversion, clippy::unnecessary_fallible_conversions)] -pub fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { +fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { #[cfg(target_family = "unix")] use std::os::unix::fs::MetadataExt; let stat = fs::metadata(path)?; let dev_id = stat.dev().try_into()?; - - // SAFETY: These functions may not need to be marked as unsafe. 
- // see: https://github.com/rust-lang/libc/issues/3759 - let (major, minor) = unsafe { (libc::major(dev_id), libc::minor(dev_id)) }; + let (major, minor) = (libc::major(dev_id), libc::minor(dev_id)); Ok((major.try_into()?, minor.try_into()?)) } diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs index 1bcb92b8..05a0655b 100644 --- a/src/core/utils/tests.rs +++ b/src/core/utils/tests.rs @@ -241,7 +241,7 @@ fn set_intersection_sorted_all() { #[tokio::test] async fn set_intersection_sorted_stream2() { use futures::StreamExt; - use utils::{set::intersection_sorted_stream2, IterStream}; + use utils::{IterStream, set::intersection_sorted_stream2}; let a = ["bar"]; let b = ["bar", "foo"]; diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs index 81fdda2a..73f73971 100644 --- a/src/core/utils/time.rs +++ b/src/core/utils/time.rs @@ -2,7 +2,7 @@ pub mod exponential_backoff; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use crate::{err, Result}; +use crate::{Result, err}; #[inline] #[must_use] diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 557c9a3e..55d4793f 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -17,24 +17,35 @@ crate-type = [ ] [features] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", -] -jemalloc = [ - "rust-rocksdb/jemalloc", -] io_uring = [ "rust-rocksdb/io-uring", ] +jemalloc = [ + "conduwuit-core/jemalloc", + "rust-rocksdb/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", +] +release_max_log_level = [ + "conduwuit-core/release_max_log_level", + "log/max_level_trace", + "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", +] zstd_compression = [ + "conduwuit-core/zstd_compression", "rust-rocksdb/zstd", ] [dependencies] 
-arrayvec.workspace = true async-channel.workspace = true conduwuit-core.workspace = true const-str.workspace = true @@ -45,7 +56,6 @@ minicbor-serde.workspace = true rust-rocksdb.workspace = true serde.workspace = true serde_json.workspace = true -smallvec.workspace = true tokio.workspace = true tracing.workspace = true diff --git a/src/database/benches.rs b/src/database/benches.rs new file mode 100644 index 00000000..56d1411c --- /dev/null +++ b/src/database/benches.rs @@ -0,0 +1,17 @@ +#[cfg(conduwuit_bench)] +extern crate test; + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn ser_str(b: &mut test::Bencher) { + use conduwuit::ruma::{RoomId, UserId}; + + use crate::ser::serialize_to_vec; + + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + b.iter(|| { + let key = (user_id, room_id); + let _s = serialize_to_vec(key).expect("failed to serialize user_id"); + }); +} diff --git a/src/database/de.rs b/src/database/de.rs index 7cc8f00a..849b3b2e 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -1,9 +1,9 @@ -use arrayvec::ArrayVec; -use conduwuit::{checked, debug::DebugInspect, err, utils::string, Error, Result}; +use conduwuit::{ + Error, Result, arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, +}; use serde::{ - de, + Deserialize, de, de::{DeserializeSeed, Visitor}, - Deserialize, }; use crate::util::unhandled; @@ -22,7 +22,7 @@ pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result where T: Deserialize<'a>, { - let mut deserializer = Deserializer { buf, pos: 0, seq: false }; + let mut deserializer = Deserializer { buf, pos: 0, rec: 0, seq: false }; T::deserialize(&mut deserializer).debug_inspect(|_| { deserializer @@ -35,6 +35,7 @@ where pub(crate) struct Deserializer<'de> { buf: &'de [u8], pos: usize, + rec: usize, seq: bool, } @@ -107,7 +108,7 @@ impl<'de> Deserializer<'de> { /// consumed None is returned instead. 
#[inline] fn record_peek_byte(&self) -> Option { - let started = self.pos != 0; + let started = self.pos != 0 || self.rec > 0; let buf = &self.buf[self.pos..]; debug_assert!( !started || buf[0] == Self::SEP, @@ -121,13 +122,14 @@ impl<'de> Deserializer<'de> { /// the start of the next record. (Case for some sequences) #[inline] fn record_start(&mut self) { - let started = self.pos != 0; + let started = self.pos != 0 || self.rec > 0; debug_assert!( !started || self.buf[self.pos] == Self::SEP, "Missing expected record separator at current position" ); self.inc_pos(started.into()); + self.inc_rec(1); } /// Consume all remaining bytes, which may include record separators, @@ -157,6 +159,9 @@ impl<'de> Deserializer<'de> { debug_assert!(self.pos <= self.buf.len(), "pos out of range"); } + #[inline] + fn inc_rec(&mut self, n: usize) { self.rec = self.rec.saturating_add(n); } + /// Unconsumed input bytes. #[inline] fn remaining(&self) -> Result { @@ -236,7 +241,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { | "Ignore" => self.record_ignore(), | "IgnoreAll" => self.record_ignore_all(), | _ => unhandled!("Unrecognized deserialization Directive {name:?}"), - }; + } visitor.visit_unit() } @@ -270,8 +275,16 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_option>(self, _visitor: V) -> Result { - unhandled!("deserialize Option not implemented") + fn deserialize_option>(self, visitor: V) -> Result { + if self + .buf + .get(self.pos) + .is_none_or(|b| *b == Deserializer::SEP) + { + visitor.visit_none() + } else { + visitor.visit_some(self) + } } #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] diff --git a/src/database/engine.rs b/src/database/engine.rs index be3d62cf..38dd7512 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -12,31 +12,31 @@ mod repair; use std::{ ffi::CStr, sync::{ - 
atomic::{AtomicU32, Ordering}, Arc, + atomic::{AtomicU32, Ordering}, }, }; -use conduwuit::{debug, info, warn, Err, Result}; +use conduwuit::{Err, Result, debug, info, warn}; use rocksdb::{ AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded, WaitForCompactOptions, }; use crate::{ + Context, pool::Pool, util::{map_err, result}, - Context, }; pub struct Engine { + pub(crate) db: Db, + pub(crate) pool: Arc, + pub(crate) ctx: Arc, pub(super) read_only: bool, pub(super) secondary: bool, pub(crate) checksums: bool, corks: AtomicU32, - pub(crate) db: Db, - pub(crate) pool: Arc, - pub(crate) ctx: Arc, } pub(crate) type Db = DBWithThreadMode; diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs index db718c2c..ac72e6d4 100644 --- a/src/database/engine/backup.rs +++ b/src/database/engine/backup.rs @@ -1,24 +1,16 @@ -use std::fmt::Write; +use std::{ffi::OsString, path::PathBuf}; -use conduwuit::{error, implement, info, utils::time::rfc2822_from_seconds, warn, Result}; +use conduwuit::{Err, Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; use super::Engine; -use crate::{or_else, util::map_err}; +use crate::util::map_err; #[implement(Engine)] #[tracing::instrument(skip(self))] pub fn backup(&self) -> Result { - let server = &self.ctx.server; - let config = &server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok(()); - } - - let options = - BackupEngineOptions::new(path.expect("valid database backup path")).map_err(map_err)?; - let mut engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err)?; + let mut engine = self.backup_engine()?; + let config = &self.ctx.server.config; if config.database_backups_to_keep > 0 { let flush = !self.is_read_only(); engine @@ -40,34 +32,62 @@ pub fn backup(&self) -> Result { } } + if 
config.database_backups_to_keep == 0 { + warn!("Configuration item `database_backups_to_keep` is set to 0."); + } + Ok(()) } #[implement(Engine)] -pub fn backup_list(&self) -> Result { - let server = &self.ctx.server; - let config = &server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok("Configure database_backup_path to enable backups, or the path specified is \ - not valid" - .to_owned()); +pub fn backup_list(&self) -> Result + Send> { + let info = self.backup_engine()?.get_backup_info(); + + if info.is_empty() { + return Err!("No backups found."); } - let mut res = String::new(); - let options = - BackupEngineOptions::new(path.expect("valid database backup path")).or_else(or_else)?; - let engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).or_else(or_else)?; - for info in engine.get_backup_info() { - writeln!( - res, + let list = info.into_iter().map(|info| { + format!( "#{} {}: {} bytes, {} files", info.backup_id, rfc2822_from_seconds(info.timestamp), info.size, info.num_files, - )?; + ) + }); + + Ok(list) +} + +#[implement(Engine)] +pub fn backup_count(&self) -> Result { + let info = self.backup_engine()?.get_backup_info(); + + Ok(info.len()) +} + +#[implement(Engine)] +fn backup_engine(&self) -> Result { + let path = self.backup_path()?; + let options = BackupEngineOptions::new(path).map_err(map_err)?; + BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err) +} + +#[implement(Engine)] +fn backup_path(&self) -> Result { + let path = self + .ctx + .server + .config + .database_backup_path + .clone() + .map(PathBuf::into_os_string) + .unwrap_or_default(); + + if path.is_empty() { + return Err!(Config("database_backup_path", "Configure path to enable backups")); } - Ok(res) + Ok(path) } diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 83bce08c..7ceec722 100644 --- a/src/database/engine/cf_opts.rs +++ 
b/src/database/engine/cf_opts.rs @@ -1,12 +1,12 @@ -use conduwuit::{err, utils::math::Expected, Config, Result}; +use conduwuit::{Config, Result, err, utils::math::Expected}; use rocksdb::{ BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, Cache, - DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, - UniversalCompactOptions, UniversalCompactionStopStyle, + DBCompressionType as CompressionType, DataBlockIndexType, FifoCompactOptions, + LruCacheOptions, Options, UniversalCompactOptions, UniversalCompactionStopStyle, }; use super::descriptor::{CacheDisp, Descriptor}; -use crate::{util::map_err, Context}; +use crate::{Context, util::map_err}; pub(super) const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; @@ -16,7 +16,7 @@ pub(super) const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; pub(crate) fn cf_options(ctx: &Context, opts: Options, desc: &Descriptor) -> Result { let cache = get_cache(ctx, desc); let config = &ctx.server.config; - descriptor_cf_options(opts, desc.clone(), config, cache.as_ref()) + descriptor_cf_options(opts, *desc, config, cache.as_ref()) } fn descriptor_cf_options( @@ -46,6 +46,7 @@ fn descriptor_cf_options( opts.set_compaction_style(desc.compaction); opts.set_compaction_pri(desc.compaction_pri); opts.set_universal_compaction_options(&uc_options(&desc)); + opts.set_fifo_compaction_options(&fifo_options(&desc)); let compression_shape: Vec<_> = desc .compression_shape @@ -142,6 +143,13 @@ fn set_compression(desc: &mut Descriptor, config: &Config) { } } +fn fifo_options(desc: &Descriptor) -> FifoCompactOptions { + let mut opts = FifoCompactOptions::default(); + opts.set_max_table_files_size(desc.limit_size); + + opts +} + fn uc_options(desc: &Descriptor) -> UniversalCompactOptions { let mut opts = UniversalCompactOptions::default(); opts.set_stop_style(UniversalCompactionStopStyle::Total); diff --git a/src/database/engine/context.rs b/src/database/engine/context.rs index 04e08854..380e37af 100644 --- 
a/src/database/engine/context.rs +++ b/src/database/engine/context.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{debug, utils::math::usize_from_f64, Result, Server}; +use conduwuit::{Result, Server, debug, utils::math::usize_from_f64}; use rocksdb::{Cache, Env, LruCacheOptions}; use crate::{or_else, pool::Pool}; diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 01847257..18cec742 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -1,7 +1,7 @@ use std::{cmp, convert::TryFrom}; -use conduwuit::{utils, Config, Result}; -use rocksdb::{statistics::StatsLevel, Cache, DBRecoveryMode, Env, LogLevel, Options}; +use conduwuit::{Config, Result, utils}; +use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel}; use super::{cf_opts::cache_size_f64, logger::handle as handle_log}; @@ -29,9 +29,9 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul opts.set_max_file_opening_threads(0); // IO - opts.set_atomic_flush(true); opts.set_manual_wal_flush(true); - opts.set_enable_pipelined_write(false); + opts.set_atomic_flush(config.rocksdb_atomic_flush); + opts.set_enable_pipelined_write(!config.rocksdb_atomic_flush); if config.rocksdb_direct_io { opts.set_use_direct_reads(true); opts.set_use_direct_io_for_flush_and_compaction(true); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 934ef831..2274da9c 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -6,14 +6,8 @@ use rocksdb::{ use super::cf_opts::SENTINEL_COMPRESSION_LEVEL; +/// Column Descriptor #[derive(Debug, Clone, Copy)] -pub(crate) enum CacheDisp { - Unique, - Shared, - SharedWith(&'static str), -} - -#[derive(Debug, Clone)] pub(crate) struct Descriptor { pub(crate) name: &'static str, pub(crate) dropped: bool, @@ -30,6 +24,7 @@ pub(crate) struct Descriptor { pub(crate) file_shape: i32, pub(crate) 
level0_width: i32, pub(crate) merge_width: (i32, i32), + pub(crate) limit_size: u64, pub(crate) ttl: u64, pub(crate) compaction: CompactionStyle, pub(crate) compaction_pri: CompactionPri, @@ -46,7 +41,16 @@ pub(crate) struct Descriptor { pub(crate) auto_readahead_max: usize, } -pub(crate) static BASE: Descriptor = Descriptor { +/// Cache Disposition +#[derive(Debug, Clone, Copy)] +pub(crate) enum CacheDisp { + Unique, + Shared, + SharedWith(&'static str), +} + +/// Base descriptor supplying common defaults to all derived descriptors. +static BASE: Descriptor = Descriptor { name: EMPTY, dropped: false, cache_disp: CacheDisp::Shared, @@ -62,6 +66,7 @@ pub(crate) static BASE: Descriptor = Descriptor { file_shape: 2, level0_width: 2, merge_width: (2, 16), + limit_size: 0, ttl: 60 * 60 * 24 * 21, compaction: CompactionStyle::Level, compaction_pri: CompactionPri::MinOverlappingRatio, @@ -78,16 +83,21 @@ pub(crate) static BASE: Descriptor = Descriptor { auto_readahead_max: 1024 * 1024 * 2, }; +/// Tombstone descriptor for columns which have been or will be deleted. +pub(crate) static DROPPED: Descriptor = Descriptor { dropped: true, ..BASE }; + +/// Descriptor for large datasets with random updates across the keyspace. pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, write_size: 1024 * 1024 * 32, cache_shards: 128, compression_level: -3, - bottommost_level: Some(-1), + bottommost_level: Some(2), compressed_index: true, ..BASE }; +/// Descriptor for large datasets with updates to the end of the keyspace. 
pub(crate) static SEQUENTIAL: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestLargestSeqFirst, write_size: 1024 * 1024 * 64, @@ -95,12 +105,13 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { file_size: 1024 * 1024 * 2, cache_shards: 128, compression_level: -2, - bottommost_level: Some(-1), + bottommost_level: Some(2), compression_shape: [0, 0, 1, 1, 1, 1, 1], compressed_index: false, ..BASE }; +/// Descriptor for small datasets with random updates across the keyspace. pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, write_size: 1024 * 1024 * 16, @@ -117,6 +128,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { ..RANDOM }; +/// Descriptor for small datasets with updates to the end of the keyspace. pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, write_size: 1024 * 1024 * 16, @@ -132,3 +144,14 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compressed_index: false, ..SEQUENTIAL }; + +/// Descriptor for small persistent caches with random updates. Oldest entries +/// are deleted after limit_size reached. 
+pub(crate) static RANDOM_SMALL_CACHE: Descriptor = Descriptor { + compaction: CompactionStyle::Fifo, + cache_disp: CacheDisp::Unique, + limit_size: 1024 * 1024 * 64, + ttl: 60 * 60 * 24 * 14, + file_shape: 2, + ..RANDOM_SMALL +}; diff --git a/src/database/engine/files.rs b/src/database/engine/files.rs index f603c57b..1f38a63c 100644 --- a/src/database/engine/files.rs +++ b/src/database/engine/files.rs @@ -1,32 +1,15 @@ -use std::fmt::Write; - -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; +use rocksdb::LiveFile as SstFile; use super::Engine; +use crate::util::map_err; #[implement(Engine)] -pub fn file_list(&self) -> Result { - match self.db.live_files() { - | Err(e) => Ok(String::from(e)), - | Ok(mut files) => { - files.sort_by_key(|f| f.name.clone()); - let mut res = String::new(); - writeln!(res, "| lev | sst | keys | dels | size | column |")?; - writeln!(res, "| ---: | :--- | ---: | ---: | ---: | :--- |")?; - for file in files { - writeln!( - res, - "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", - file.level, - file.name, - file.num_entries, - file.num_deletions, - file.size, - file.column_family_name, - )?; - } - - Ok(res) - }, - } +pub fn file_list(&self) -> impl Iterator> + Send + use<> { + self.db + .live_files() + .map_err(map_err) + .into_iter() + .flat_map(Vec::into_iter) + .map(Ok) } diff --git a/src/database/engine/logger.rs b/src/database/engine/logger.rs index a1898e30..23e23fc7 100644 --- a/src/database/engine/logger.rs +++ b/src/database/engine/logger.rs @@ -18,5 +18,5 @@ pub(crate) fn handle(level: LogLevel, msg: &str) { | LogLevel::Error | LogLevel::Fatal => error!("{msg}"), | LogLevel::Info => debug!("{msg}"), | LogLevel::Warn => warn!("{msg}"), - }; + } } diff --git a/src/database/engine/memory_usage.rs b/src/database/engine/memory_usage.rs index 01859815..9bb5c535 100644 --- a/src/database/engine/memory_usage.rs +++ b/src/database/engine/memory_usage.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use conduwuit::{implement, 
Result}; +use conduwuit::{Result, implement}; use rocksdb::perf::get_memory_usage_stats; use super::Engine; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index ad724765..84e59a6a 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -1,20 +1,20 @@ use std::{ collections::BTreeSet, path::Path, - sync::{atomic::AtomicU32, Arc}, + sync::{Arc, atomic::AtomicU32}, }; -use conduwuit::{debug, implement, info, warn, Result}; +use conduwuit::{Result, debug, implement, info, warn}; use rocksdb::{ColumnFamilyDescriptor, Options}; use super::{ + Db, Engine, cf_opts::cf_options, db_opts::db_options, descriptor::{self, Descriptor}, repair::repair, - Db, Engine, }; -use crate::{or_else, Context}; +use crate::{Context, or_else}; #[implement(Engine)] #[tracing::instrument(skip_all)] @@ -56,13 +56,13 @@ pub(crate) async fn open(ctx: Arc, desc: &[Descriptor]) -> Result = desc .iter() - .cloned() + .copied() .chain(missing_descriptors) .map(|ref desc| cf_options(ctx, db_opts.clone(), desc)) .collect::>()?; diff --git a/src/database/engine/repair.rs b/src/database/engine/repair.rs index 61283904..aeec0caf 100644 --- a/src/database/engine/repair.rs +++ b/src/database/engine/repair.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use conduwuit::{info, warn, Err, Result}; +use conduwuit::{Err, Result, info, warn}; use rocksdb::Options; use super::Db; diff --git a/src/database/handle.rs b/src/database/handle.rs index 43b57839..484e5618 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -4,7 +4,7 @@ use conduwuit::Result; use rocksdb::DBPinnableSlice; use serde::{Deserialize, Serialize, Serializer}; -use crate::{keyval::deserialize_val, Deserialized, Slice}; +use crate::{Deserialized, Slice, keyval::deserialize_val}; pub struct Handle<'a> { val: DBPinnableSlice<'a>, diff --git a/src/database/keyval.rs b/src/database/keyval.rs index 056e53d1..6059cd53 100644 --- a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -1,6 +1,5 @@ 
-use conduwuit::Result; +use conduwuit::{Result, smallvec::SmallVec}; use serde::{Deserialize, Serialize}; -use smallvec::SmallVec; use crate::{de, ser}; diff --git a/src/database/map.rs b/src/database/map.rs index 45139e18..ed38e1fc 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,3 +1,4 @@ +mod clear; pub mod compact; mod contains; mod count; @@ -40,28 +41,28 @@ pub(crate) use self::options::{ read_options_default, write_options_default, }; pub use self::{get_batch::Get, qry_batch::Qry}; -use crate::{watchers::Watchers, Engine}; +use crate::{Engine, watchers::Watchers}; pub struct Map { name: &'static str, - db: Arc, - cf: Arc, watchers: Watchers, - write_options: WriteOptions, + cf: Arc, + db: Arc, read_options: ReadOptions, cache_read_options: ReadOptions, + write_options: WriteOptions, } impl Map { pub(crate) fn open(db: &Arc, name: &'static str) -> Result> { Ok(Arc::new(Self { name, - db: db.clone(), - cf: open::open(db, name), watchers: Watchers::default(), - write_options: write_options_default(db), + cf: open::open(db, name), + db: db.clone(), read_options: read_options_default(db), cache_read_options: cache_read_options_default(db), + write_options: write_options_default(db), })) } diff --git a/src/database/map/clear.rs b/src/database/map/clear.rs new file mode 100644 index 00000000..321ec79c --- /dev/null +++ b/src/database/map/clear.rs @@ -0,0 +1,30 @@ +use std::sync::Arc; + +use conduwuit::{ + Result, implement, + utils::stream::{ReadyExt, TryIgnore}, +}; +use futures::{Stream, TryStreamExt}; + +use crate::keyval::Key; + +/// Delete all data stored in this map. !!! USE WITH CAUTION !!! +/// +/// See for_clear() with additional details. +#[implement(super::Map)] +#[tracing::instrument(level = "trace")] +pub async fn clear(self: &Arc) { + self.for_clear().ignore_err().ready_for_each(|_| ()).await; +} + +/// Delete all data stored in this map. !!! USE WITH CAUTION !!! 
+/// +/// Provides stream of keys undergoing deletion along with any errors. +/// +/// Note this operation applies to a snapshot of the data when invoked. +/// Additional data written during or after this call may be missed. +#[implement(super::Map)] +#[tracing::instrument(level = "trace")] +pub fn for_clear(self: &Arc) -> impl Stream>> + Send { + self.raw_keys().inspect_ok(|key| self.remove(key)) +} diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs index c0381eb4..b49bf30b 100644 --- a/src/database/map/compact.rs +++ b/src/database/map/compact.rs @@ -1,4 +1,4 @@ -use conduwuit::{implement, Err, Result}; +use conduwuit::{Err, Result, implement}; use rocksdb::{BottommostLevelCompaction, CompactOptions}; use crate::keyval::KeyBuf; @@ -52,7 +52,7 @@ pub fn compact_blocking(&self, opts: Options) -> Result { co.set_target_level(level.try_into()?); }, | (Some(_), Some(_)) => return Err!("compacting between specific levels not supported"), - }; + } self.db .db diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs index 424f8970..474818e8 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; -use arrayvec::ArrayVec; use conduwuit::{ + Result, + arrayvec::ArrayVec, err, implement, utils::{future::TryExtExt, result::FlatOk}, - Result, }; use futures::FutureExt; use serde::Serialize; @@ -16,7 +16,10 @@ use crate::{keyval::KeyBuf, ser}; /// - harder errors may not be reported #[inline] #[implement(super::Map)] -pub fn contains(self: &Arc, key: &K) -> impl Future + Send + '_ +pub fn contains( + self: &Arc, + key: &K, +) -> impl Future + Send + '_ + use<'_, K> where K: Serialize + ?Sized + Debug, { @@ -32,7 +35,7 @@ where pub fn acontains( self: &Arc, key: &K, -) -> impl Future + Send + '_ +) -> impl Future + Send + '_ + use<'_, MAX, K> where K: Serialize + ?Sized + Debug, { @@ -49,7 +52,7 @@ pub fn bcontains( self: &Arc, 
key: &K, buf: &mut B, -) -> impl Future + Send + '_ +) -> impl Future + Send + '_ + use<'_, K, B> where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, @@ -62,7 +65,10 @@ where /// - key is raw #[inline] #[implement(super::Map)] -pub fn exists<'a, K>(self: &'a Arc, key: &K) -> impl Future + Send + 'a +pub fn exists<'a, K>( + self: &'a Arc, + key: &K, +) -> impl Future + Send + 'a + use<'a, K> where K: AsRef<[u8]> + ?Sized + Debug + 'a, { diff --git a/src/database/map/count.rs b/src/database/map/count.rs index 22b298b9..78f9e2e3 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -16,7 +16,10 @@ pub fn count(self: &Arc) -> impl Future + Send + '_ { /// - From is a structured key #[implement(super::Map)] #[inline] -pub fn count_from<'a, P>(self: &'a Arc, from: &P) -> impl Future + Send + 'a +pub fn count_from<'a, P>( + self: &'a Arc, + from: &P, +) -> impl Future + Send + 'a + use<'a, P> where P: Serialize + ?Sized + Debug + 'a, { @@ -46,7 +49,7 @@ where pub fn count_prefix<'a, P>( self: &'a Arc, prefix: &P, -) -> impl Future + Send + 'a +) -> impl Future + Send + 'a + use<'a, P> where P: Serialize + ?Sized + Debug + 'a, { diff --git a/src/database/map/get.rs b/src/database/map/get.rs index d6c65be2..0971fb17 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,20 +1,23 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; -use futures::{future::ready, Future, FutureExt, TryFutureExt}; +use conduwuit::{Err, Result, err, implement, utils::result::MapExpect}; +use futures::{Future, FutureExt, TryFutureExt, future::ready}; use rocksdb::{DBPinnableSlice, ReadOptions}; use tokio::task; use crate::{ - util::{is_incomplete, map_err, or_else}, Handle, + util::{is_incomplete, map_err, or_else}, }; /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is referenced directly to perform the query. 
#[implement(super::Map)] #[tracing::instrument(skip(self, key), fields(%self), level = "trace")] -pub fn get(self: &Arc, key: &K) -> impl Future>> + Send +pub fn get( + self: &Arc, + key: &K, +) -> impl Future>> + Send + use<'_, K> where K: AsRef<[u8]> + Debug + ?Sized, { diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index ab9c1dc8..e23a8848 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,12 +1,11 @@ use std::{convert::AsRef, sync::Arc}; use conduwuit::{ - implement, + Result, implement, utils::{ - stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, + stream::{WidebandExt, automatic_amplification, automatic_width}, }, - Result, }; use futures::{Stream, StreamExt, TryStreamExt}; use rocksdb::{DBPinnableSlice, ReadOptions}; @@ -64,7 +63,7 @@ where pub(crate) fn get_batch_cached<'a, I, K>( &self, keys: I, -) -> impl Iterator>>> + Send +) -> impl Iterator>>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -78,7 +77,7 @@ where pub(crate) fn get_batch_blocking<'a, I, K>( &self, keys: I, -) -> impl Iterator>> + Send +) -> impl Iterator>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -92,7 +91,7 @@ fn get_batch_blocking_opts<'a, I, K>( &self, keys: I, read_options: &ReadOptions, -) -> impl Iterator>, rocksdb::Error>> + Send +) -> impl Iterator>, rocksdb::Error>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index 68c305af..6f010097 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -5,8 +5,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; -use arrayvec::ArrayVec; -use conduwuit::implement; +use conduwuit::{arrayvec::ArrayVec, implement}; use rocksdb::WriteBatchWithTransaction; use 
serde::Serialize; diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs index 2fe70f15..7ca932a5 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 76c76325..c9b1717a 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use super::stream_from::is_cached; use crate::{ - keyval::{result_deserialize_key, serialize_key, Key}, + keyval::{Key, result_deserialize_key, serialize_key}, stream, }; @@ -15,7 +15,7 @@ use crate::{ pub fn keys_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -25,7 +25,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn keys_from_raw

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn keys_from_raw

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -37,7 +40,7 @@ where pub fn keys_raw_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -47,7 +50,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn raw_keys_from

    (self: &Arc, from: &P) -> impl Stream>> + Send +pub fn raw_keys_from

    ( + self: &Arc, + from: &P, +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index 28bc7ccd..09dd79ac 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -1,16 +1,16 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize_key, serialize_key, Key}; +use crate::keyval::{Key, result_deserialize_key, serialize_key}; #[implement(super::Map)] pub fn keys_prefix<'a, K, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -24,7 +24,7 @@ where pub fn keys_prefix_raw

    ( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/open.rs b/src/database/map/open.rs index 6ecec044..07f7a0c6 100644 --- a/src/database/map/open.rs +++ b/src/database/map/open.rs @@ -30,8 +30,5 @@ pub(super) fn open(db: &Arc, name: &str) -> Arc { // lifetime parameter. We should not hold this handle, even in its Arc, after // closing the database (dropping `Engine`). Since `Arc` is a sibling // member along with this handle in `Map`, that is prevented. - unsafe { - Arc::increment_strong_count(cf_ptr); - Arc::from_raw(cf_ptr) - } + unsafe { Arc::from_raw(cf_ptr) } } diff --git a/src/database/map/qry.rs b/src/database/map/qry.rs index 401eba43..c6f13c0b 100644 --- a/src/database/map/qry.rs +++ b/src/database/map/qry.rs @@ -1,18 +1,20 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; -use arrayvec::ArrayVec; -use conduwuit::{implement, Result}; +use conduwuit::{Result, arrayvec::ArrayVec, implement}; use futures::Future; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::{Handle, keyval::KeyBuf, ser}; /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is serialized into an allocated buffer to perform /// the query. 
#[implement(super::Map)] #[inline] -pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send +pub fn qry( + self: &Arc, + key: &K, +) -> impl Future>> + Send + use<'_, K> where K: Serialize + ?Sized + Debug, { @@ -28,7 +30,7 @@ where pub fn aqry( self: &Arc, key: &K, -) -> impl Future>> + Send +) -> impl Future>> + Send + use<'_, MAX, K> where K: Serialize + ?Sized + Debug, { @@ -44,7 +46,7 @@ pub fn bqry( self: &Arc, key: &K, buf: &mut B, -) -> impl Future>> + Send +) -> impl Future>> + Send + use<'_, K, B> where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs index 31817c48..e42d3e63 100644 --- a/src/database/map/qry_batch.rs +++ b/src/database/map/qry_batch.rs @@ -1,17 +1,16 @@ use std::{fmt::Debug, sync::Arc}; use conduwuit::{ - implement, + Result, implement, utils::{ - stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, + stream::{WidebandExt, automatic_amplification, automatic_width}, }, - Result, }; use futures::{Stream, StreamExt, TryStreamExt}; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::{Handle, keyval::KeyBuf, ser}; pub trait Qry<'a, K, S> where @@ -51,7 +50,6 @@ where .iter() .map(ser::serialize_to::) .map(|result| result.expect("failed to serialize query key")) - .map(Into::into) .collect(); self.db diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs index ec37bbfe..a7ae9133 100644 --- a/src/database/map/remove.rs +++ b/src/database/map/remove.rs @@ -1,7 +1,6 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; -use arrayvec::ArrayVec; -use conduwuit::implement; +use conduwuit::{arrayvec::ArrayVec, implement}; use serde::Serialize; use crate::{keyval::KeyBuf, ser, util::or_else}; diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index 21558a17..c00f3e55 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use 
conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index 65072337..04e457dc 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use super::rev_stream_from::is_cached; use crate::{ - keyval::{result_deserialize_key, serialize_key, Key}, + keyval::{Key, result_deserialize_key, serialize_key}, stream, }; @@ -15,7 +15,7 @@ use crate::{ pub fn rev_keys_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -29,7 +29,7 @@ where pub fn rev_keys_from_raw

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -41,7 +41,7 @@ where pub fn rev_keys_raw_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -55,7 +55,7 @@ where pub fn rev_raw_keys_from

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index fb29acaf..fbe9f9ca 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -1,16 +1,16 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize_key, serialize_key, Key}; +use crate::keyval::{Key, result_deserialize_key, serialize_key}; #[implement(super::Map)] pub fn rev_keys_prefix<'a, K, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -24,7 +24,7 @@ where pub fn rev_keys_prefix_raw

    ( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index f55053be..789a52e8 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; @@ -40,7 +40,7 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index ddc98607..a612d2a2 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use tokio::task; use crate::{ - keyval::{result_deserialize, serialize_key, KeyVal}, + keyval::{KeyVal, result_deserialize, serialize_key}, stream, util::is_incomplete, }; @@ -20,7 +20,7 @@ use crate::{ pub fn rev_stream_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -39,7 +39,7 @@ where pub fn rev_stream_from_raw

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -55,7 +55,7 @@ where pub fn rev_stream_raw_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -74,7 +74,7 @@ where pub fn rev_raw_stream_from

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { @@ -89,7 +89,7 @@ where .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index 22a2ce53..46dc9247 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize, serialize_key, KeyVal}; +use crate::keyval::{KeyVal, result_deserialize, serialize_key}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -14,7 +14,7 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; pub fn rev_stream_prefix<'a, K, V, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -33,7 +33,7 @@ where pub fn rev_stream_prefix_raw

    ( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index bfc8ba04..f7371b6c 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; @@ -39,7 +39,7 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 74140a65..ccf48db6 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use tokio::task; use crate::{ - keyval::{result_deserialize, serialize_key, KeyVal}, + keyval::{KeyVal, result_deserialize, serialize_key}, stream, }; @@ -19,7 +19,7 @@ use crate::{ pub fn stream_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -37,7 +37,7 @@ where pub fn stream_from_raw

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -53,7 +53,7 @@ where pub fn stream_raw_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -71,7 +71,7 @@ where pub fn raw_stream_from

    ( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { @@ -86,7 +86,7 @@ where .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index adacfc81..a26478aa 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize, serialize_key, KeyVal}; +use crate::keyval::{KeyVal, result_deserialize, serialize_key}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -14,7 +14,7 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; pub fn stream_prefix<'a, K, V, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -33,7 +33,7 @@ where pub fn stream_prefix_raw

    ( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/maps.rs b/src/database/maps.rs index 9ae5ab44..19f9ced4 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -3,8 +3,8 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::Result; use crate::{ - engine::descriptor::{self, CacheDisp, Descriptor}, Engine, Map, + engine::descriptor::{self, CacheDisp, Descriptor}, }; pub(super) type Maps = BTreeMap; @@ -121,14 +121,18 @@ pub(super) static MAPS: &[Descriptor] = &[ index_size: 512, ..descriptor::SEQUENTIAL }, - Descriptor { - name: "presenceid_presence", - ..descriptor::SEQUENTIAL_SMALL - }, Descriptor { name: "publicroomids", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "pushkey_deviceid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "presenceid_presence", + ..descriptor::SEQUENTIAL_SMALL + }, Descriptor { name: "readreceiptid_readreceipt", ..descriptor::RANDOM @@ -169,8 +173,11 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomsynctoken_shortstatehash", + file_shape: 3, val_size_hint: Some(8), block_size: 512, + compression_level: 3, + bottommost_level: Some(6), ..descriptor::SEQUENTIAL }, Descriptor { @@ -179,6 +186,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomuserid_invitecount", + val_size_hint: Some(8), ..descriptor::RANDOM_SMALL }, Descriptor { @@ -191,10 +199,12 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomuserid_leftcount", + val_size_hint: Some(8), ..descriptor::RANDOM }, Descriptor { name: "roomuserid_knockedcount", + val_size_hint: Some(8), ..descriptor::RANDOM_SMALL }, Descriptor { @@ -223,7 +233,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "servername_destination", - ..descriptor::RANDOM_SMALL + ..descriptor::RANDOM_SMALL_CACHE }, Descriptor { name: "servername_educount", @@ -231,7 +241,7 @@ pub(super) 
static MAPS: &[Descriptor] = &[ }, Descriptor { name: "servername_override", - ..descriptor::RANDOM_SMALL + ..descriptor::RANDOM_SMALL_CACHE }, Descriptor { name: "servernameevent_data", diff --git a/src/database/mod.rs b/src/database/mod.rs index 4f8e2ad9..ffcefee9 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "3072"] + extern crate conduwuit_core as conduwuit; extern crate rust_rocksdb as rocksdb; @@ -5,6 +7,8 @@ conduwuit::mod_ctor! {} conduwuit::mod_dtor! {} conduwuit::rustc_flags_capture! {} +#[cfg(test)] +mod benches; mod cork; mod de; mod deserialized; @@ -23,18 +27,18 @@ mod watchers; use std::{ops::Index, sync::Arc}; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; pub use self::{ de::{Ignore, IgnoreAll}, deserialized::Deserialized, handle::Handle, - keyval::{serialize_key, serialize_val, KeyVal, Slice}, - map::{compact, Get, Map, Qry}, - ser::{serialize, serialize_to, serialize_to_vec, Cbor, Interfix, Json, Separator, SEP}, + keyval::{KeyVal, Slice, serialize_key, serialize_val}, + map::{Get, Map, Qry, compact}, + ser::{Cbor, Interfix, Json, SEP, Separator, serialize, serialize_to, serialize_to_vec}, }; pub(crate) use self::{ - engine::{context::Context, Engine}, + engine::{Engine, context::Context}, util::or_else, }; use crate::maps::{Maps, MapsKey, MapsVal}; diff --git a/src/database/pool.rs b/src/database/pool.rs index 86516c31..0fa742d1 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -3,8 +3,8 @@ mod configure; use std::{ mem::take, sync::{ - atomic::{AtomicUsize, Ordering}, Arc, Mutex, + atomic::{AtomicUsize, Ordering}, }, thread, thread::JoinHandle, @@ -12,19 +12,18 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - debug, debug_warn, err, error, implement, - result::{DebugInspect, LogDebugErr}, + Error, Result, Server, debug, err, error, implement, + result::DebugInspect, + smallvec::SmallVec, trace, 
utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, - Error, Result, Server, }; -use futures::{channel::oneshot, TryFutureExt}; +use futures::{TryFutureExt, channel::oneshot}; use oneshot::Sender as ResultSender; use rocksdb::Direction; -use smallvec::SmallVec; use self::configure::configure; -use crate::{keyval::KeyBuf, stream, Handle, Map}; +use crate::{Handle, Map, keyval::KeyBuf, stream}; /// Frontend thread-pool. Operating system threads are used to make database /// requests which are not cached. These thread-blocking requests are offloaded @@ -147,11 +146,9 @@ pub(crate) fn close(&self) { .map(JoinHandle::join) .map(|result| result.map_err(Error::from_panic)) .enumerate() - .for_each(|(id, result)| { - match result { - | Ok(()) => trace!(?id, "worker joined"), - | Err(error) => error!(?id, "worker joined with error: {error}"), - }; + .for_each(|(id, result)| match result { + | Ok(()) => trace!(?id, "worker joined"), + | Err(error) => error!(?id, "worker joined with error: {error}"), }); } @@ -248,13 +245,6 @@ async fn execute(&self, queue: &Sender, cmd: Cmd) -> Result { self.queued_max.fetch_max(queue.len(), Ordering::Relaxed); } - if queue.is_full() { - debug_warn!( - capacity = ?queue.capacity(), - "pool queue is full" - ); - } - queue .send(cmd) .await @@ -290,9 +280,12 @@ fn worker_init(&self, id: usize) { // affinity is empty (no-op) if there's only one queue set_affinity(affinity.clone()); - #[cfg(feature = "jemalloc")] + #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] if affinity.clone().count() == 1 && conduwuit::alloc::je::is_affine_arena() { - use conduwuit::alloc::je::this_thread::{arena_id, set_arena}; + use conduwuit::{ + alloc::je::this_thread::{arena_id, set_arena}, + result::LogDebugErr, + }; let id = affinity.clone().next().expect("at least one id"); @@ -343,7 +336,7 @@ fn worker_handle(self: &Arc, cmd: Cmd) { | Cmd::Get(cmd) if cmd.key.len() == 1 => self.handle_get(cmd), | Cmd::Get(cmd) => 
self.handle_batch(cmd), | Cmd::Iter(cmd) => self.handle_iter(cmd), - }; + } } #[implement(Pool)] @@ -360,7 +353,7 @@ fn handle_iter(&self, mut cmd: Seek) { return; } - let from = cmd.key.as_deref().map(Into::into); + let from = cmd.key.as_deref(); let result = match cmd.dir { | Direction::Forward => cmd.state.init_fwd(from), @@ -392,7 +385,7 @@ fn handle_batch(self: &Arc, mut cmd: Get) { return; } - let keys = cmd.key.iter().map(Into::into); + let keys = cmd.key.iter(); let result: SmallVec<_> = cmd.map.get_batch_blocking(keys).collect(); diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs index ff42ef51..92dda56e 100644 --- a/src/database/pool/configure.rs +++ b/src/database/pool/configure.rs @@ -1,7 +1,7 @@ use std::{path::PathBuf, sync::Arc}; use conduwuit::{ - debug, debug_info, expected, is_equal_to, + Server, debug, debug_info, expected, is_equal_to, utils::{ math::usize_from_f64, result::LogDebugErr, @@ -9,7 +9,6 @@ use conduwuit::{ stream::{AMPLIFICATION_LIMIT, WIDTH_LIMIT}, sys::{compute::is_core_available, storage}, }, - Server, }; use super::{QUEUE_LIMIT, WORKER_LIMIT}; diff --git a/src/database/ser.rs b/src/database/ser.rs index 372b7522..2e1a2cb0 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -1,7 +1,7 @@ use std::io::Write; -use conduwuit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; -use serde::{ser, Deserialize, Serialize}; +use conduwuit::{Error, Result, debug::type_name, err, result::DebugInspect, utils::exchange}; +use serde::{Deserialize, Serialize, ser}; use crate::util::unhandled; @@ -224,7 +224,7 @@ impl ser::Serializer for &mut Serializer<'_, W> { self.separator()?; }, | _ => unhandled!("Unrecognized serialization directive: {name:?}"), - }; + } Ok(()) } diff --git a/src/database/stream.rs b/src/database/stream.rs index f3063bb3..eb264ccd 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -5,15 +5,15 @@ mod keys_rev; use std::sync::Arc; -use 
conduwuit::{utils::exchange, Result}; +use conduwuit::{Result, utils::exchange}; use rocksdb::{DBRawIteratorWithThreadMode, ReadOptions}; pub(crate) use self::{items::Items, items_rev::ItemsRev, keys::Keys, keys_rev::KeysRev}; use crate::{ + Map, Slice, engine::Db, keyval::{Key, KeyVal, Val}, util::{is_incomplete, map_err}, - Map, Slice, }; pub(crate) struct State<'a> { @@ -113,13 +113,13 @@ impl<'a> State<'a> { } #[inline] - fn fetch_key(&self) -> Option> { self.inner.key().map(Key::from) } + fn fetch_key(&self) -> Option> { self.inner.key() } #[inline] - fn _fetch_val(&self) -> Option> { self.inner.value().map(Val::from) } + fn _fetch_val(&self) -> Option> { self.inner.value() } #[inline] - fn fetch(&self) -> Option> { self.inner.item().map(KeyVal::from) } + fn fetch(&self) -> Option> { self.inner.item() } #[inline] pub(super) fn status(&self) -> Option { self.inner.status().err() } diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs index 8814419e..ede2b822 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{keyval_longevity, Cursor, State}; +use super::{Cursor, State, keyval_longevity}; use crate::keyval::KeyVal; pub(crate) struct Items<'a> { diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index f6fcb0e5..dba8d16c 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{keyval_longevity, Cursor, State}; +use super::{Cursor, State, keyval_longevity}; use crate::keyval::KeyVal; pub(crate) struct ItemsRev<'a> { diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index b953f51c..7c89869b 100644 --- a/src/database/stream/keys.rs +++ 
b/src/database/stream/keys.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{slice_longevity, Cursor, State}; +use super::{Cursor, State, slice_longevity}; use crate::keyval::Key; pub(crate) struct Keys<'a> { diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index acf78d88..51561e5c 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{slice_longevity, Cursor, State}; +use super::{Cursor, State, slice_longevity}; use crate::keyval::Key; pub(crate) struct KeysRev<'a> { diff --git a/src/database/tests.rs b/src/database/tests.rs index 2f143698..c1a9f47c 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -2,14 +2,15 @@ use std::fmt::Debug; -use arrayvec::ArrayVec; -use conduwuit::ruma::{serde::Raw, RoomId, UserId}; +use conduwuit::{ + arrayvec::ArrayVec, + ruma::{EventId, RoomId, UserId, serde::Raw}, +}; use serde::Serialize; use crate::{ - de, ser, - ser::{serialize_to_vec, Json}, - Ignore, Interfix, + Ignore, Interfix, de, ser, + ser::{Json, serialize_to_vec}, }; #[test] @@ -151,8 +152,8 @@ fn ser_json_macro() { let content = serde_json::to_value(content).expect("failed to serialize content"); let sender: &UserId = "@foo:example.com".try_into().unwrap(); let serialized = serialize_to_vec(Json(json!({ - "sender": sender, "content": content, + "sender": sender, }))) .expect("failed to serialize value"); @@ -324,8 +325,8 @@ fn ser_array() { assert_eq!(&s, &v, "vec serialization does not match"); } -#[cfg(todo)] #[test] +#[ignore] fn de_array() { let a: u64 = 123_456; let b: u64 = 987_654; @@ -356,8 +357,8 @@ fn de_array() { assert_eq!(vec[1], b, "deserialized vec [1] does not match"); } -#[cfg(todo)] #[test] +#[ignore] fn de_complex() { 
type Key<'a> = (&'a UserId, ArrayVec, &'a RoomId); @@ -389,3 +390,160 @@ fn de_complex() { assert_eq!(arr, key, "deserialization of serialization does not match"); } + +#[test] +fn serde_tuple_option_value_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (&RoomId, Option<&UserId>) = (room_id, Some(user_id)); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (&RoomId, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.1, cc.1); + assert_eq!(cc.0, bb.0); +} + +#[test] +fn serde_tuple_option_value_none() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + + let bb: (&RoomId, Option<&UserId>) = (room_id, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (&RoomId, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.1); + assert_eq!(cc.0, bb.0); +} + +#[test] +fn serde_tuple_option_none_value() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, &UserId) = (None, user_id); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, &UserId) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.0); + assert_eq!(cc.1, bb.1); +} + +#[test] +fn serde_tuple_option_some_value() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = 
"@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, &UserId) = (Some(room_id), user_id); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, &UserId) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.0, cc.0); + assert_eq!(cc.1, bb.1); +} + +#[test] +fn serde_tuple_option_some_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, Option<&UserId>) = (Some(room_id), Some(user_id)); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(cc.0, bb.0); + assert_eq!(bb.1, cc.1); +} + +#[test] +fn serde_tuple_option_none_none() { + let aa = vec![0xFF]; + + let bb: (Option<&RoomId>, Option<&UserId>) = (None, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(cc.0, bb.0); + assert_eq!(None, cc.1); +} + +#[test] +fn serde_tuple_option_some_none_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + (Some(room_id), None, Some(user_id)); + 
+ let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.0, cc.0); + assert_eq!(None, cc.1); + assert_eq!(bb.1, cc.1); + assert_eq!(bb.2, cc.2); +} + +#[test] +fn serde_tuple_option_none_none_none() { + let aa = vec![0xFF, 0xFF]; + + let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = (None, None, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.0); + assert_eq!(bb, cc); +} diff --git a/src/database/watchers.rs b/src/database/watchers.rs index 9ce6f74c..b3907833 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, HashMap}, + collections::{HashMap, hash_map}, future::Future, pin::Pin, sync::RwLock, @@ -53,6 +53,6 @@ impl Watchers { tx.0.send(()).expect("channel should still be open"); } } - }; + } } } diff --git a/src/macros/admin.rs b/src/macros/admin.rs index e35bd586..fe227b43 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -1,14 +1,14 @@ use itertools::Itertools; use proc_macro::{Span, TokenStream}; use proc_macro2::TokenStream as TokenStream2; -use quote::{quote, ToTokens}; -use syn::{parse_quote, Attribute, Error, Fields, Ident, ItemEnum, ItemFn, Meta, Variant}; +use quote::{ToTokens, quote}; +use syn::{Attribute, Error, Fields, Ident, ItemEnum, ItemFn, Meta, Variant, parse_quote}; -use crate::{utils::camel_to_snake_string, Result}; +use crate::{Result, utils::camel_to_snake_string}; pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result { let attr: Attribute = parse_quote! 
{ - #[conduwuit_macros::implement(crate::Command, params = "<'_>")] + #[conduwuit_macros::implement(crate::Context, params = "<'_>")] }; item.attrs.push(attr); @@ -19,15 +19,16 @@ pub(super) fn command_dispatch(item: ItemEnum, _args: &[Meta]) -> Result = item.variants.iter().map(dispatch_arm).try_collect()?; let switch = quote! { + #[allow(clippy::large_stack_frames)] //TODO: fixme pub(super) async fn process( command: #name, - context: &crate::Command<'_> + context: &crate::Context<'_> ) -> Result { use #name::*; #[allow(non_snake_case)] - Ok(match command { + match command { #( #arm )* - }) + } } }; @@ -47,8 +48,7 @@ fn dispatch_arm(v: &Variant) -> Result { let arg = field.clone(); quote! { #name { #( #field ),* } => { - let c = Box::pin(context.#handler(#( #arg ),*)).await?; - Box::pin(context.write_str(c.body())).await?; + Box::pin(context.#handler(#( #arg ),*)).await }, } }, @@ -58,15 +58,14 @@ fn dispatch_arm(v: &Variant) -> Result { }; quote! { #name ( #field ) => { - Box::pin(#handler::process(#field, context)).await?; + Box::pin(#handler::process(#field, context)).await } } }, | Fields::Unit => { quote! 
{ #name => { - let c = Box::pin(context.#handler()).await?; - Box::pin(context.write_str(c.body())).await?; + Box::pin(context.#handler()).await }, } }, diff --git a/src/macros/cargo.rs b/src/macros/cargo.rs index cd36658e..a452c672 100644 --- a/src/macros/cargo.rs +++ b/src/macros/cargo.rs @@ -4,7 +4,7 @@ use proc_macro::{Span, TokenStream}; use quote::quote; use syn::{Error, ItemConst, Meta}; -use crate::{utils, Result}; +use crate::{Result, utils}; pub(super) fn manifest(item: ItemConst, args: &[Meta]) -> Result { let member = utils::get_named_string(args, "crate"); diff --git a/src/macros/config.rs b/src/macros/config.rs index 50feefa8..7b424325 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -2,15 +2,15 @@ use std::{collections::HashSet, fmt::Write as _, fs::OpenOptions, io::Write as _ use proc_macro::TokenStream; use proc_macro2::{Span, TokenStream as TokenStream2}; -use quote::{quote, ToTokens}; +use quote::{ToTokens, quote}; use syn::{ - parse::Parser, punctuated::Punctuated, spanned::Spanned, Error, Expr, ExprLit, Field, Fields, - FieldsNamed, ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, + Error, Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, MetaList, + MetaNameValue, Type, TypePath, parse::Parser, punctuated::Punctuated, spanned::Spanned, }; use crate::{ - utils::{get_simple_settings, is_cargo_build, is_cargo_test}, Result, + utils::{get_simple_settings, is_cargo_build, is_cargo_test}, }; const UNDOCUMENTED: &str = "# This item is undocumented. Please contribute documentation for it."; @@ -205,7 +205,7 @@ fn get_default(field: &Field) -> Option { }, | Meta::Path { .. 
} => return Some("false".to_owned()), | _ => return None, - }; + } } None diff --git a/src/macros/implement.rs b/src/macros/implement.rs index 8d18f243..7acc12d2 100644 --- a/src/macros/implement.rs +++ b/src/macros/implement.rs @@ -3,7 +3,7 @@ use quote::quote; use syn::{Error, ItemFn, Meta, Path}; use utils::get_named_generics; -use crate::{utils, Result}; +use crate::{Result, utils}; pub(super) fn implement(item: ItemFn, args: &[Meta]) -> Result { let generics = get_named_generics(args, "generics")?; diff --git a/src/macros/mod.rs b/src/macros/mod.rs index 1aa1e24f..31a797fe 100644 --- a/src/macros/mod.rs +++ b/src/macros/mod.rs @@ -9,8 +9,9 @@ mod utils; use proc_macro::TokenStream; use syn::{ + Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, parse::{Parse, Parser}, - parse_macro_input, Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, + parse_macro_input, }; pub(crate) type Result = std::result::Result; diff --git a/src/macros/refutable.rs b/src/macros/refutable.rs index 66e0ebc3..acfc4cd5 100644 --- a/src/macros/refutable.rs +++ b/src/macros/refutable.rs @@ -1,5 +1,5 @@ use proc_macro::{Span, TokenStream}; -use quote::{quote, ToTokens}; +use quote::{ToTokens, quote}; use syn::{FnArg::Typed, Ident, ItemFn, Meta, Pat, PatIdent, PatType, Stmt}; use crate::Result; @@ -20,7 +20,7 @@ pub(super) fn refutable(mut item: ItemFn, _args: &[Meta]) -> Result let variant = &pat.path; let fields = &pat.fields; - let Some(Typed(PatType { ref mut pat, .. })) = sig.inputs.get_mut(i) else { + let Some(Typed(PatType { pat, .. 
})) = sig.inputs.get_mut(i) else { continue; }; diff --git a/src/macros/utils.rs b/src/macros/utils.rs index af2519a7..a45e5ecc 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use syn::{parse_str, Expr, ExprLit, Generics, Lit, Meta, MetaNameValue}; +use syn::{Expr, ExprLit, Generics, Lit, Meta, MetaNameValue, parse_str}; use crate::Result; diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index f774c37a..0c5e2b6f 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -36,6 +36,7 @@ assets = [ [features] default = [ + "blurhashing", "brotli_compression", "element_hacks", "gzip_compression", @@ -49,6 +50,9 @@ default = [ "zstd_compression", ] +blurhashing = [ + "conduwuit-service/blurhashing", +] brotli_compression = [ "conduwuit-api/brotli_compression", "conduwuit-core/brotli_compression", @@ -67,6 +71,7 @@ element_hacks = [ ] gzip_compression = [ "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", "conduwuit-router/gzip_compression", "conduwuit-service/gzip_compression", ] @@ -138,6 +143,7 @@ zstd_compression = [ "conduwuit-core/zstd_compression", "conduwuit-database/zstd_compression", "conduwuit-router/zstd_compression", + "conduwuit-service/zstd_compression", ] conduwuit_mods = [ "conduwuit-core/conduwuit_mods", diff --git a/src/main/clap.rs b/src/main/clap.rs index 2bb6f3f2..707a1c76 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -3,16 +3,21 @@ use std::path::PathBuf; use clap::{ArgAction, Parser}; -use conduwuit::{ +use conduwuit_core::{ + Err, Result, config::{Figment, FigmentValue}, err, toml, utils::available_parallelism, - Err, Result, }; /// Commandline arguments #[derive(Parser, Debug)] -#[clap(version = conduwuit::version(), about, long_about = None, name = "conduwuit")] +#[clap( + about, + long_about = None, + name = "conduwuit", + version = conduwuit_core::version(), +)] pub(crate) struct Args { #[arg(short, long)] /// Path to the config TOML file 
(optional) @@ -22,6 +27,14 @@ pub(crate) struct Args { #[arg(long, short('O'))] pub(crate) option: Vec, + /// Run in a stricter read-only --maintenance mode. + #[arg(long)] + pub(crate) read_only: bool, + + /// Run in maintenance mode while refusing connections. + #[arg(long)] + pub(crate) maintenance: bool, + #[cfg(feature = "console")] /// Activate admin command console automatically after startup. #[arg(long, num_args(0))] @@ -116,6 +129,15 @@ pub(super) fn parse() -> Args { Args::parse() } /// Synthesize any command line options with configuration file options. pub(crate) fn update(mut config: Figment, args: &Args) -> Result { + if args.read_only { + config = config.join(("rocksdb_read_only", true)); + } + + if args.maintenance || args.read_only { + config = config.join(("startup_netburst", false)); + config = config.join(("listening", false)); + } + #[cfg(feature = "console")] // Indicate the admin console should be spawned automatically if the // configuration file hasn't already. 
diff --git a/src/main/logging.rs b/src/main/logging.rs index 85945e8a..eeeda127 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -1,13 +1,13 @@ use std::sync::Arc; -use conduwuit::{ +use conduwuit_core::{ + Result, config::Config, debug_warn, err, - log::{capture, fmt_span, ConsoleFormat, LogLevelReloadHandles}, + log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span}, result::UnwrapOrErr, - Result, }; -use tracing_subscriber::{fmt, layer::SubscriberExt, reload, EnvFilter, Layer, Registry}; +use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload}; #[cfg(feature = "perf_measurements")] pub(crate) type TracingFlameGuard = @@ -30,7 +30,7 @@ pub(crate) fn init( .with_span_events(console_span_events) .event_format(ConsoleFormat::new(config)) .fmt_fields(ConsoleFormat::new(config)) - .map_writer(|w| w); + .with_writer(ConsoleWriter::new(config)); let (console_reload_filter, console_reload_handle) = reload::Layer::new(console_filter.clone()); diff --git a/src/main/main.rs b/src/main/main.rs index dacc2a2e..1a9d3fe4 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "49152"] //TODO: reduce me + pub(crate) mod clap; mod logging; mod mods; @@ -7,24 +9,21 @@ mod sentry; mod server; mod signal; -extern crate conduwuit_core as conduwuit; +use std::sync::{Arc, atomic::Ordering}; -use std::sync::{atomic::Ordering, Arc}; - -use conduwuit::{debug_info, error, rustc_flags_capture, Error, Result}; +use conduwuit_core::{Error, Result, debug_info, error, rustc_flags_capture}; use server::Server; rustc_flags_capture! 
{} -fn main() -> Result<(), Error> { +fn main() -> Result { let args = clap::parse(); let runtime = runtime::new(&args)?; let server = Server::new(&args, Some(runtime.handle()))?; + runtime.spawn(signal::signal(server.clone())); runtime.block_on(async_main(&server))?; - - // explicit drop here to trace thread and tls dtors - drop(runtime); + runtime::shutdown(&server, runtime); #[cfg(unix)] if server.server.restarting.load(Ordering::Acquire) { diff --git a/src/main/mods.rs b/src/main/mods.rs index 9ab36e6c..d585a381 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -6,16 +6,16 @@ extern crate conduwuit_service; use std::{ future::Future, pin::Pin, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; -use conduwuit::{debug, error, mods, Error, Result}; +use conduwuit_core::{Error, Result, debug, error, mods}; use conduwuit_service::Services; use crate::Server; type StartFuncResult = Pin>> + Send>>; -type StartFuncProto = fn(&Arc) -> StartFuncResult; +type StartFuncProto = fn(&Arc) -> StartFuncResult; type RunFuncResult = Pin> + Send>>; type RunFuncProto = fn(&Arc) -> RunFuncResult; @@ -34,8 +34,8 @@ const MODULE_NAMES: &[&str] = &[ ]; #[cfg(panic_trap)] -conduwuit::mod_init! {{ - conduwuit::debug::set_panic_trap(); +conduwuit_core::mod_init! {{ + conduwuit_core::debug::set_panic_trap(); }} pub(crate) async fn run(server: &Arc, starts: bool) -> Result<(bool, bool), Error> { diff --git a/src/main/restart.rs b/src/main/restart.rs index e6f45b82..b9d1dc94 100644 --- a/src/main/restart.rs +++ b/src/main/restart.rs @@ -2,7 +2,7 @@ use std::{env, os::unix::process::CommandExt, process::Command}; -use conduwuit::{debug, info, utils}; +use conduwuit_core::{debug, info, utils}; #[cold] pub(super) fn restart() -> ! 
{ diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 9f4f60f8..1c58ea81 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -1,30 +1,29 @@ use std::{ iter::once, sync::{ + Arc, OnceLock, atomic::{AtomicUsize, Ordering}, - OnceLock, }, thread, time::Duration, }; -use conduwuit::{ - is_true, - result::LogDebugErr, - utils::{ - available_parallelism, - sys::compute::{nth_core_available, set_affinity}, - }, - Result, +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] +use conduwuit_core::result::LogDebugErr; +use conduwuit_core::{ + Result, debug, is_true, + utils::sys::compute::{nth_core_available, set_affinity}, }; use tokio::runtime::Builder; -use crate::clap::Args; +use crate::{clap::Args, server::Server}; const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; +const SHUTDOWN_TIMEOUT: Duration = Duration::from_millis(10000); +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] const DISABLE_MUZZY_THRESHOLD: usize = 4; static WORKER_AFFINITY: OnceLock = OnceLock::new(); @@ -63,6 +62,8 @@ pub(super) fn new(args: &Args) -> Result { #[cfg(tokio_unstable)] builder .on_task_spawn(task_spawn) + .on_before_task_poll(task_enter) + .on_after_task_poll(task_leave) .on_task_terminate(task_terminate); #[cfg(tokio_unstable)] @@ -83,6 +84,42 @@ fn enable_histogram(builder: &mut Builder, args: &Args) { .metrics_poll_time_histogram_configuration(linear); } +#[cfg(tokio_unstable)] +#[tracing::instrument(name = "stop", level = "info", skip_all)] +pub(super) fn shutdown(server: &Arc, runtime: tokio::runtime::Runtime) { + use conduwuit_core::event; + use tracing::Level; + + // The final metrics output is promoted to INFO when tokio_unstable is active in + // a release/bench mode and DEBUG is likely optimized out + const LEVEL: Level = if cfg!(debug_assertions) { + Level::DEBUG + } else { + Level::INFO + }; + + debug!( + timeout = ?SHUTDOWN_TIMEOUT, + 
"Waiting for runtime..." + ); + + runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); + let runtime_metrics = server.server.metrics.runtime_interval().unwrap_or_default(); + + event!(LEVEL, ?runtime_metrics, "Final runtime metrics"); +} + +#[cfg(not(tokio_unstable))] +#[tracing::instrument(name = "stop", level = "info", skip_all)] +pub(super) fn shutdown(_server: &Arc, runtime: tokio::runtime::Runtime) { + debug!( + timeout = ?SHUTDOWN_TIMEOUT, + "Waiting for runtime..." + ); + + runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); +} + #[tracing::instrument( name = "fork", level = "debug", @@ -122,9 +159,9 @@ fn set_worker_affinity() { set_worker_mallctl(id); } -#[cfg(feature = "jemalloc")] +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] fn set_worker_mallctl(id: usize) { - use conduwuit::alloc::je::{ + use conduwuit_core::alloc::je::{ is_affine_arena, this_thread::{set_arena, set_muzzy_decay}, }; @@ -137,13 +174,14 @@ fn set_worker_mallctl(id: usize) { .get() .expect("GC_MUZZY initialized by runtime::new()"); - let muzzy_auto_disable = available_parallelism() >= DISABLE_MUZZY_THRESHOLD; + let muzzy_auto_disable = + conduwuit_core::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { set_muzzy_decay(-1).log_debug_err().ok(); } } -#[cfg(not(feature = "jemalloc"))] +#[cfg(any(not(feature = "jemalloc"), target_env = "msvc"))] fn set_worker_mallctl(_: usize) {} #[tracing::instrument( @@ -189,8 +227,8 @@ fn thread_park() { } fn gc_on_park() { - #[cfg(feature = "jemalloc")] - conduwuit::alloc::je::this_thread::decay() + #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] + conduwuit_core::alloc::je::this_thread::decay() .log_debug_err() .ok(); } @@ -216,3 +254,25 @@ fn task_spawn(meta: &tokio::runtime::TaskMeta<'_>) {} ), )] fn task_terminate(meta: &tokio::runtime::TaskMeta<'_>) {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "enter", + level = "trace", + skip_all, + fields( + 
id = %meta.id() + ), +)] +fn task_enter(meta: &tokio::runtime::TaskMeta<'_>) {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "leave", + level = "trace", + skip_all, + fields( + id = %meta.id() + ), +)] +fn task_leave(meta: &tokio::runtime::TaskMeta<'_>) {} diff --git a/src/main/sentry.rs b/src/main/sentry.rs index 02835ec8..68f12eb7 100644 --- a/src/main/sentry.rs +++ b/src/main/sentry.rs @@ -5,13 +5,13 @@ use std::{ sync::{Arc, OnceLock}, }; -use conduwuit::{config::Config, debug, trace}; +use conduwuit_core::{config::Config, debug, trace}; use sentry::{ - types::{ - protocol::v7::{Context, Event}, - Dsn, - }, Breadcrumb, ClientOptions, Level, + types::{ + Dsn, + protocol::v7::{Context, Event}, + }, }; static SEND_PANIC: OnceLock = OnceLock::new(); @@ -43,7 +43,7 @@ fn options(config: &Config) -> ClientOptions { traces_sample_rate: config.sentry_traces_sample_rate, debug: cfg!(debug_assertions), release: sentry::release_name!(), - user_agent: conduwuit::version::user_agent().into(), + user_agent: conduwuit_core::version::user_agent().into(), attach_stacktrace: config.sentry_attach_stacktrace, before_send: Some(Arc::new(before_send)), before_breadcrumb: Some(Arc::new(before_breadcrumb)), diff --git a/src/main/server.rs b/src/main/server.rs index 7376b2fc..8f697ca4 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,11 +1,11 @@ use std::{path::PathBuf, sync::Arc}; -use conduwuit::{ +use conduwuit_core::{ + Error, Result, config::Config, info, log::Log, utils::{stream, sys}, - Error, Result, }; use tokio::{runtime, sync::Mutex}; @@ -14,7 +14,7 @@ use crate::{clap::Args, logging::TracingFlameGuard}; /// Server runtime state; complete pub(crate) struct Server { /// Server runtime state; public portion - pub(crate) server: Arc, + pub(crate) server: Arc, pub(crate) services: Mutex>>, @@ -25,7 +25,7 @@ pub(crate) struct Server { #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] // Module instances; TODO: move to mods::loaded mgmt vector - 
pub(crate) mods: tokio::sync::RwLock>, + pub(crate) mods: tokio::sync::RwLock>, } impl Server { @@ -66,11 +66,11 @@ impl Server { database_path = ?config.database_path, log_levels = %config.log, "{}", - conduwuit::version(), + conduwuit_core::version(), ); Ok(Arc::new(Self { - server: Arc::new(conduwuit::Server::new(config, runtime.cloned(), Log { + server: Arc::new(conduwuit_core::Server::new(config, runtime.cloned(), Log { reload: tracing_reload_handle, capture, })), diff --git a/src/main/signal.rs b/src/main/signal.rs index 343b95c9..a5d07774 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{debug_error, trace, warn}; +use conduwuit_core::{debug_error, trace, warn}; use tokio::signal; use super::server::Server; diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 51e15aed..e4ddcb9b 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -17,34 +17,79 @@ crate-type = [ ] [features] +brotli_compression = [ + "conduwuit-admin/brotli_compression", + "conduwuit-api/brotli_compression", + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", + "tower-http/compression-br", +] +direct_tls = [ + "axum-server/tls-rustls", + "dep:rustls", + "dep:axum-server-dual-protocol", +] +gzip_compression = [ + "conduwuit-admin/gzip_compression", + "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", + "tower-http/compression-gzip", +] +io_uring = [ + "conduwuit-admin/io_uring", + "conduwuit-api/io_uring", + "conduwuit-service/io_uring", + "conduwuit-api/io_uring", +] +jemalloc = [ + "conduwuit-admin/jemalloc", + "conduwuit-api/jemalloc", + "conduwuit-core/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-admin/jemalloc_conf", + "conduwuit-api/jemalloc_conf", + "conduwuit-core/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-admin/jemalloc_prof", + 
"conduwuit-api/jemalloc_prof", + "conduwuit-core/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-admin/jemalloc_stats", + "conduwuit-api/jemalloc_stats", + "conduwuit-core/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] release_max_log_level = [ + "conduwuit-admin/release_max_log_level", + "conduwuit-api/release_max_log_level", + "conduwuit-core/release_max_log_level", + "conduwuit-service/release_max_log_level", "tracing/max_level_trace", "tracing/release_max_level_info", "log/max_level_trace", "log/release_max_level_info", ] sentry_telemetry = [ + "conduwuit-core/sentry_telemetry", "dep:sentry", "dep:sentry-tracing", "dep:sentry-tower", ] -zstd_compression = [ - "tower-http/compression-zstd", -] -gzip_compression = [ - "tower-http/compression-gzip", -] -brotli_compression = [ - "tower-http/compression-br", -] systemd = [ "dep:sd-notify", ] - -direct_tls = [ - "axum-server/tls-rustls", - "dep:rustls", - "dep:axum-server-dual-protocol", +zstd_compression = [ + "conduwuit-api/zstd_compression", + "conduwuit-core/zstd_compression", + "conduwuit-service/zstd_compression", + "tower-http/compression-zstd", ] [dependencies] @@ -69,11 +114,11 @@ ruma.workspace = true rustls.workspace = true rustls.optional = true sentry.optional = true +sentry.workspace = true sentry-tower.optional = true sentry-tower.workspace = true sentry-tracing.optional = true sentry-tracing.workspace = true -sentry.workspace = true serde_json.workspace = true tokio.workspace = true tower.workspace = true diff --git a/src/router/layers.rs b/src/router/layers.rs index 96bca4fd..6920555d 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -1,16 +1,16 @@ use std::{any::Any, sync::Arc, time::Duration}; use axum::{ - extract::{DefaultBodyLimit, MatchedPath}, Router, + extract::{DefaultBodyLimit, MatchedPath}, }; use axum_client_ip::SecureClientIpSource; -use conduwuit::{error, Result, Server}; +use conduwuit::{Result, Server, debug, error}; 
use conduwuit_api::router::state::Guard; use conduwuit_service::Services; use http::{ - header::{self, HeaderName}, HeaderValue, Method, StatusCode, + header::{self, HeaderName}, }; use tower::ServiceBuilder; use tower_http::{ @@ -18,6 +18,7 @@ use tower_http::{ cors::{self, CorsLayer}, sensitive_headers::SetSensitiveHeadersLayer, set_header::SetResponseHeaderLayer, + timeout::{RequestBodyTimeoutLayer, ResponseBodyTimeoutLayer, TimeoutLayer}, trace::{DefaultOnFailure, DefaultOnRequest, DefaultOnResponse, TraceLayer}, }; use tracing::Level; @@ -48,9 +49,9 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ))] let layers = layers.layer(compression_layer(server)); + let services_ = services.clone(); let layers = layers .layer(SetSensitiveHeadersLayer::new([header::AUTHORIZATION])) - .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::spawn)) .layer( TraceLayer::new_for_http() .make_span_with(tracing_span::<_>) @@ -60,6 +61,13 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ) .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::handle)) .layer(SecureClientIpSource::ConnectInfo.into_extension()) + .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs( + server.config.client_response_timeout, + ))) + .layer(RequestBodyTimeoutLayer::new(Duration::from_secs( + server.config.client_receive_timeout, + ))) + .layer(TimeoutLayer::new(Duration::from_secs(server.config.client_request_timeout))) .layer(SetResponseHeaderLayer::if_not_present( HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster HeaderValue::from_static("?1"), @@ -86,7 +94,7 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { )) .layer(cors_layer(server)) .layer(body_limit_layer(server)) - .layer(CatchPanicLayer::custom(catch_panic)); + .layer(CatchPanicLayer::custom(move |panic| catch_panic(panic, services_.clone()))); let (router, guard) 
= router::build(services); Ok((router.layer(layers), guard)) @@ -164,21 +172,20 @@ fn body_limit_layer(server: &Server) -> DefaultBodyLimit { #[allow(clippy::needless_pass_by_value)] fn catch_panic( err: Box, + services: Arc, ) -> http::Response> { - //TODO: XXX - /* - conduwuit_service::services() - .server - .metrics - .requests_panic - .fetch_add(1, std::sync::atomic::Ordering::Release); - */ - let details = if let Some(s) = err.downcast_ref::() { - s.clone() - } else if let Some(s) = err.downcast_ref::<&str>() { - (*s).to_owned() - } else { - "Unknown internal server error occurred.".to_owned() + services + .server + .metrics + .requests_panic + .fetch_add(1, std::sync::atomic::Ordering::Release); + + let details = match err.downcast_ref::() { + | Some(s) => s.clone(), + | _ => match err.downcast_ref::<&str>() { + | Some(s) => (*s).to_owned(), + | _ => "Unknown internal server error occurred.".to_owned(), + }, }; error!("{details:#}"); @@ -196,20 +203,26 @@ fn catch_panic( } fn tracing_span(request: &http::Request) -> tracing::Span { - let path = request.extensions().get::().map_or_else( - || { - request - .uri() - .path_and_query() - .expect("all requests have a path") - .as_str() - }, - truncated_matched_path, - ); + let path = request + .extensions() + .get::() + .map_or_else(|| request_path_str(request), truncated_matched_path); - let method = request.method(); + tracing::span! 
{ + parent: None, + debug::INFO_SPAN_LEVEL, + "router", + method = %request.method(), + %path, + } +} - tracing::debug_span!(parent: None, "router", %method, %path) +fn request_path_str(request: &http::Request) -> &str { + request + .uri() + .path_and_query() + .expect("all requests have a path") + .as_str() } fn truncated_matched_path(path: &MatchedPath) -> &str { diff --git a/src/router/mod.rs b/src/router/mod.rs index f64dcb67..7038c5df 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "32768"] //TODO: reduce me + mod layers; mod request; mod router; diff --git a/src/router/request.rs b/src/router/request.rs index ca063338..dba90324 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,87 +1,27 @@ -use std::sync::{atomic::Ordering, Arc}; +use std::{ + fmt::Debug, + sync::{Arc, atomic::Ordering}, + time::Duration, +}; use axum::{ extract::State, response::{IntoResponse, Response}, }; -use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; +use conduwuit::{Result, debug, debug_error, debug_warn, err, error, trace}; use conduwuit_service::Services; +use futures::FutureExt; use http::{Method, StatusCode, Uri}; +use tokio::time::sleep; +use tracing::Span; -#[tracing::instrument( - parent = None, - level = "trace", - skip_all, - fields( - handled = %services - .server - .metrics - .requests_spawn_finished - .fetch_add(1, Ordering::Relaxed), - active = %services - .server - .metrics - .requests_spawn_active - .fetch_add(1, Ordering::Relaxed), - ) -)] -pub(crate) async fn spawn( - State(services): State>, - req: http::Request, - next: axum::middleware::Next, -) -> Result { - let server = &services.server; - - #[cfg(debug_assertions)] - conduwuit::defer! 
{{ - _ = server - .metrics - .requests_spawn_active - .fetch_sub(1, Ordering::Relaxed); - }}; - - if !server.running() { - debug_warn!("unavailable pending shutdown"); - return Err(StatusCode::SERVICE_UNAVAILABLE); - } - - let fut = next.run(req); - let task = server.runtime().spawn(fut); - task.await.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) -} - -#[tracing::instrument( - level = "debug", - skip_all, - fields( - handled = %services - .server - .metrics - .requests_handle_finished - .fetch_add(1, Ordering::Relaxed), - active = %services - .server - .metrics - .requests_handle_active - .fetch_add(1, Ordering::Relaxed), - ) -)] +#[tracing::instrument(name = "request", level = "debug", skip_all)] pub(crate) async fn handle( State(services): State>, req: http::Request, next: axum::middleware::Next, ) -> Result { - let server = &services.server; - - #[cfg(debug_assertions)] - conduwuit::defer! {{ - _ = server - .metrics - .requests_handle_active - .fetch_sub(1, Ordering::Relaxed); - }}; - - if !server.running() { + if !services.server.running() { debug_warn!( method = %req.method(), uri = %req.uri(), @@ -93,14 +33,74 @@ pub(crate) async fn handle( let uri = req.uri().clone(); let method = req.method().clone(); - let result = next.run(req).await; - handle_result(&method, &uri, result) + let services_ = services.clone(); + let parent = Span::current(); + let task = services.server.runtime().spawn(async move { + tokio::select! 
{ + response = execute(&services_, req, next, &parent) => response, + response = services_.server.until_shutdown() + .then(|()| { + let timeout = services_.server.config.client_shutdown_timeout; + let timeout = Duration::from_secs(timeout); + sleep(timeout) + }) + .map(|()| StatusCode::SERVICE_UNAVAILABLE) + .map(IntoResponse::into_response) => response, + } + }); + + task.await + .map_err(unhandled) + .and_then(move |result| handle_result(&method, &uri, result)) +} + +#[tracing::instrument( + name = "handle", + level = "debug", + parent = parent, + skip_all, + fields( + active = %services + .server + .metrics + .requests_handle_active + .fetch_add(1, Ordering::Relaxed), + handled = %services + .server + .metrics + .requests_handle_finished + .load(Ordering::Relaxed), + ) +)] +async fn execute( + // we made a safety contract that Services will not go out of scope + // during the request; this ensures a reference is accounted for at + // the base frame of the task regardless of its detachment. + services: &Arc, + req: http::Request, + next: axum::middleware::Next, + parent: &Span, +) -> Response { + #[cfg(debug_assertions)] + conduwuit::defer! 
{{ + _ = services.server + .metrics + .requests_handle_finished + .fetch_add(1, Ordering::Relaxed); + _ = services.server + .metrics + .requests_handle_active + .fetch_sub(1, Ordering::Relaxed); + }}; + + next.run(req).await } fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result { let status = result.status(); let reason = status.canonical_reason().unwrap_or("Unknown Reason"); let code = status.as_u16(); + if status.is_server_error() { error!(method = ?method, uri = ?uri, "{code} {reason}"); } else if status.is_client_error() { @@ -117,3 +117,10 @@ fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result(e: Error) -> StatusCode { + error!("unhandled error or panic during request: {e:?}"); + + StatusCode::INTERNAL_SERVER_ERROR +} diff --git a/src/router/router.rs b/src/router/router.rs index b3531418..0f95b924 100644 --- a/src/router/router.rs +++ b/src/router/router.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use axum::{response::IntoResponse, routing::get, Router}; +use axum::{Router, response::IntoResponse, routing::get}; use conduwuit::Error; use conduwuit_api::router::{state, state::Guard}; use conduwuit_service::Services; diff --git a/src/router/run.rs b/src/router/run.rs index ea8a7666..ff54594f 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -3,12 +3,12 @@ extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; use std::{ - sync::{atomic::Ordering, Arc, Weak}, + sync::{Arc, Weak, atomic::Ordering}, time::Duration, }; use axum_server::Handle as ServerHandle; -use conduwuit::{debug, debug_error, debug_info, error, info, Error, Result, Server}; +use conduwuit::{Error, Result, Server, debug, debug_error, debug_info, error, info}; use futures::FutureExt; use service::Services; use tokio::{ @@ -77,6 +77,10 @@ pub(crate) async fn start(server: Arc) -> Result> { pub(crate) async fn stop(services: Arc) -> Result<()> { debug!("Shutting down..."); + #[cfg(all(feature = "systemd", target_os = 
"linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) + .expect("failed to notify systemd of stopping state"); + // Wait for all completions before dropping or we'll lose them to the module // unload and explode. services.stop().await; @@ -100,10 +104,6 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { ); } - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) - .expect("failed to notify systemd of stopping state"); - info!("Shutdown complete."); Ok(()) } @@ -122,10 +122,10 @@ async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_serve error!("failed sending shutdown transaction to channel: {e}"); } - let timeout = Duration::from_secs(36); + let timeout = server.config.client_shutdown_timeout; + let timeout = Duration::from_secs(timeout); debug!( ?timeout, - spawn_active = ?server.metrics.requests_spawn_active.load(Ordering::Relaxed), handle_active = ?server.metrics.requests_handle_active.load(Ordering::Relaxed), "Notifying for graceful shutdown" ); diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs index 5c822f2b..2399edf0 100644 --- a/src/router/serve/mod.rs +++ b/src/router/serve/mod.rs @@ -6,7 +6,7 @@ mod unix; use std::sync::Arc; use axum_server::Handle as ServerHandle; -use conduwuit::{err, Result}; +use conduwuit::{Result, err}; use conduwuit_service::Services; use tokio::sync::broadcast; diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index 0e971f3c..6db7e138 100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -1,11 +1,11 @@ use std::{ net::SocketAddr, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; use axum::Router; -use axum_server::{bind, Handle as ServerHandle}; -use conduwuit::{debug_info, info, Result, Server}; +use axum_server::{Handle as ServerHandle, bind}; +use conduwuit::{Result, Server, debug_info, info}; use tokio::task::JoinSet; pub(super) async fn serve( @@ -24,27 
+24,20 @@ pub(super) async fn serve( info!("Listening on {addrs:?}"); while join_set.join_next().await.is_some() {} - let spawn_active = server.metrics.requests_spawn_active.load(Ordering::Relaxed); let handle_active = server .metrics .requests_handle_active .load(Ordering::Relaxed); debug_info!( - spawn_finished = server - .metrics - .requests_spawn_finished - .load(Ordering::Relaxed), handle_finished = server .metrics .requests_handle_finished .load(Ordering::Relaxed), panics = server.metrics.requests_panic.load(Ordering::Relaxed), - spawn_active, handle_active, "Stopped listening on {addrs:?}", ); - debug_assert!(spawn_active == 0, "active request tasks are not joined"); debug_assert!(handle_active == 0, "active request handles still pending"); Ok(()) diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index 9d3fbd3b..20b58601 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -3,10 +3,10 @@ use std::{net::SocketAddr, sync::Arc}; use axum::Router; use axum_server::Handle as ServerHandle; use axum_server_dual_protocol::{ - axum_server::{bind_rustls, tls_rustls::RustlsConfig}, ServerExt, + axum_server::{bind_rustls, tls_rustls::RustlsConfig}, }; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; use tokio::task::JoinSet; use tracing::{debug, info, warn}; @@ -17,14 +17,13 @@ pub(super) async fn serve( addrs: Vec, ) -> Result { let tls = &server.config.tls; - let certs = tls - .certs - .as_ref() - .ok_or(err!(Config("tls.certs", "Missing required value in tls config section")))?; + let certs = tls.certs.as_ref().ok_or_else(|| { + err!(Config("tls.certs", "Missing required value in tls config section")) + })?; let key = tls .key .as_ref() - .ok_or(err!(Config("tls.key", "Missing required value in tls config section")))?; + .ok_or_else(|| err!(Config("tls.key", "Missing required value in tls config section")))?; // we use ring for ruma and hashing state, but aws-lc-rs is the new default. 
// without this, TLS mode will panic. @@ -32,12 +31,14 @@ pub(super) async fn serve( .install_default() .expect("failed to initialise aws-lc-rs rustls crypto provider"); - debug!("Using direct TLS. Certificate path {certs} and certificate private key path {key}",); info!( "Note: It is strongly recommended that you use a reverse proxy instead of running \ conduwuit directly with TLS." ); - let conf = RustlsConfig::from_pem_file(certs, key).await?; + debug!("Using direct TLS. Certificate path {certs} and certificate private key path {key}",); + let conf = RustlsConfig::from_pem_file(certs, key) + .await + .map_err(|e| err!(Config("tls", "Failed to load certificates or key: {e}")))?; let mut join_set = JoinSet::new(); let app = app.into_make_service_with_connect_info::(); diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index 6855b34c..2af17274 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -4,15 +4,15 @@ use std::{ net::{self, IpAddr, Ipv4Addr}, os::fd::AsRawFd, path::Path, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; use axum::{ - extract::{connect_info::IntoMakeServiceWithConnectInfo, Request}, Router, + extract::{Request, connect_info::IntoMakeServiceWithConnectInfo}, }; use conduwuit::{ - debug, debug_error, info, result::UnwrapInfallible, trace, warn, Err, Result, Server, + Err, Result, Server, debug, debug_error, info, result::UnwrapInfallible, trace, warn, }; use hyper::{body::Incoming, service::service_fn}; use hyper_util::{ @@ -21,10 +21,10 @@ use hyper_util::{ }; use tokio::{ fs, - net::{unix::SocketAddr, UnixListener, UnixStream}, + net::{UnixListener, UnixStream, unix::SocketAddr}, sync::broadcast::{self}, task::JoinSet, - time::{sleep, Duration}, + time::{Duration, sleep}, }; use tower::{Service, ServiceExt}; @@ -159,7 +159,12 @@ async fn fini(server: &Arc, listener: UnixListener, mut tasks: JoinSet<( drop(listener); debug!("Waiting for requests to finish..."); - while 
server.metrics.requests_spawn_active.load(Ordering::Relaxed) > 0 { + while server + .metrics + .requests_handle_active + .load(Ordering::Relaxed) + .gt(&0) + { tokio::select! { task = tasks.join_next() => if task.is_none() { break; }, () = sleep(FINI_POLL_INTERVAL) => {}, diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index c4f75453..8b0d1405 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -17,7 +17,12 @@ crate-type = [ ] [features] +blurhashing = [ + "dep:image", + "dep:blurhash", +] brotli_compression = [ + "conduwuit-core/brotli_compression", "reqwest/brotli", ] console = [ @@ -26,27 +31,50 @@ console = [ ] element_hacks = [] gzip_compression = [ + "conduwuit-core/gzip_compression", "reqwest/gzip", ] +io_uring = [ + "conduwuit-database/io_uring", +] +jemalloc = [ + "conduwuit-core/jemalloc", + "conduwuit-database/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", + "conduwuit-database/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", + "conduwuit-database/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", + "conduwuit-database/jemalloc_stats", +] media_thumbnail = [ "dep:image", ] release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", + "conduwuit-core/release_max_log_level", + "conduwuit-database/release_max_log_level", "log/max_level_trace", "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", ] url_preview = [ "dep:image", "dep:webpage", ] zstd_compression = [ + "conduwuit-core/zstd_compression", + "conduwuit-database/zstd_compression", "reqwest/zstd", ] [dependencies] -arrayvec.workspace = true async-trait.workspace = true base64.workspace = true bytes.workspace = true @@ -74,7 +102,6 @@ serde_json.workspace = true serde.workspace = true serde_yaml.workspace = true sha2.workspace = true -smallvec.workspace = true termimad.workspace = true termimad.optional = true tokio.workspace = true @@ 
-82,6 +109,8 @@ tracing.workspace = true url.workspace = true webpage.workspace = true webpage.optional = true +blurhash.workspace = true +blurhash.optional = true [lints] workspace = true diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index ddbc15a4..453051be 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,23 +1,22 @@ use std::sync::Arc; use conduwuit::{ - err, implement, - utils::{result::LogErr, stream::TryIgnore, ReadyExt}, - Err, Result, + Err, Result, err, implement, + utils::{ReadyExt, result::LogErr, stream::TryIgnore}, }; -use database::{Deserialized, Handle, Interfix, Json, Map}; +use database::{Deserialized, Handle, Ignore, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ + RoomId, UserId, events::{ AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, - RoomId, UserId, }; use serde::Deserialize; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { services: Services, @@ -131,18 +130,20 @@ pub fn changes_since<'a>( room_id: Option<&'a RoomId>, user_id: &'a UserId, since: u64, + to: Option, ) -> impl Stream + Send + 'a { - let prefix = (room_id, user_id, Interfix); - let prefix = database::serialize_key(prefix).expect("failed to serialize prefix"); + type Key<'a> = (Option<&'a RoomId>, &'a UserId, u64, Ignore); // Skip the data that's exactly at since, because we sent that last time let first_possible = (room_id, user_id, since.saturating_add(1)); self.db .roomuserdataid_accountdata - .stream_from_raw(&first_possible) + .stream_from(&first_possible) .ignore_err() - .ready_take_while(move |(k, _)| k.starts_with(&prefix)) + .ready_take_while(move |((room_id_, user_id_, count, _), _): &(Key<'_>, _)| { + room_id == *room_id_ && user_id == *user_id_ && to.is_none_or(|to| *count <= to) + }) .map(move |(_, v)| { match room_id { | Some(_) => 
serde_json::from_slice::>(v) diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index de201f4b..02f41303 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -1,17 +1,18 @@ #![cfg(feature = "console")] + use std::{ collections::VecDeque, sync::{Arc, Mutex}, }; -use conduwuit::{debug, defer, error, log, Server}; +use conduwuit::{Server, debug, defer, error, log, log::is_systemd_mode}; use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; use termimad::MadSkin; use tokio::task::JoinHandle; -use crate::{admin, Dep}; +use crate::{Dep, admin}; pub struct Console { server: Arc, @@ -123,7 +124,7 @@ impl Console { } async fn readline(self: &Arc) -> Result { - let _suppression = log::Suppress::new(&self.server); + let _suppression = (!is_systemd_mode()).then(|| log::Suppress::new(&self.server)); let (mut readline, _writer) = Readline::new(PROMPT.to_owned())?; let self_ = Arc::clone(self); @@ -220,7 +221,7 @@ pub fn print(markdown: &str) { } fn configure_output_err(mut output: MadSkin) -> MadSkin { - use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; + use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(196), Color::AnsiValue(234)); output.inline_code = code_style.clone(); @@ -235,7 +236,7 @@ fn configure_output_err(mut output: MadSkin) -> MadSkin { } fn configure_output(mut output: MadSkin) -> MadSkin { - use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; + use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(40), Color::AnsiValue(234)); output.inline_code = code_style.clone(); diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 7b691fb1..cd0fc5a9 100644 --- 
a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -1,7 +1,9 @@ use std::collections::BTreeMap; -use conduwuit::{pdu::PduBuilder, Result}; +use conduwuit::{Result, pdu::PduBuilder}; +use futures::FutureExt; use ruma::{ + RoomId, RoomVersionId, events::room::{ canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, @@ -14,18 +16,17 @@ use ruma::{ preview_url::RoomPreviewUrlsEventContent, topic::RoomTopicEventContent, }, - RoomId, RoomVersionId, }; use crate::Services; /// Create the admin room. /// -/// Users in this room are considered admins by conduit, and the room can be +/// Users in this room are considered admins by conduwuit, and the room can be /// used to issue admin commands by talking to the server user inside it. -pub async fn create_admin_room(services: &Services) -> Result<()> { +pub async fn create_admin_room(services: &Services) -> Result { let room_id = RoomId::new(services.globals.server_name()); - let room_version = &services.server.config.default_room_version; + let room_version = &services.config.default_room_version; let _short_id = services .rooms @@ -36,14 +37,14 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { let state_lock = services.rooms.state.mutex.lock(&room_id).await; // Create a user for the server - let server_user = &services.globals.server_user; + let server_user = services.globals.server_user.as_ref(); services.users.create(server_user, None)?; let create_content = { use RoomVersionId::*; match room_version { | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => - RoomCreateEventContent::new_v1(server_user.clone()), + RoomCreateEventContent::new_v1(server_user.into()), | _ => RoomCreateEventContent::new_v11(), } }; @@ -63,6 +64,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { &room_id, &state_lock, ) + .boxed() .await?; // 2. 
Make server user/bot join @@ -71,17 +73,18 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .timeline .build_and_append_pdu( PduBuilder::state( - server_user.to_string(), + String::from(server_user), &RoomMemberEventContent::new(MembershipState::Join), ), server_user, &room_id, &state_lock, ) + .boxed() .await?; // 3. Power levels - let users = BTreeMap::from_iter([(server_user.clone(), 100.into())]); + let users = BTreeMap::from_iter([(server_user.into(), 69420.into())]); services .rooms @@ -95,6 +98,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { &room_id, &state_lock, ) + .boxed() .await?; // 4.1 Join Rules @@ -107,6 +111,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { &room_id, &state_lock, ) + .boxed() .await?; // 4.2 History Visibility @@ -122,6 +127,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { &room_id, &state_lock, ) + .boxed() .await?; // 4.3 Guest Access @@ -137,10 +143,11 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { &room_id, &state_lock, ) + .boxed() .await?; // 5. Events implied by name and topic - let room_name = format!("{} Admin Room", services.globals.server_name()); + let room_name = format!("{} Admin Room", services.config.server_name); services .rooms .timeline @@ -150,6 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { &room_id, &state_lock, ) + .boxed() .await?; services @@ -157,12 +165,13 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .timeline .build_and_append_pdu( PduBuilder::state(String::new(), &RoomTopicEventContent { - topic: format!("Manage {}", services.globals.server_name()), + topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://conduwuit.puppyirl.gay/", services.config.server_name), }), server_user, &room_id, &state_lock, ) + .boxed() .await?; // 6. 
Room alias @@ -180,6 +189,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { &room_id, &state_lock, ) + .boxed() .await?; services @@ -187,7 +197,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .alias .set_alias(alias, &room_id, server_user)?; - // 7. (ad-hoc) Disable room previews for everyone by default + // 7. (ad-hoc) Disable room URL previews for everyone by default services .rooms .timeline @@ -197,6 +207,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { &room_id, &state_lock, ) + .boxed() .await?; Ok(()) diff --git a/src/service/admin/execute.rs b/src/service/admin/execute.rs index 462681da..174b28ed 100644 --- a/src/service/admin/execute.rs +++ b/src/service/admin/execute.rs @@ -1,6 +1,6 @@ -use conduwuit::{debug, debug_info, error, implement, info, Err, Result}; +use conduwuit::{Err, Result, debug, debug_info, error, implement, info}; use ruma::events::room::message::RoomMessageEventContent; -use tokio::time::{sleep, Duration}; +use tokio::time::{Duration, sleep}; pub(super) const SIGNAL: &str = "SIGUSR2"; diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 3ad9283f..2d90ea52 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,74 +1,115 @@ use std::collections::BTreeMap; -use conduwuit::{error, implement, Result}; +use conduwuit::{Err, Result, debug_info, debug_warn, error, implement, matrix::pdu::PduBuilder}; use ruma::{ + RoomId, UserId, events::{ + RoomAccountDataEventType, StateEventType, room::{ member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, power_levels::RoomPowerLevelsEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, }, - RoomId, UserId, }; -use crate::pdu::PduBuilder; - /// Invite the user to the conduwuit admin room. /// /// This is equivalent to granting server admin privileges. 
#[implement(super::Service)] -pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { +pub async fn make_user_admin(&self, user_id: &UserId) -> Result { let Ok(room_id) = self.get_admin_room().await else { + debug_warn!( + "make_user_admin was called without an admin room being available or created" + ); return Ok(()); }; let state_lock = self.services.state.mutex.lock(&room_id).await; + if self.services.state_cache.is_joined(user_id, &room_id).await { + return Err!(debug_warn!("User is already joined in the admin room")); + } + if self + .services + .state_cache + .is_invited(user_id, &room_id) + .await + { + return Err!(debug_warn!("User is already pending an invitation to the admin room")); + } + // Use the server user to grant the new admin's power level - let server_user = &self.services.globals.server_user; + let server_user = self.services.globals.server_user.as_ref(); - // Invite and join the real user - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - server_user, + // if this is our local user, just forcefully join them in the room. otherwise, + // invite the remote user. 
+ if self.services.globals.user_is_local(user_id) { + debug_info!("Inviting local user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + String::from(user_id), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + server_user, + &room_id, + &state_lock, + ) + .await?; + + debug_info!("Force joining local user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + String::from(user_id), + &RoomMemberEventContent::new(MembershipState::Join), + ), + user_id, + &room_id, + &state_lock, + ) + .await?; + } else { + debug_info!("Inviting remote user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + server_user, + &room_id, + &state_lock, + ) + .await?; + } + + // Set power levels + let mut room_power_levels = self + .services + .state_accessor + .room_state_get_content::( &room_id, - &state_lock, + &StateEventType::RoomPowerLevels, + "", ) - .await?; - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Join), - ), - user_id, - &room_id, - &state_lock, - ) - .await?; + .await + .unwrap_or_default(); - // Set power level - let users = BTreeMap::from_iter([ - (server_user.clone(), 100.into()), - (user_id.to_owned(), 100.into()), - ]); + room_power_levels + .users + .insert(server_user.into(), 69420.into()); + room_power_levels.users.insert(user_id.into(), 100.into()); self.services .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { - users, - ..Default::default() - }), + PduBuilder::state(String::new(), &room_power_levels), server_user, &room_id, &state_lock, @@ -76,15 +117,17 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { .await?; // Set 
room tag - let room_tag = &self.services.server.config.admin_room_tag; + let room_tag = self.services.server.config.admin_room_tag.as_str(); if !room_tag.is_empty() { if let Err(e) = self.set_room_tag(&room_id, user_id, room_tag).await { - error!(?room_id, ?user_id, ?room_tag, ?e, "Failed to set tag for admin grant"); + error!(?room_id, ?user_id, ?room_tag, "Failed to set tag for admin grant: {e}"); } } if self.services.server.config.admin_room_notices { - let welcome_message = String::from("## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`"); + let welcome_message = String::from( + "## Thank you for trying out Continuwuity!\n\nContinuwuity is a hard fork of conduwuit, which is also a hard fork of Conduit, currently in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. 
Continuwuity is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> Source code: https://forgejo.ellis.link/continuwuation/continuwuity\n> Documentation: https://continuwuity.org/\n> Report issues: https://forgejo.ellis.link/continuwuation/continuwuity/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nContinuwuity space: `/join #space:continuwuity.org`\nContinuwuity main room (Ask questions and get notified on updates): `/join #continuwuity:continuwuity.org`\nContinuwuity offtopic room: `/join #offtopic:continuwuity.org`", + ); // Send welcome message self.services @@ -102,7 +145,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { } #[implement(super::Service)] -async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result<()> { +async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result { let mut event = self .services .account_data @@ -125,7 +168,5 @@ async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> R RoomAccountDataEventType::Tag, &serde_json::to_value(event)?, ) - .await?; - - Ok(()) + .await } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 31b046b7..b3466711 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -11,18 +11,18 @@ use std::{ use async_trait::async_trait; use conduwuit::{ - debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, Result, Server, + Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder, }; pub use create::create_admin_room; use futures::{FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{ - events::room::message::{Relation, 
RoomMessageEventContent}, OwnedEventId, OwnedRoomId, RoomId, UserId, + events::room::message::{Relation, RoomMessageEventContent}, }; use tokio::sync::RwLock; -use crate::{account_data, globals, rooms, rooms::state::RoomMutexGuard, Dep}; +use crate::{Dep, account_data, globals, rooms, rooms::state::RoomMutexGuard}; pub struct Service { services: Services, @@ -40,6 +40,7 @@ struct Services { timeline: Dep, state: Dep, state_cache: Dep, + state_accessor: Dep, account_data: Dep, services: StdRwLock>>, } @@ -85,6 +86,8 @@ impl crate::Service for Service { timeline: args.depend::("rooms::timeline"), state: args.depend::("rooms::state"), state_cache: args.depend::("rooms::state_cache"), + state_accessor: args + .depend::("rooms::state_accessor"), account_data: args.depend::("account_data"), services: None.into(), }, @@ -357,8 +360,8 @@ impl Service { } // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let emergency_password_set = self.services.globals.emergency_password().is_some(); + // the administrator can execute commands as the server user + let emergency_password_set = self.services.server.config.emergency_password.is_some(); let from_server = pdu.sender == *server_user && !emergency_password_set; if from_server && self.is_admin_room(&pdu.room_id).await { return false; diff --git a/src/service/announcements/mod.rs b/src/service/announcements/mod.rs new file mode 100644 index 00000000..4df8971b --- /dev/null +++ b/src/service/announcements/mod.rs @@ -0,0 +1,169 @@ +//! # Announcements service +//! +//! This service is responsible for checking for announcements and sending them +//! to the client. +//! +//! It is used to send announcements to the admin room and logs. +//! Annuncements are stored in /docs/static/announcements right now. +//! The highest seen announcement id is stored in the database. When the +//! 
announcement check is run, all announcements with an ID higher than those +//! seen before are printed to the console and sent to the admin room. +//! +//! Old announcements should be deleted to avoid spamming the room on first +//! install. +//! +//! Announcements are displayed as markdown in the admin room, but plain text in +//! the console. + +use std::{sync::Arc, time::Duration}; + +use async_trait::async_trait; +use conduwuit::{Result, Server, debug, info, warn}; +use database::{Deserialized, Map}; +use ruma::events::room::message::RoomMessageEventContent; +use serde::Deserialize; +use tokio::{ + sync::Notify, + time::{MissedTickBehavior, interval}, +}; + +use crate::{Dep, admin, client, globals}; + +pub struct Service { + interval: Duration, + interrupt: Notify, + db: Arc, + services: Services, +} + +struct Services { + admin: Dep, + client: Dep, + globals: Dep, + server: Arc, +} + +#[derive(Debug, Deserialize)] +struct CheckForAnnouncementsResponse { + announcements: Vec, +} + +#[derive(Debug, Deserialize)] +struct CheckForAnnouncementsResponseEntry { + id: u64, + date: Option, + message: String, +} + +const CHECK_FOR_ANNOUNCEMENTS_URL: &str = + "https://continuwuity.org/.well-known/continuwuity/announcements"; +const CHECK_FOR_ANNOUNCEMENTS_INTERVAL: u64 = 7200; // 2 hours +const LAST_CHECK_FOR_ANNOUNCEMENTS_ID: &[u8; 25] = b"last_seen_announcement_id"; +// In conduwuit, this was under b"a" + +#[async_trait] +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + interval: Duration::from_secs(CHECK_FOR_ANNOUNCEMENTS_INTERVAL), + interrupt: Notify::new(), + db: args.db["global"].clone(), + services: Services { + globals: args.depend::("globals"), + admin: args.depend::("admin"), + client: args.depend::("client"), + server: args.server.clone(), + }, + })) + } + + #[tracing::instrument(skip_all, name = "announcements", level = "debug")] + async fn worker(self: Arc) -> Result<()> { + if 
!self.services.globals.allow_announcements_check() { + debug!("Disabling announcements check"); + return Ok(()); + } + + let mut i = interval(self.interval); + i.set_missed_tick_behavior(MissedTickBehavior::Delay); + i.reset_after(self.interval); + loop { + tokio::select! { + () = self.interrupt.notified() => break, + _ = i.tick() => (), + } + + if let Err(e) = self.check().await { + warn!(%e, "Failed to check for announcements"); + } + } + + Ok(()) + } + + fn interrupt(&self) { self.interrupt.notify_waiters(); } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +impl Service { + #[tracing::instrument(skip_all)] + async fn check(&self) -> Result<()> { + debug_assert!(self.services.server.running(), "server must not be shutting down"); + + let response = self + .services + .client + .default + .get(CHECK_FOR_ANNOUNCEMENTS_URL) + .send() + .await? + .text() + .await?; + + let response = serde_json::from_str::(&response)?; + for announcement in &response.announcements { + if announcement.id > self.last_check_for_announcements_id().await { + self.handle(announcement).await; + self.update_check_for_announcements_id(announcement.id); + } + } + + Ok(()) + } + + #[tracing::instrument(skip_all)] + async fn handle(&self, announcement: &CheckForAnnouncementsResponseEntry) { + if let Some(date) = &announcement.date { + info!("[announcements] {date} {:#}", announcement.message); + } else { + info!("[announcements] {:#}", announcement.message); + } + + self.services + .admin + .send_message(RoomMessageEventContent::text_markdown(format!( + "### New announcement{}\n\n{}", + announcement + .date + .as_ref() + .map_or_else(String::new, |date| format!(" - `{date}`")), + announcement.message + ))) + .await + .ok(); + } + + #[inline] + pub fn update_check_for_announcements_id(&self, id: u64) { + self.db.raw_put(LAST_CHECK_FOR_ANNOUNCEMENTS_ID, id); + } + + pub async fn last_check_for_announcements_id(&self) -> u64 { + self.db + 
.get(LAST_CHECK_FOR_ANNOUNCEMENTS_ID) + .await + .deserialized() + .unwrap_or(0_u64) + } +} diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 2a54ee09..7be8a471 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,20 +1,20 @@ mod namespace_regex; mod registration_info; -use std::{collections::BTreeMap, sync::Arc}; +use std::{collections::BTreeMap, iter::IntoIterator, sync::Arc}; use async_trait::async_trait; -use conduwuit::{err, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, err, utils::stream::IterStream}; use database::Map; -use futures::{Future, StreamExt, TryStreamExt}; -use ruma::{api::appservice::Registration, RoomAliasId, RoomId, UserId}; -use tokio::sync::RwLock; +use futures::{Future, FutureExt, Stream, TryStreamExt}; +use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration}; +use tokio::sync::{RwLock, RwLockReadGuard}; pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo}; -use crate::{sending, Dep}; +use crate::{Dep, sending}; pub struct Service { - registration_info: RwLock>, + registration_info: RwLock, services: Services, db: Data, } @@ -27,6 +27,8 @@ struct Data { id_appserviceregistrations: Arc, } +type Registrations = BTreeMap; + #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -41,19 +43,18 @@ impl crate::Service for Service { })) } - async fn worker(self: Arc) -> Result<()> { + async fn worker(self: Arc) -> Result { // Inserting registrations into cache - for appservice in self.iter_db_ids().await? 
{ - self.registration_info.write().await.insert( - appservice.0, - appservice - .1 - .try_into() - .expect("Should be validated on registration"), - ); - } + self.iter_db_ids() + .try_for_each(async |appservice| { + self.registration_info + .write() + .await + .insert(appservice.0, appservice.1.try_into()?); - Ok(()) + Ok(()) + }) + .await } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } @@ -84,13 +85,13 @@ impl Service { /// # Arguments /// /// * `service_name` - the registration ID of the appservice - pub async fn unregister_appservice(&self, appservice_id: &str) -> Result<()> { + pub async fn unregister_appservice(&self, appservice_id: &str) -> Result { // removes the appservice registration info self.registration_info .write() .await .remove(appservice_id) - .ok_or(err!("Appservice not found"))?; + .ok_or_else(|| err!("Appservice not found"))?; // remove the appservice from the database self.db.id_appserviceregistrations.del(appservice_id); @@ -112,15 +113,6 @@ impl Service { .map(|info| info.registration) } - pub async fn iter_ids(&self) -> Vec { - self.registration_info - .read() - .await - .keys() - .cloned() - .collect() - } - pub async fn find_from_token(&self, token: &str) -> Option { self.read() .await @@ -156,15 +148,22 @@ impl Service { .any(|info| info.rooms.is_exclusive_match(room_id.as_str())) } - pub fn read( - &self, - ) -> impl Future>> - { - self.registration_info.read() + pub fn iter_ids(&self) -> impl Stream + Send { + self.read() + .map(|info| info.keys().cloned().collect::>()) + .map(IntoIterator::into_iter) + .map(IterStream::stream) + .flatten_stream() } - #[inline] - pub async fn all(&self) -> Result> { self.iter_db_ids().await } + pub fn iter_db_ids(&self) -> impl Stream> + Send { + self.db + .id_appserviceregistrations + .keys() + .and_then(move |id: &str| async move { + Ok((id.to_owned(), self.get_db_registration(id).await?)) + }) + } pub async fn get_db_registration(&self, id: &str) -> Result { self.db @@ 
-175,16 +174,7 @@ impl Service { .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) } - async fn iter_db_ids(&self) -> Result> { - self.db - .id_appserviceregistrations - .keys() - .ignore_err() - .then(|id: String| async move { - let reg = self.get_db_registration(&id).await?; - Ok((id, reg)) - }) - .try_collect() - .await + pub fn read(&self) -> impl Future> + Send { + self.registration_info.read() } } diff --git a/src/service/appservice/registration_info.rs b/src/service/appservice/registration_info.rs index 9758e186..a511f58d 100644 --- a/src/service/appservice/registration_info.rs +++ b/src/service/appservice/registration_info.rs @@ -1,5 +1,5 @@ use conduwuit::Result; -use ruma::{api::appservice::Registration, UserId}; +use ruma::{UserId, api::appservice::Registration}; use super::NamespaceRegex; diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index f63d78b8..1aeeb492 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, time::Duration}; -use conduwuit::{err, implement, trace, Config, Result}; +use conduwuit::{Config, Result, err, implement, trace}; use either::Either; use ipaddress::IPAddress; use reqwest::redirect; @@ -56,7 +56,7 @@ impl crate::Service for Service { .build()?, well_known: base(config)? 
- .dns_resolver(resolver.resolver.hooked.clone()) + .dns_resolver(resolver.resolver.clone()) .connect_timeout(Duration::from_secs(config.well_known_conn_timeout)) .read_timeout(Duration::from_secs(config.well_known_timeout)) .timeout(Duration::from_secs(config.well_known_timeout)) @@ -128,7 +128,8 @@ fn base(config: &Config) -> Result { .pool_max_idle_per_host(config.request_idle_per_host.into()) .user_agent(conduwuit::version::user_agent()) .redirect(redirect::Policy::limited(6)) - .connection_verbose(true); + .danger_accept_invalid_certs(config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure) + .connection_verbose(cfg!(debug_assertions)); #[cfg(feature = "gzip_compression")] { @@ -172,10 +173,9 @@ fn base(config: &Config) -> Result { builder = builder.no_zstd(); }; - if let Some(proxy) = config.proxy.to_proxy()? { - Ok(builder.proxy(proxy)) - } else { - Ok(builder) + match config.proxy.to_proxy()? { + | Some(proxy) => Ok(builder.proxy(proxy)), + | _ => Ok(builder), } } diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs index 8bd09a52..fd0d8764 100644 --- a/src/service/config/mod.rs +++ b/src/service/config/mod.rs @@ -2,8 +2,9 @@ use std::{iter, ops::Deref, path::Path, sync::Arc}; use async_trait::async_trait; use conduwuit::{ - config::{check, Config}, - error, implement, Result, Server, + Result, Server, + config::{Config, check}, + error, implement, }; pub struct Service { @@ -43,7 +44,15 @@ impl Deref for Service { #[implement(Service)] fn handle_reload(&self) -> Result { if self.server.config.config_reload_signal { + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) + .expect("failed to notify systemd of reloading state"); + self.reload(iter::empty())?; + + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) + .expect("failed to notify systemd of ready 
state"); } Ok(()) diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 9b2e4025..3a61f710 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -1,15 +1,15 @@ use std::sync::Arc; use async_trait::async_trait; -use conduwuit::{error, warn, Result}; +use conduwuit::{Result, error, warn}; use ruma::{ events::{ - push_rules::PushRulesEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, + GlobalAccountDataEvent, GlobalAccountDataEventType, push_rules::PushRulesEventContent, }, push::Ruleset, }; -use crate::{account_data, globals, users, Dep}; +use crate::{Dep, account_data, config, globals, users}; pub struct Service { services: Services, @@ -17,6 +17,7 @@ pub struct Service { struct Services { account_data: Dep, + config: Dep, globals: Dep, users: Dep, } @@ -27,6 +28,8 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { account_data: args.depend::("account_data"), + config: args.depend::("config"), + globals: args.depend::("globals"), users: args.depend::("users"), }, @@ -54,9 +57,9 @@ impl Service { self.services .users - .set_password(server_user, self.services.globals.emergency_password().as_deref())?; + .set_password(server_user, self.services.config.emergency_password.as_deref())?; - let (ruleset, pwd_set) = match self.services.globals.emergency_password() { + let (ruleset, pwd_set) = match self.services.config.emergency_password { | Some(_) => (Ruleset::server_default(server_user), true), | None => (Ruleset::new(), false), }; diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 3146bb8a..1d1d1154 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -2,20 +2,19 @@ use std::{fmt::Debug, mem}; use bytes::Bytes; use conduwuit::{ - debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, error::inspect_debug_log, - implement, trace, utils::string::EMPTY, Err, Error, Result, + Err, Error, Result, debug, 
debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, + error::inspect_debug_log, implement, trace, utils::string::EMPTY, }; -use http::{header::AUTHORIZATION, HeaderValue}; +use http::{HeaderValue, header::AUTHORIZATION}; use ipaddress::IPAddress; use reqwest::{Client, Method, Request, Response, Url}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, api::{ - client::error::Error as RumaError, EndpointError, IncomingResponse, MatrixVersion, - OutgoingRequest, SendAccessToken, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + client::error::Error as RumaError, federation::authentication::XMatrix, }, serde::Base64, - server_util::authorization::XMatrix, - CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, }; use crate::resolver::actual::ActualDest; @@ -65,13 +64,7 @@ where return Err!(Config("allow_federation", "Federation is disabled.")); } - if self - .services - .server - .config - .forbidden_remote_server_names - .contains(dest) - { + if self.services.moderation.is_remote_server_forbidden(dest) { return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs index dacdb20e..15521875 100644 --- a/src/service/federation/mod.rs +++ b/src/service/federation/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use conduwuit::{Result, Server}; -use crate::{client, resolver, server_keys, Dep}; +use crate::{Dep, client, moderation, resolver, server_keys}; pub struct Service { services: Services, @@ -15,6 +15,7 @@ struct Services { client: Dep, resolver: Dep, server_keys: Dep, + moderation: Dep, } impl crate::Service for Service { @@ -25,6 +26,7 @@ impl crate::Service for Service { client: args.depend::("client"), resolver: args.depend::("resolver"), server_keys: args.depend::("server_keys"), + moderation: args.depend::("moderation"), }, })) } diff --git a/src/service/globals/data.rs 
b/src/service/globals/data.rs index 07b4ac2c..21c09252 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,6 +1,6 @@ use std::sync::{Arc, RwLock}; -use conduwuit::{utils, Result}; +use conduwuit::{Result, utils}; use database::{Database, Deserialized, Map}; pub struct Data { @@ -69,17 +69,7 @@ impl Data { } #[inline] - pub fn bump_database_version(&self, new_version: u64) -> Result<()> { + pub fn bump_database_version(&self, new_version: u64) { self.global.raw_put(b"version", new_version); - Ok(()) } - - #[inline] - pub fn backup(&self) -> Result { self.db.db.backup() } - - #[inline] - pub fn backup_list(&self) -> Result { self.db.db.backup_list() } - - #[inline] - pub fn file_list(&self) -> Result { self.db.db.file_list() } } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 485d5020..a23a4c21 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,7 +7,8 @@ use std::{ time::Instant, }; -use conduwuit::{error, utils::bytes::pretty, Result, Server}; +use async_trait::async_trait; +use conduwuit::{Result, Server, error, utils::bytes::pretty}; use data::Data; use regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; @@ -27,6 +28,7 @@ pub struct Service { type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let db = Data::new(&args); @@ -73,7 +75,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let (ber_count, ber_bytes) = self.bad_event_ratelimiter.read()?.iter().fold( (0_usize, 0_usize), |(mut count, mut bytes), (event_id, _)| { @@ -89,7 +91,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { + async fn clear_cache(&self) { self.bad_event_ratelimiter .write() 
.expect("locked for writing") @@ -109,20 +111,6 @@ impl Service { #[inline] pub fn server_name(&self) -> &ServerName { self.server.name.as_ref() } - pub fn allow_registration(&self) -> bool { self.server.config.allow_registration } - - pub fn allow_guest_registration(&self) -> bool { self.server.config.allow_guest_registration } - - pub fn allow_guests_auto_join_rooms(&self) -> bool { - self.server.config.allow_guests_auto_join_rooms - } - - pub fn log_guest_registrations(&self) -> bool { self.server.config.log_guest_registrations } - - pub fn allow_encryption(&self) -> bool { self.server.config.allow_encryption } - - pub fn allow_federation(&self) -> bool { self.server.config.allow_federation } - pub fn allow_public_room_directory_over_federation(&self) -> bool { self.server .config @@ -139,7 +127,9 @@ impl Service { &self.server.config.new_user_displayname_suffix } - pub fn allow_check_for_updates(&self) -> bool { self.server.config.allow_check_for_updates } + pub fn allow_announcements_check(&self) -> bool { + self.server.config.allow_announcements_check + } pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.server.config.trusted_servers } @@ -153,8 +143,6 @@ impl Service { pub fn notification_push_path(&self) -> &String { &self.server.config.notification_push_path } - pub fn emergency_password(&self) -> &Option { &self.server.config.emergency_password } - pub fn url_preview_domain_contains_allowlist(&self) -> &Vec { &self.server.config.url_preview_domain_contains_allowlist } @@ -183,22 +171,6 @@ impl Service { pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames } - pub fn allow_local_presence(&self) -> bool { self.server.config.allow_local_presence } - - pub fn allow_incoming_presence(&self) -> bool { self.server.config.allow_incoming_presence } - - pub fn allow_outgoing_presence(&self) -> bool { self.server.config.allow_outgoing_presence } - - pub fn allow_incoming_read_receipts(&self) -> bool { - 
self.server.config.allow_incoming_read_receipts - } - - pub fn allow_outgoing_read_receipts(&self) -> bool { - self.server.config.allow_outgoing_read_receipts - } - - pub fn block_non_admin_invites(&self) -> bool { self.server.config.block_non_admin_invites } - /// checks if `user_id` is local to us via server_name comparison #[inline] pub fn user_is_local(&self, user_id: &UserId) -> bool { diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 1165c3ed..1bf048ef 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,19 +1,18 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - err, implement, + Err, Result, err, implement, utils::stream::{ReadyExt, TryIgnore}, - Err, Result, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::StreamExt; use ruma::{ + OwnedRoomId, RoomId, UserId, api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, - OwnedRoomId, RoomId, UserId, }; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { db: Data, diff --git a/src/service/manager.rs b/src/service/manager.rs index ea33d285..3cdf5945 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,14 +1,14 @@ use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; -use conduwuit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; -use futures::FutureExt; +use conduwuit::{Err, Error, Result, Server, debug, debug_warn, error, trace, utils::time, warn}; +use futures::{FutureExt, TryFutureExt}; use tokio::{ sync::{Mutex, MutexGuard}, task::{JoinHandle, JoinSet}, time::sleep, }; -use crate::{service, service::Service, Services}; +use crate::{Services, service, service::Service}; pub(crate) struct Manager { manager: Mutex>>>, @@ -183,9 +183,14 @@ async fn worker(service: Arc) -> WorkerResult { let service_ = Arc::clone(&service); let result = AssertUnwindSafe(service_.worker()) .catch_unwind() - .await 
.map_err(Error::from_panic); + let result = if service.unconstrained() { + tokio::task::unconstrained(result).await + } else { + result.await + }; + // flattens JoinError for panic into worker's Error (service, result.unwrap_or_else(Err)) } diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs new file mode 100644 index 00000000..91e00228 --- /dev/null +++ b/src/service/media/blurhash.rs @@ -0,0 +1,179 @@ +#[cfg(feature = "blurhashing")] +use conduwuit::config::BlurhashConfig as CoreBlurhashConfig; +use conduwuit::{Result, implement}; + +use super::Service; + +#[implement(Service)] +#[cfg(not(feature = "blurhashing"))] +pub fn create_blurhash( + &self, + _file: &[u8], + _content_type: Option<&str>, + _file_name: Option<&str>, +) -> Result> { + conduwuit::debug_warn!("blurhashing on upload support was not compiled"); + + Ok(None) +} + +#[implement(Service)] +#[cfg(feature = "blurhashing")] +pub fn create_blurhash( + &self, + file: &[u8], + content_type: Option<&str>, + file_name: Option<&str>, +) -> Result> { + let config = BlurhashConfig::from(self.services.server.config.blurhashing); + + // since 0 means disabled blurhashing, skipped blurhashing + if config.size_limit == 0 { + return Ok(None); + } + + get_blurhash_from_request(file, content_type, file_name, config) + .map_err(|e| conduwuit::err!(debug_error!("blurhashing error: {e}"))) + .map(Some) +} + +/// Returns the blurhash or a blurhash error which implements Display. 
+#[tracing::instrument( + name = "blurhash", + level = "debug", + skip(data), + fields( + bytes = data.len(), + ), +)] +#[cfg(feature = "blurhashing")] +fn get_blurhash_from_request( + data: &[u8], + mime: Option<&str>, + filename: Option<&str>, + config: BlurhashConfig, +) -> Result { + // Get format image is supposed to be in + let format = get_format_from_data_mime_and_filename(data, mime, filename)?; + + // Get the image reader for said image format + let decoder = get_image_decoder_with_format_and_data(format, data)?; + + // Check image size makes sense before unpacking whole image + if is_image_above_size_limit(&decoder, config) { + return Err(BlurhashingError::ImageTooLarge); + } + + let image = image::DynamicImage::from_decoder(decoder)?; + + blurhash_an_image(&image, config) +} + +/// Gets the Image Format value from the data,mime, and filename +/// It first checks if the mime is a valid image format +/// Then it checks if the filename has a format, otherwise just guess based on +/// the binary data Assumes that mime and filename extension won't be for a +/// different file format than file. 
+#[cfg(feature = "blurhashing")] +fn get_format_from_data_mime_and_filename( + data: &[u8], + mime: Option<&str>, + filename: Option<&str>, +) -> Result { + let extension = filename + .map(std::path::Path::new) + .and_then(std::path::Path::extension) + .map(std::ffi::OsStr::to_string_lossy); + + mime.or(extension.as_deref()) + .and_then(image::ImageFormat::from_mime_type) + .map_or_else(|| image::guess_format(data).map_err(Into::into), Ok) +} + +#[cfg(feature = "blurhashing")] +fn get_image_decoder_with_format_and_data( + image_format: image::ImageFormat, + data: &[u8], +) -> Result, BlurhashingError> { + let mut image_reader = image::ImageReader::new(std::io::Cursor::new(data)); + image_reader.set_format(image_format); + Ok(Box::new(image_reader.into_decoder()?)) +} + +#[cfg(feature = "blurhashing")] +fn is_image_above_size_limit( + decoder: &T, + blurhash_config: BlurhashConfig, +) -> bool { + decoder.total_bytes() >= blurhash_config.size_limit +} + +#[cfg(feature = "blurhashing")] +#[tracing::instrument(name = "encode", level = "debug", skip_all)] +#[inline] +fn blurhash_an_image( + image: &image::DynamicImage, + blurhash_config: BlurhashConfig, +) -> Result { + Ok(blurhash::encode_image( + blurhash_config.components_x, + blurhash_config.components_y, + &image.to_rgba8(), + )?) 
+} + +#[derive(Clone, Copy, Debug)] +pub struct BlurhashConfig { + pub components_x: u32, + pub components_y: u32, + + /// size limit in bytes + pub size_limit: u64, +} + +#[cfg(feature = "blurhashing")] +impl From for BlurhashConfig { + fn from(value: CoreBlurhashConfig) -> Self { + Self { + components_x: value.components_x, + components_y: value.components_y, + size_limit: value.blurhash_max_raw_size, + } + } +} + +#[derive(Debug)] +#[cfg(feature = "blurhashing")] +pub enum BlurhashingError { + HashingLibError(Box), + #[cfg(feature = "blurhashing")] + ImageError(Box), + ImageTooLarge, +} + +#[cfg(feature = "blurhashing")] +impl From for BlurhashingError { + fn from(value: image::ImageError) -> Self { Self::ImageError(Box::new(value)) } +} + +#[cfg(feature = "blurhashing")] +impl From for BlurhashingError { + fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } +} + +#[cfg(feature = "blurhashing")] +impl std::fmt::Display for BlurhashingError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Blurhash Error:")?; + match &self { + | Self::ImageTooLarge => write!(f, "Image was too large to blurhash")?, + | Self::HashingLibError(e) => + write!(f, "There was an error with the blurhashing library => {e}")?, + #[cfg(feature = "blurhashing")] + | Self::ImageError(e) => + write!(f, "There was an error with the image loading library => {e}")?, + } + + Ok(()) + } +} diff --git a/src/service/media/data.rs b/src/service/media/data.rs index f48482ea..0ccd844f 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,13 +1,12 @@ use std::{sync::Arc, time::Duration}; use conduwuit::{ - debug, debug_info, err, - utils::{str_from_bytes, stream::TryIgnore, string_from_bytes, ReadyExt}, - Err, Result, + Err, Result, debug, debug_info, err, + utils::{ReadyExt, str_from_bytes, stream::TryIgnore, string_from_bytes}, }; use database::{Database, Interfix, Map}; use futures::StreamExt; -use 
ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; +use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; use super::{preview::UrlPreviewData, thumbnail::Dim}; diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 9555edd7..5fd628cd 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -8,12 +8,12 @@ use std::{ }; use conduwuit::{ - debug, debug_info, debug_warn, error, info, - utils::{stream::TryIgnore, ReadyExt}, - warn, Config, Result, + Config, Result, debug, debug_info, debug_warn, error, info, + utils::{ReadyExt, stream::TryIgnore}, + warn, }; -use crate::{migrations, Services}; +use crate::Services; /// Migrates a media directory from legacy base64 file names to sha2 file names. /// All errors are fatal. Upon success the database is keyed to not perform this @@ -48,12 +48,6 @@ pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { } } - // Apply fix from when sha256_media was backward-incompat and bumped the schema - // version from 13 to 14. For users satisfying these conditions we can go back. 
- if services.globals.db.database_version().await == 14 && migrations::DATABASE_VERSION == 13 { - services.globals.db.bump_database_version(13)?; - } - db["global"].insert(b"feat_sha256_media", []); info!("Finished applying sha256_media"); Ok(()) diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 0d98853d..d053ba54 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,20 +1,20 @@ +pub mod blurhash; mod data; pub(super) mod migrations; mod preview; mod remote; mod tests; mod thumbnail; - use std::{path::PathBuf, sync::Arc, time::SystemTime}; use async_trait::async_trait; -use base64::{engine::general_purpose, Engine as _}; +use base64::{Engine as _, engine::general_purpose}; use conduwuit::{ - debug, debug_error, debug_info, debug_warn, err, error, trace, + Err, Result, Server, debug, debug_error, debug_info, debug_warn, err, error, trace, utils::{self, MutexMap}, - warn, Err, Result, Server, + warn, }; -use ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; +use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt, BufReader}, @@ -22,7 +22,7 @@ use tokio::{ use self::data::{Data, Metadata}; pub use self::thumbnail::Dim; -use crate::{client, globals, sending, Dep}; +use crate::{Dep, client, globals, moderation, sending}; #[derive(Debug)] pub struct FileMeta { @@ -42,6 +42,7 @@ struct Services { client: Dep, globals: Dep, sending: Dep, + moderation: Dep, } /// generated MXC ID (`media-id`) length @@ -64,6 +65,7 @@ impl crate::Service for Service { client: args.depend::("client"), globals: args.depend::("globals"), sending: args.depend::("sending"), + moderation: args.depend::("moderation"), }, })) } @@ -105,22 +107,27 @@ impl Service { /// Deletes a file in the database and from the media directory via an MXC pub async fn delete(&self, mxc: &Mxc<'_>) -> Result<()> { - if let Ok(keys) = self.db.search_mxc_metadata_prefix(mxc).await { - for key in 
keys { - trace!(?mxc, "MXC Key: {key:?}"); - debug_info!(?mxc, "Deleting from filesystem"); + match self.db.search_mxc_metadata_prefix(mxc).await { + | Ok(keys) => { + for key in keys { + trace!(?mxc, "MXC Key: {key:?}"); + debug_info!(?mxc, "Deleting from filesystem"); - if let Err(e) = self.remove_media_file(&key).await { - debug_error!(?mxc, "Failed to remove media file: {e}"); + if let Err(e) = self.remove_media_file(&key).await { + debug_error!(?mxc, "Failed to remove media file: {e}"); + } + + debug_info!(?mxc, "Deleting from database"); + self.db.delete_file_mxc(mxc).await; } - debug_info!(?mxc, "Deleting from database"); - self.db.delete_file_mxc(mxc).await; - } - - Ok(()) - } else { - Err!(Database(error!("Failed to find any media keys for MXC {mxc} in our database."))) + Ok(()) + }, + | _ => { + Err!(Database(error!( + "Failed to find any media keys for MXC {mxc} in our database." + ))) + }, } } @@ -154,22 +161,21 @@ impl Service { /// Downloads a file. pub async fn get(&self, mxc: &Mxc<'_>) -> Result> { - if let Ok(Metadata { content_disposition, content_type, key }) = - self.db.search_file_metadata(mxc, &Dim::default()).await - { - let mut content = Vec::with_capacity(8192); - let path = self.get_media_file(&key); - BufReader::new(fs::File::open(path).await?) - .read_to_end(&mut content) - .await?; + match self.db.search_file_metadata(mxc, &Dim::default()).await { + | Ok(Metadata { content_disposition, content_type, key }) => { + let mut content = Vec::with_capacity(8192); + let path = self.get_media_file(&key); + BufReader::new(fs::File::open(path).await?) 
+ .read_to_end(&mut content) + .await?; - Ok(Some(FileMeta { - content: Some(content), - content_type, - content_disposition, - })) - } else { - Ok(None) + Ok(Some(FileMeta { + content: Some(content), + content_type, + content_disposition, + })) + }, + | _ => Ok(None), } } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index e7f76bab..91660a58 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -7,7 +7,7 @@ use std::time::SystemTime; -use conduwuit::{debug, Err, Result}; +use conduwuit::{Err, Result, debug, err}; use conduwuit_core::implement; use ipaddress::IPAddress; use serde::Serialize; @@ -64,28 +64,33 @@ pub async fn get_url_preview(&self, url: &Url) -> Result { async fn request_url_preview(&self, url: &Url) -> Result { if let Ok(ip) = IPAddress::parse(url.host_str().expect("URL previously validated")) { if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Requesting from this address is forbidden")); + return Err!(Request(Forbidden("Requesting from this address is forbidden"))); } } let client = &self.services.client.url_preview; let response = client.head(url.as_str()).send().await?; + debug!(?url, "URL preview response headers: {:?}", response.headers()); + if let Some(remote_addr) = response.remote_addr() { + debug!(?url, "URL preview response remote address: {:?}", remote_addr); + if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Requesting from this address is forbidden")); + return Err!(Request(Forbidden("Requesting from this address is forbidden"))); } } } - let Some(content_type) = response - .headers() - .get(reqwest::header::CONTENT_TYPE) - .and_then(|x| x.to_str().ok()) - else { - return Err!(Request(Unknown("Unknown Content-Type"))); + let Some(content_type) = response.headers().get(reqwest::header::CONTENT_TYPE) else { + return Err!(Request(Unknown("Unknown or invalid 
Content-Type header"))); }; + + let content_type = content_type + .to_str() + .map_err(|e| err!(Request(Unknown("Unknown or invalid Content-Type header: {e}"))))?; + let data = match content_type { | html if html.starts_with("text/html") => self.download_html(url.as_str()).await?, | img if img.starts_with("image/") => self.download_image(url.as_str()).await?, @@ -251,7 +256,7 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { if allowlist_url_contains .iter() - .any(|url_s| url.to_string().contains(&url_s.to_string())) + .any(|url_s| url.to_string().contains(url_s)) { debug!("URL {} is allowed by url_preview_url_contains_allowlist (check 4/4)", &host); return true; diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index ca73c3ef..a1e874d8 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -1,21 +1,21 @@ use std::{fmt::Debug, time::Duration}; use conduwuit::{ - debug_warn, err, implement, utils::content_disposition::make_content_disposition, Err, Error, - Result, + Err, Error, Result, debug_warn, err, implement, + utils::content_disposition::make_content_disposition, }; -use http::header::{HeaderValue, CONTENT_DISPOSITION, CONTENT_TYPE}; +use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE, HeaderValue}; use ruma::{ + Mxc, ServerName, UserId, api::{ + OutgoingRequest, client::{ error::ErrorKind::{NotFound, Unrecognized}, media, }, federation, federation::authenticated_media::{Content, FileOrLocation}, - OutgoingRequest, }, - Mxc, ServerName, UserId, }; use super::{Dim, FileMeta}; @@ -32,12 +32,12 @@ pub async fn fetch_remote_thumbnail( self.check_fetch_authorized(mxc)?; let result = self - .fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim) + .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) .await; if let Err(Error::Request(NotFound, ..)) = &result { return self - .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) + .fetch_thumbnail_unauthenticated(mxc, user, 
server, timeout_ms, dim) .await; } @@ -55,12 +55,12 @@ pub async fn fetch_remote_content( self.check_fetch_authorized(mxc)?; let result = self - .fetch_content_unauthenticated(mxc, user, server, timeout_ms) + .fetch_content_authenticated(mxc, user, server, timeout_ms) .await; if let Err(Error::Request(NotFound, ..)) = &result { return self - .fetch_content_authenticated(mxc, user, server, timeout_ms) + .fetch_content_unauthenticated(mxc, user, server, timeout_ms) .await; } @@ -283,7 +283,7 @@ async fn location_request(&self, location: &str) -> Result { .map_err(Into::into) .map(|content| FileMeta { content: Some(content), - content_type: content_type.clone().map(Into::into), + content_type: content_type.clone(), content_disposition: Some(make_content_disposition( content_disposition.as_ref(), content_type.as_deref(), @@ -423,10 +423,8 @@ pub async fn fetch_remote_content_legacy( fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> { if self .services - .server - .config - .prevent_media_downloads_from - .contains(mxc.server_name) + .moderation + .is_remote_server_media_downloads_forbidden(mxc.server_name) { // we'll lie to the client and say the blocked server's media was not found and // log. the client has no way of telling anyways so this is a security bonus. 
diff --git a/src/service/media/tests.rs b/src/service/media/tests.rs index 1d6dce30..651e0ade 100644 --- a/src/service/media/tests.rs +++ b/src/service/media/tests.rs @@ -5,7 +5,7 @@ async fn long_file_names_works() { use std::path::PathBuf; - use base64::{engine::general_purpose, Engine as _}; + use base64::{Engine as _, engine::general_purpose}; use super::*; diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs index 7350b3a1..e5a98774 100644 --- a/src/service/media/thumbnail.rs +++ b/src/service/media/thumbnail.rs @@ -7,14 +7,14 @@ use std::{cmp, num::Saturating as Sat}; -use conduwuit::{checked, err, implement, Result}; -use ruma::{http_headers::ContentDisposition, media::Method, Mxc, UInt, UserId}; +use conduwuit::{Result, checked, err, implement}; +use ruma::{Mxc, UInt, UserId, http_headers::ContentDisposition, media::Method}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt}, }; -use super::{data::Metadata, FileMeta}; +use super::{FileMeta, data::Metadata}; /// Dimension specification for a thumbnail. 
#[derive(Debug)] @@ -65,12 +65,12 @@ impl super::Service { // 0, 0 because that's the original file let dim = dim.normalized(); - if let Ok(metadata) = self.db.search_file_metadata(mxc, &dim).await { - self.get_thumbnail_saved(metadata).await - } else if let Ok(metadata) = self.db.search_file_metadata(mxc, &Dim::default()).await { - self.get_thumbnail_generate(mxc, &dim, metadata).await - } else { - Ok(None) + match self.db.search_file_metadata(mxc, &dim).await { + | Ok(metadata) => self.get_thumbnail_saved(metadata).await, + | _ => match self.db.search_file_metadata(mxc, &Dim::default()).await { + | Ok(metadata) => self.get_thumbnail_generate(mxc, &dim, metadata).await, + | _ => Ok(None), + }, } } } diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 27b4ab5a..512a7867 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -1,25 +1,25 @@ use std::cmp; use conduwuit::{ - debug, debug_info, debug_warn, error, info, + Err, Result, debug, debug_info, debug_warn, error, info, result::NotFound, utils::{ - stream::{TryExpect, TryIgnore}, IterStream, ReadyExt, + stream::{TryExpect, TryIgnore}, }, - warn, Err, Result, + warn, }; use futures::{FutureExt, StreamExt}; use itertools::Itertools; use ruma::{ + OwnedUserId, RoomId, UserId, events::{ - push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType, + GlobalAccountDataEventType, push_rules::PushRulesEvent, room::member::MembershipState, }, push::Ruleset, - OwnedUserId, RoomId, UserId, }; -use crate::{media, Services}; +use crate::{Services, media}; /// The current schema version. /// - If database is opened at greater version we reject with error. The @@ -27,15 +27,7 @@ use crate::{media, Services}; /// - If database is opened at lesser version we apply migrations up to this. /// Note that named-feature migrations may also be performed when opening at /// equal or lesser version. These are expected to be backward-compatible. 
-pub(crate) const DATABASE_VERSION: u64 = 13; - -/// Conduit's database version. -/// -/// Conduit bumped the database version to 16, but did not introduce any -/// breaking changes. Their database migrations are extremely fragile and risky, -/// and also do not really apply to us, so just to retain Conduit -> conduwuit -/// compatibility we'll check for both versions. -pub(crate) const CONDUIT_DATABASE_VERSION: u64 = 16; +pub(crate) const DATABASE_VERSION: u64 = 17; pub(crate) async fn migrations(services: &Services) -> Result<()> { let users_count = services.users.count().await; @@ -63,10 +55,7 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> { async fn fresh(services: &Services) -> Result<()> { let db = &services.db; - services - .globals - .db - .bump_database_version(DATABASE_VERSION)?; + services.globals.db.bump_database_version(DATABASE_VERSION); db["global"].insert(b"feat_sha256_media", []); db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); @@ -130,6 +119,7 @@ async fn migrate(services: &Services) -> Result<()> { .get(b"fix_referencedevents_missing_sep") .await .is_not_found() + || services.globals.db.database_version().await < 17 { fix_referencedevents_missing_sep(services).await?; } @@ -138,15 +128,19 @@ async fn migrate(services: &Services) -> Result<()> { .get(b"fix_readreceiptid_readreceipt_duplicates") .await .is_not_found() + || services.globals.db.database_version().await < 17 { fix_readreceiptid_readreceipt_duplicates(services).await?; } - let version_match = services.globals.db.database_version().await == DATABASE_VERSION - || services.globals.db.database_version().await == CONDUIT_DATABASE_VERSION; + if services.globals.db.database_version().await < 17 { + services.globals.db.bump_database_version(17); + info!("Migration: Bumped database version to 17"); + } - assert!( - version_match, + assert_eq!( + services.globals.db.database_version().await, + DATABASE_VERSION, "Failed asserting local database version 
{} is equal to known latest conduwuit database \ version {}", services.globals.db.database_version().await, @@ -290,7 +284,7 @@ async fn db_lt_12(services: &Services) -> Result<()> { .await?; } - services.globals.db.bump_database_version(12)?; + services.globals.db.bump_database_version(12); info!("Migration: 11 -> 12 finished"); Ok(()) } @@ -335,7 +329,7 @@ async fn db_lt_13(services: &Services) -> Result<()> { .await?; } - services.globals.db.bump_database_version(13)?; + services.globals.db.bump_database_version(13); info!("Migration: 12 -> 13 finished"); Ok(()) } @@ -513,8 +507,10 @@ async fn fix_referencedevents_missing_sep(services: &Services) -> Result { } async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result { + use conduwuit::arrayvec::ArrayString; use ruma::identifiers_validation::MAX_BYTES; - type ArrayId = arrayvec::ArrayString; + + type ArrayId = ArrayString; type Key<'a> = (&'a RoomId, u64, &'a UserId); warn!("Fixing undeleted entries in readreceiptid_readreceipt..."); diff --git a/src/service/mod.rs b/src/service/mod.rs index 71bd0eb4..eb15e5ec 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,3 +1,4 @@ +#![type_length_limit = "8192"] #![allow(refining_impl_trait)] mod manager; @@ -7,6 +8,7 @@ pub mod services; pub mod account_data; pub mod admin; +pub mod announcements; pub mod appservice; pub mod client; pub mod config; @@ -15,6 +17,7 @@ pub mod federation; pub mod globals; pub mod key_backups; pub mod media; +pub mod moderation; pub mod presence; pub mod pusher; pub mod resolver; @@ -24,13 +27,11 @@ pub mod server_keys; pub mod sync; pub mod transaction_ids; pub mod uiaa; -pub mod updates; pub mod users; extern crate conduwuit_core as conduwuit; extern crate conduwuit_database as database; -pub use conduwuit::{pdu, PduBuilder, PduCount, PduEvent}; pub(crate) use service::{Args, Dep, Service}; pub use crate::services::Services; diff --git a/src/service/moderation.rs b/src/service/moderation.rs new file mode 
100644 index 00000000..c3e55a1d --- /dev/null +++ b/src/service/moderation.rs @@ -0,0 +1,93 @@ +use std::sync::Arc; + +use conduwuit::{Result, implement}; +use ruma::ServerName; + +use crate::{Dep, config}; + +pub struct Service { + services: Services, +} + +struct Services { + // pub server: Arc, + pub config: Dep, +} + +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + services: Services { + // server: args.server.clone(), + config: args.depend::("config"), + }, + })) + } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +#[implement(Service)] +#[must_use] +pub fn is_remote_server_ignored(&self, server_name: &ServerName) -> bool { + // We must never block federating with ourselves + if server_name == self.services.config.server_name { + return false; + } + + self.services + .config + .ignore_messages_from_server_names + .is_match(server_name.host()) +} + +#[implement(Service)] +#[must_use] +pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { + // We must never block federating with ourselves + if server_name == self.services.config.server_name { + return false; + } + + // Check if server is explicitly allowed + if self + .services + .config + .allowed_remote_server_names + .is_match(server_name.host()) + { + return false; + } + + // Check if server is explicitly forbidden + self.services + .config + .forbidden_remote_server_names + .is_match(server_name.host()) +} + +#[implement(Service)] +#[must_use] +pub fn is_remote_server_room_directory_forbidden(&self, server_name: &ServerName) -> bool { + // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) + // OR forbidden contains server + self.is_remote_server_forbidden(server_name) + || self + .services + .config + .forbidden_remote_room_directory_server_names + .is_match(server_name.host()) +} + +#[implement(Service)] +#[must_use] +pub fn 
is_remote_server_media_downloads_forbidden(&self, server_name: &ServerName) -> bool { + // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) + // OR forbidden contains server + self.is_remote_server_forbidden(server_name) + || self + .services + .config + .prevent_media_downloads_from + .is_match(server_name.host()) +} diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 4ec0a7ee..d7ef5175 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -1,16 +1,15 @@ use std::sync::Arc; use conduwuit::{ - debug_warn, utils, - utils::{stream::TryIgnore, ReadyExt}, - Result, + Result, debug_warn, utils, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Json, Map}; use futures::Stream; -use ruma::{events::presence::PresenceEvent, presence::PresenceState, UInt, UserId}; +use ruma::{UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; use super::Presence; -use crate::{globals, users, Dep}; +use crate::{Dep, globals, users}; pub(crate) struct Data { presenceid_presence: Arc, diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index eb4105e5..8f646be6 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -5,16 +5,16 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; use conduwuit::{ - checked, debug, debug_warn, error, result::LogErr, trace, Error, Result, Server, + Error, Result, Server, checked, debug, debug_warn, error, result::LogErr, trace, }; use database::Database; -use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; +use futures::{Stream, StreamExt, TryFutureExt, stream::FuturesUnordered}; use loole::{Receiver, Sender}; -use ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; +use ruma::{OwnedUserId, UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; use tokio::time::sleep; use self::{data::Data, 
presence::Presence}; -use crate::{globals, users, Dep}; +use crate::{Dep, globals, users}; pub struct Service { timer_channel: (Sender, Receiver), diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index b322dfb4..3357bd61 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -1,8 +1,8 @@ -use conduwuit::{utils, Error, Result}; +use conduwuit::{Error, Result, utils}; use ruma::{ + UInt, UserId, events::presence::{PresenceEvent, PresenceEventContent}, presence::PresenceState, - UInt, UserId, }; use serde::{Deserialize, Serialize}; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 43d60c08..27490fb8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -2,34 +2,35 @@ use std::{fmt::Debug, mem, sync::Arc}; use bytes::BytesMut; use conduwuit::{ - debug_warn, err, trace, + Err, PduEvent, Result, debug_warn, err, trace, utils::{stream::TryIgnore, string_from_bytes}, - warn, Err, PduEvent, Result, + warn, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ + DeviceId, OwnedDeviceId, RoomId, UInt, UserId, api::{ - client::push::{set_pusher, Pusher, PusherKind}, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + client::push::{Pusher, PusherKind, set_pusher}, push_gateway::send_event_notification::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ - room::power_levels::RoomPowerLevelsEventContent, AnySyncTimelineEvent, StateEventType, - TimelineEventType, + AnySyncTimelineEvent, StateEventType, TimelineEventType, + room::power_levels::RoomPowerLevelsEventContent, }, push::{ Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, Ruleset, Tweak, }, serde::Raw, - uint, RoomId, UInt, UserId, + uint, }; -use crate::{client, globals, rooms, sending, 
users, Dep}; +use crate::{Dep, client, globals, rooms, sending, users}; pub struct Service { db: Data, @@ -47,6 +48,7 @@ struct Services { struct Data { senderkey_pusher: Arc, + pushkey_deviceid: Arc, } impl crate::Service for Service { @@ -54,6 +56,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { db: Data { senderkey_pusher: args.db["senderkey_pusher"].clone(), + pushkey_deviceid: args.db["pushkey_deviceid"].clone(), }, services: Services { globals: args.depend::("globals"), @@ -74,6 +77,7 @@ impl Service { pub async fn set_pusher( &self, sender: &UserId, + sender_device: &DeviceId, pusher: &set_pusher::v3::PusherAction, ) -> Result { match pusher { @@ -122,24 +126,35 @@ impl Service { } } - let key = (sender, data.pusher.ids.pushkey.as_str()); + let pushkey = data.pusher.ids.pushkey.as_str(); + let key = (sender, pushkey); self.db.senderkey_pusher.put(key, Json(pusher)); + self.db.pushkey_deviceid.insert(pushkey, sender_device); }, | set_pusher::v3::PusherAction::Delete(ids) => { - let key = (sender, ids.pushkey.as_str()); - self.db.senderkey_pusher.del(key); - - self.services - .sending - .cleanup_events(None, Some(sender), Some(ids.pushkey.as_str())) - .await - .ok(); + self.delete_pusher(sender, ids.pushkey.as_str()).await; }, } Ok(()) } + pub async fn delete_pusher(&self, sender: &UserId, pushkey: &str) { + let key = (sender, pushkey); + self.db.senderkey_pusher.del(key); + self.db.pushkey_deviceid.remove(pushkey); + + self.services + .sending + .cleanup_events(None, Some(sender), Some(pushkey)) + .await + .ok(); + } + + pub async fn get_pusher_device(&self, pushkey: &str) -> Result { + self.db.pushkey_deviceid.get(pushkey).await.deserialized() + } + pub async fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result { let senderkey = (sender, pushkey); self.db diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 66854764..0151c4d7 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ 
-3,15 +3,15 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use conduwuit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; +use conduwuit::{Err, Result, debug, debug_info, err, error, trace}; use futures::{FutureExt, TryFutureExt}; -use hickory_resolver::error::ResolveError; +use hickory_resolver::ResolveError; use ipaddress::IPAddress; use ruma::ServerName; use super::{ cache::{CachedDest, CachedOverride, MAX_IPS}, - fed::{add_port_to_hostname, get_ip_with_port, FedDest, PortString}, + fed::{FedDest, PortString, add_port_to_hostname, get_ip_with_port}, }; #[derive(Clone, Debug)] @@ -71,12 +71,19 @@ impl super::Service { | None => if let Some(pos) = dest.as_str().find(':') { self.actual_dest_2(dest, cache, pos).await? - } else if let Some(delegated) = self.request_well_known(dest.as_str()).await? { - self.actual_dest_3(&mut host, cache, delegated).await? - } else if let Some(overrider) = self.query_srv_record(dest.as_str()).await? { - self.actual_dest_4(&host, cache, overrider).await? } else { - self.actual_dest_5(dest, cache).await? + self.conditional_query_and_cache(dest.as_str(), 8448, true) + .await?; + self.services.server.check_running()?; + match self.request_well_known(dest.as_str()).await? { + | Some(delegated) => + self.actual_dest_3(&mut host, cache, delegated).await?, + | _ => match self.query_srv_record(dest.as_str()).await? { + | Some(overrider) => + self.actual_dest_4(&host, cache, overrider).await?, + | _ => self.actual_dest_5(dest, cache).await?, + }, + } }, }; @@ -136,10 +143,10 @@ impl super::Service { self.actual_dest_3_2(cache, delegated, pos).await } else { trace!("Delegated hostname has no port in this branch"); - if let Some(overrider) = self.query_srv_record(&delegated).await? { - self.actual_dest_3_3(cache, delegated, overrider).await - } else { - self.actual_dest_3_4(cache, delegated).await + match self.query_srv_record(&delegated).await? 
{ + | Some(overrider) => + self.actual_dest_3_3(cache, delegated, overrider).await, + | _ => self.actual_dest_3_4(cache, delegated).await, } }, } @@ -239,56 +246,6 @@ impl super::Service { Ok(add_port_to_hostname(dest.as_str())) } - #[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] - async fn request_well_known(&self, dest: &str) -> Result> { - self.conditional_query_and_cache(dest, 8448, true).await?; - - self.services.server.check_running()?; - trace!("Requesting well known for {dest}"); - let response = self - .services - .client - .well_known - .get(format!("https://{dest}/.well-known/matrix/server")) - .send() - .await; - - trace!("response: {response:?}"); - if let Err(e) = &response { - debug!("error: {e:?}"); - return Ok(None); - } - - let response = response?; - if !response.status().is_success() { - debug!("response not 2XX"); - return Ok(None); - } - - let text = response.text().await?; - trace!("response text: {text:?}"); - if text.len() >= 12288 { - debug_warn!("response contains junk"); - return Ok(None); - } - - let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); - - let m_server = body - .get("m.server") - .unwrap_or(&serde_json::Value::Null) - .as_str() - .unwrap_or_default(); - - if ruma::identifiers_validation::server_name::validate(m_server).is_err() { - debug_error!("response content missing or invalid"); - return Ok(None); - } - - debug_info!("{dest:?} found at {m_server:?}"); - Ok(Some(m_server.to_owned())) - } - #[inline] async fn conditional_query_and_cache( &self, @@ -359,7 +316,7 @@ impl super::Service { let hostname = hostname.trim_end_matches('.'); match self.resolver.resolver.srv_lookup(hostname).await { | Err(e) => Self::handle_resolve_error(&e, hostname)?, - | Ok(result) => + | Ok(result) => { return Ok(result.iter().next().map(|result| { FedDest::Named( result.target().to_string().trim_end_matches('.').to_owned(), @@ -368,7 +325,8 @@ impl super::Service { .try_into() 
.unwrap_or_else(|_| FedDest::default_port()), ) - })), + })); + }, } } @@ -376,25 +334,28 @@ impl super::Service { } fn handle_resolve_error(e: &ResolveError, host: &'_ str) -> Result<()> { - use hickory_resolver::error::ResolveErrorKind; + use hickory_resolver::{ResolveErrorKind::Proto, proto::ProtoErrorKind}; - match *e.kind() { - | ResolveErrorKind::NoRecordsFound { .. } => { - // Raise to debug_warn if we can find out the result wasn't from cache - debug!(%host, "No DNS records found: {e}"); - Ok(()) - }, - | ResolveErrorKind::Timeout => { - Err!(warn!(%host, "DNS {e}")) - }, - | ResolveErrorKind::NoConnections => { - error!( - "Your DNS server is overloaded and has ran out of connections. It is \ - strongly recommended you remediate this issue to ensure proper federation \ - connectivity." - ); + match e.kind() { + | Proto(e) => match e.kind() { + | ProtoErrorKind::NoRecordsFound { .. } => { + // Raise to debug_warn if we can find out the result wasn't from cache + debug!(%host, "No DNS records found: {e}"); + Ok(()) + }, + | ProtoErrorKind::Timeout => { + Err!(warn!(%host, "DNS {e}")) + }, + | ProtoErrorKind::NoConnections => { + error!( + "Your DNS server is overloaded and has ran out of connections. It is \ + strongly recommended you remediate this issue to ensure proper \ + federation connectivity." 
+ ); - Err!(error!(%host, "DNS error: {e}")) + Err!(error!(%host, "DNS error: {e}")) + }, + | _ => Err!(error!(%host, "DNS error: {e}")), }, | _ => Err!(error!(%host, "DNS error: {e}")), } diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 22a92865..cfea7187 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -1,13 +1,13 @@ use std::{net::IpAddr, sync::Arc, time::SystemTime}; -use arrayvec::ArrayVec; use conduwuit::{ + Result, + arrayvec::ArrayVec, at, err, implement, utils::{math::Expected, rand, stream::TryIgnore}, - Result, }; use database::{Cbor, Deserialized, Map}; -use futures::{Stream, StreamExt}; +use futures::{Stream, StreamExt, future::join}; use ruma::ServerName; use serde::{Deserialize, Serialize}; @@ -45,6 +45,21 @@ impl Cache { } } +#[implement(Cache)] +pub async fn clear(&self) { join(self.clear_destinations(), self.clear_overrides()).await; } + +#[implement(Cache)] +pub async fn clear_destinations(&self) { self.destinations.clear().await; } + +#[implement(Cache)] +pub async fn clear_overrides(&self) { self.overrides.clear().await; } + +#[implement(Cache)] +pub fn del_destination(&self, name: &ServerName) { self.destinations.remove(name); } + +#[implement(Cache)] +pub fn del_override(&self, name: &ServerName) { self.overrides.remove(name); } + #[implement(Cache)] pub fn set_destination(&self, name: &ServerName, dest: &CachedDest) { self.destinations.raw_put(name, Cbor(dest)); @@ -96,7 +111,7 @@ pub fn destinations(&self) -> impl Stream + Se self.destinations .stream() .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) } #[implement(Cache)] @@ -104,7 +119,7 @@ pub fn overrides(&self) -> impl Stream + S self.overrides .stream() .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) } impl CachedDest { diff --git 
a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index ca6106e2..3a0b2551 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -1,20 +1,20 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; use futures::FutureExt; -use hickory_resolver::{lookup_ip::LookupIp, TokioAsyncResolver}; +use hickory_resolver::{TokioResolver, lookup_ip::LookupIp}; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use super::cache::{Cache, CachedOverride}; pub struct Resolver { - pub(crate) resolver: Arc, + pub(crate) resolver: Arc, pub(crate) hooked: Arc, server: Arc, } pub(crate) struct Hooked { - resolver: Arc, + resolver: Arc, cache: Arc, server: Arc, } @@ -42,7 +42,7 @@ impl Resolver { let mut ns = sys_conf.clone(); if config.query_over_tcp_only { - ns.protocol = hickory_resolver::config::Protocol::Tcp; + ns.protocol = hickory_resolver::proto::xfer::Protocol::Tcp; } ns.trust_negative_responses = !config.query_all_nameservers; @@ -51,6 +51,7 @@ impl Resolver { } opts.cache_size = config.dns_cache_entries as usize; + opts.preserve_intermediates = true; opts.negative_min_ttl = Some(Duration::from_secs(config.dns_min_ttl_nxdomain)); opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 30)); opts.positive_min_ttl = Some(Duration::from_secs(config.dns_min_ttl)); @@ -60,8 +61,7 @@ impl Resolver { opts.try_tcp_on_error = config.dns_tcp_fallback; opts.num_concurrent_reqs = 1; opts.edns0 = true; - opts.shuffle_dns_servers = true; - opts.rotate = true; + opts.case_randomization = true; opts.ip_strategy = match config.ip_lookup_strategy { | 1 => hickory_resolver::config::LookupIpStrategy::Ipv4Only, | 2 => hickory_resolver::config::LookupIpStrategy::Ipv6Only, @@ -69,15 +69,23 @@ impl Resolver { | 4 => hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4, | _ => hickory_resolver::config::LookupIpStrategy::Ipv4thenIpv6, }; - opts.authentic_data = false; - let 
resolver = Arc::new(TokioAsyncResolver::tokio(conf, opts)); + let rt_prov = hickory_resolver::proto::runtime::TokioRuntimeProvider::new(); + let conn_prov = hickory_resolver::name_server::TokioConnectionProvider::new(rt_prov); + let mut builder = TokioResolver::builder_with_config(conf, conn_prov); + *builder.options_mut() = opts; + let resolver = Arc::new(builder.build()); + Ok(Arc::new(Self { resolver: resolver.clone(), hooked: Arc::new(Hooked { resolver, cache, server: server.clone() }), server: server.clone(), })) } + + /// Clear the in-memory hickory-dns caches + #[inline] + pub fn clear_cache(&self) { self.resolver.clear_cache(); } } impl Resolve for Resolver { @@ -101,7 +109,7 @@ impl Resolve for Hooked { async fn hooked_resolve( cache: Arc, server: Arc, - resolver: Arc, + resolver: Arc, name: Name, ) -> Result> { match cache.get_override(name.as_str()).await { @@ -125,7 +133,7 @@ async fn hooked_resolve( async fn resolve_to_reqwest( server: Arc, - resolver: Arc, + resolver: Arc, name: Name, ) -> ResolvingResult { use std::{io, io::ErrorKind::Interrupted}; diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index bfe100e7..e5bee9ac 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -4,8 +4,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use arrayvec::ArrayString; -use conduwuit::utils::math::Expected; +use conduwuit::{arrayvec::ArrayString, utils::math::Expected}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 090e562d..c513cec9 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -2,15 +2,17 @@ pub mod actual; pub mod cache; mod dns; pub mod fed; +#[cfg(test)] mod tests; +mod well_known; use std::sync::Arc; -use arrayvec::ArrayString; -use conduwuit::{utils::MutexMap, Result, Server}; +use async_trait::async_trait; +use conduwuit::{Result, Server, 
arrayvec::ArrayString, utils::MutexMap}; use self::{cache::Cache, dns::Resolver}; -use crate::{client, Dep}; +use crate::{Dep, client}; pub struct Service { pub cache: Arc, @@ -27,6 +29,7 @@ struct Services { type Resolving = MutexMap; type NameBuf = ArrayString<256>; +#[async_trait] impl crate::Service for Service { #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] fn build(args: crate::Args<'_>) -> Result> { @@ -42,5 +45,10 @@ impl crate::Service for Service { })) } + async fn clear_cache(&self) { + self.resolver.clear_cache(); + self.cache.clear().await; + } + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/resolver/tests.rs b/src/service/resolver/tests.rs index 870f5eab..068e08bd 100644 --- a/src/service/resolver/tests.rs +++ b/src/service/resolver/tests.rs @@ -1,6 +1,4 @@ -#![cfg(test)] - -use super::fed::{add_port_to_hostname, get_ip_with_port, FedDest}; +use super::fed::{FedDest, add_port_to_hostname, get_ip_with_port}; #[test] fn ips_get_default_ports() { diff --git a/src/service/resolver/well_known.rs b/src/service/resolver/well_known.rs new file mode 100644 index 00000000..68a8e620 --- /dev/null +++ b/src/service/resolver/well_known.rs @@ -0,0 +1,49 @@ +use conduwuit::{Result, debug, debug_error, debug_info, debug_warn, implement, trace}; + +#[implement(super::Service)] +#[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] +pub(super) async fn request_well_known(&self, dest: &str) -> Result> { + trace!("Requesting well known for {dest}"); + let response = self + .services + .client + .well_known + .get(format!("https://{dest}/.well-known/matrix/server")) + .send() + .await; + + trace!("response: {response:?}"); + if let Err(e) = &response { + debug!("error: {e:?}"); + return Ok(None); + } + + let response = response?; + if !response.status().is_success() { + debug!("response not 2XX"); + return Ok(None); + } + + let text = 
response.text().await?; + trace!("response text: {text:?}"); + if text.len() >= 12288 { + debug_warn!("response contains junk"); + return Ok(None); + } + + let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); + + let m_server = body + .get("m.server") + .unwrap_or(&serde_json::Value::Null) + .as_str() + .unwrap_or_default(); + + if ruma::identifiers_validation::server_name::validate(m_server).is_err() { + debug_error!("response content missing or invalid"); + return Ok(None); + } + + debug_info!("{dest:?} found at {m_server:?}"); + Ok(Some(m_server.to_owned())) +} diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 17ed5e13..866e45a9 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -3,21 +3,20 @@ mod remote; use std::sync::Arc; use conduwuit::{ - err, - utils::{stream::TryIgnore, ReadyExt}, - Err, Result, Server, + Err, Result, Server, err, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ - events::{ - room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - StateEventType, - }, OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId, UserId, + events::{ + StateEventType, + room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, }; -use crate::{admin, appservice, appservice::RegistrationInfo, globals, rooms, sending, Dep}; +use crate::{Dep, admin, appservice, appservice::RegistrationInfo, globals, rooms, sending}; pub struct Service { db: Data, diff --git a/src/service/rooms/alias/remote.rs b/src/service/rooms/alias/remote.rs index 7744bee2..60aed76d 100644 --- a/src/service/rooms/alias/remote.rs +++ b/src/service/rooms/alias/remote.rs @@ -1,8 +1,8 @@ use std::iter::once; -use conduwuit::{debug, debug_error, err, implement, Result}; +use conduwuit::{Result, debug, debug_error, err, implement}; use 
federation::query::get_room_information::v1::Response; -use ruma::{api::federation, OwnedRoomId, OwnedServerName, RoomAliasId, ServerName}; +use ruma::{OwnedRoomId, OwnedServerName, RoomAliasId, ServerName, api::federation}; #[implement(super::Service)] pub(super) async fn remote_resolve( diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index af8ae364..8c3588cc 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{err, utils, utils::math::usize_from_f64, Err, Result}; +use conduwuit::{Err, Result, err, utils, utils::math::usize_from_f64}; use database::Map; use lru_cache::LruCache; diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index df2663b2..0903ea75 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -4,21 +4,22 @@ use std::{ collections::{BTreeSet, HashSet, VecDeque}, fmt::Debug, sync::Arc, + time::Instant, }; use conduwuit::{ - at, debug, debug_error, implement, trace, + Err, Result, at, debug, debug_error, implement, trace, utils::{ - stream::{ReadyExt, TryBroadbandExt}, IterStream, + stream::{ReadyExt, TryBroadbandExt}, }, - validated, warn, Err, Result, + validated, warn, }; -use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; -use crate::{rooms, rooms::short::ShortEventId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortEventId}; pub struct Service { services: Services, @@ -30,6 +31,8 @@ struct Services { timeline: Dep, } +type Bucket<'a> = BTreeSet<(u64, &'a EventId)>; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -45,42 +48,22 @@ impl crate::Service for Service { } #[implement(Service)] -pub async fn event_ids_iter<'a, I>( 
+pub fn event_ids_iter<'a, I>( &'a self, - room_id: &RoomId, + room_id: &'a RoomId, starting_events: I, -) -> Result + Send + '_> +) -> impl Stream> + Send + 'a where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { - let stream = self - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .stream(); - - Ok(stream) -} - -#[implement(Service)] -pub async fn get_event_ids<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, -) -> Result> -where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, -{ - let chain = self.get_auth_chain(room_id, starting_events).await?; - let event_ids = self - .services - .short - .multi_get_eventid_from_short(chain.into_iter().stream()) - .ready_filter_map(Result::ok) - .collect() - .await; - - Ok(event_ids) + self.get_auth_chain(room_id, starting_events) + .map_ok(|chain| { + self.services + .short + .multi_get_eventid_from_short(chain.into_iter().stream()) + .ready_filter(Result::is_ok) + }) + .try_flatten_stream() } #[implement(Service)] @@ -94,9 +77,9 @@ where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? 
- const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); + const BUCKET: Bucket<'_> = BTreeSet::new(); - let started = std::time::Instant::now(); + let started = Instant::now(); let mut starting_ids = self .services .short @@ -120,53 +103,7 @@ where let full_auth_chain: Vec = buckets .into_iter() .try_stream() - .broad_and_then(|chunk| async move { - let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); - - if chunk_key.is_empty() { - return Ok(Vec::new()); - } - - if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { - return Ok(cached.to_vec()); - } - - let chunk_cache: Vec<_> = chunk - .into_iter() - .try_stream() - .broad_and_then(|(shortid, event_id)| async move { - if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { - return Ok(cached.to_vec()); - } - - let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); - debug!( - ?event_id, - elapsed = ?started.elapsed(), - "Cache missed event" - ); - - Ok(auth_chain) - }) - .try_collect() - .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) - .map_ok(|mut chunk_cache: Vec<_>| { - chunk_cache.sort_unstable(); - chunk_cache.dedup(); - chunk_cache - }) - .await?; - - self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); - debug!( - chunk_cache_length = ?chunk_cache.len(), - elapsed = ?started.elapsed(), - "Cache missed chunk", - ); - - Ok(chunk_cache) - }) + .broad_and_then(|chunk| self.get_auth_chain_outer(room_id, started, chunk)) .try_collect() .map_ok(|auth_chain: Vec<_>| auth_chain.into_iter().flatten().collect()) .map_ok(|mut full_auth_chain: Vec<_>| { @@ -174,6 +111,7 @@ where full_auth_chain.dedup(); full_auth_chain }) + .boxed() .await?; debug!( @@ -185,6 +123,60 @@ where Ok(full_auth_chain) } +#[implement(Service)] +async fn get_auth_chain_outer( + &self, + room_id: &RoomId, + started: Instant, + chunk: Bucket<'_>, +) -> Result> { + let chunk_key: Vec 
= chunk.iter().map(at!(0)).collect(); + + if chunk_key.is_empty() { + return Ok(Vec::new()); + } + + if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { + return Ok(cached.to_vec()); + } + + let chunk_cache: Vec<_> = chunk + .into_iter() + .try_stream() + .broad_and_then(|(shortid, event_id)| async move { + if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { + return Ok(cached.to_vec()); + } + + let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; + self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); + debug!( + ?event_id, + elapsed = ?started.elapsed(), + "Cache missed event" + ); + + Ok(auth_chain) + }) + .try_collect() + .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) + .map_ok(|mut chunk_cache: Vec<_>| { + chunk_cache.sort_unstable(); + chunk_cache.dedup(); + chunk_cache + }) + .await?; + + self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); + debug!( + chunk_cache_length = ?chunk_cache.len(), + elapsed = ?started.elapsed(), + "Cache missed chunk", + ); + + Ok(chunk_cache) +} + #[implement(Service)] #[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] async fn get_auth_chain_inner( diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 039efca7..4ea10641 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,9 +1,9 @@ use std::sync::Arc; -use conduwuit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, implement, utils::stream::TryIgnore}; use database::Map; use futures::Stream; -use ruma::{api::client::room::Visibility, RoomId}; +use ruma::{RoomId, api::client::room::Visibility}; pub struct Service { db: Data, diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs index 714b6fc1..f847015b 100644 --- a/src/service/rooms/event_handler/acl_check.rs +++ 
b/src/service/rooms/event_handler/acl_check.rs @@ -1,7 +1,7 @@ -use conduwuit::{debug, implement, trace, warn, Err, Result}; +use conduwuit::{Err, Result, debug, implement, trace, warn}; use ruma::{ - events::{room::server_acl::RoomServerAclEventContent, StateEventType}, RoomId, ServerName, + events::{StateEventType, room::server_acl::RoomServerAclEventContent}, }; /// Returns Ok if the acl allows the server @@ -14,14 +14,21 @@ pub async fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Res .room_state_get_content(room_id, &StateEventType::RoomServerAcl, "") .await .map(|c: RoomServerAclEventContent| c) - .inspect(|acl| trace!("ACL content found: {acl:?}")) - .inspect_err(|e| trace!("No ACL content found: {e:?}")) + .inspect(|acl| trace!(%room_id, "ACL content found: {acl:?}")) + .inspect_err(|e| trace!(%room_id, "No ACL content found: {e:?}")) else { return Ok(()); }; if acl_event_content.allow.is_empty() { - warn!("Ignoring broken ACL event (allow key is empty)"); + warn!(%room_id, "Ignoring broken ACL event (allow key is empty)"); + return Ok(()); + } + + if acl_event_content.deny.contains(&String::from("*")) + && acl_event_content.allow.contains(&String::from("*")) + { + warn!(%room_id, "Ignoring broken ACL event (allow key and deny key both contain wildcard \"*\""); return Ok(()); } diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 540ebb64..b0a7d827 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -1,16 +1,14 @@ use std::{ - collections::{hash_map, BTreeMap, HashSet, VecDeque}, - sync::Arc, + collections::{BTreeMap, HashSet, VecDeque, hash_map}, time::Instant, }; use conduwuit::{ - debug, debug_error, debug_warn, implement, pdu, trace, - utils::continue_exponential_backoff_secs, warn, PduEvent, + PduEvent, debug, debug_error, debug_warn, implement, pdu, 
trace, + utils::continue_exponential_backoff_secs, warn, }; -use futures::TryFutureExt; use ruma::{ - api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, RoomId, ServerName, + CanonicalJsonValue, OwnedEventId, RoomId, ServerName, api::federation::event::get_event, }; use super::get_room_version_id; @@ -31,7 +29,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( events: &'a [OwnedEventId], create_event: &'a PduEvent, room_id: &'a RoomId, -) -> Vec<(Arc, Option>)> { +) -> Vec<(PduEvent, Option>)> { let back_off = |id| match self .services .globals @@ -53,7 +51,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( // a. Look in the main timeline (pduid_pdu tree) // b. Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(local_pdu) = self.services.timeline.get_pdu(id).map_ok(Arc::new).await { + if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { trace!("Found {id} in db"); events_with_auth_events.push((id, Some(local_pdu), vec![])); continue; @@ -138,12 +136,15 @@ pub(super) async fn fetch_and_handle_outliers<'a>( .and_then(CanonicalJsonValue::as_array) { for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value::(auth_event.clone().into()) - { - todo_auth_events.push_back(auth_event); - } else { - warn!("Auth event id is not valid"); + match serde_json::from_value::( + auth_event.clone().into(), + ) { + | Ok(auth_event) => { + todo_auth_events.push_back(auth_event); + }, + | _ => { + warn!("Auth event id is not valid"); + }, } } } else { diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index aea70739..0f92d6e6 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -1,14 +1,13 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet, VecDeque}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use conduwuit::{debug_warn, err, implement, PduEvent, 
Result}; -use futures::{future, FutureExt}; -use ruma::{ - int, +use conduwuit::{ + PduEvent, Result, debug_warn, err, implement, state_res::{self}, - uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, +}; +use futures::{FutureExt, future}; +use ruma::{ + CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, int, + uint, }; use super::check_room_id; @@ -29,7 +28,7 @@ pub(super) async fn fetch_prev( initial_set: Vec, ) -> Result<( Vec, - HashMap, BTreeMap)>, + HashMap)>, )> { let mut graph: HashMap = HashMap::with_capacity(initial_set.len()); let mut eventid_info = HashMap::new(); @@ -40,54 +39,59 @@ pub(super) async fn fetch_prev( while let Some(prev_event_id) = todo_outlier_stack.pop_front() { self.services.server.check_running()?; - if let Some((pdu, mut json_opt)) = self + match self .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id) .boxed() .await .pop() { - check_room_id(room_id, &pdu)?; + | Some((pdu, mut json_opt)) => { + check_room_id(room_id, &pdu)?; - let limit = self.services.server.config.max_fetch_prev_events; - if amount > limit { - debug_warn!("Max prev event limit reached! Limit: {limit}"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if json_opt.is_none() { - json_opt = self - .services - .outlier - .get_outlier_pdu_json(&prev_event_id) - .await - .ok(); - } - - if let Some(json) = json_opt { - if pdu.origin_server_ts > first_ts_in_room { - amount = amount.saturating_add(1); - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push_back(prev_prev.clone()); - } - } - - graph - .insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect()); - } else { - // Time based check failed + let limit = self.services.server.config.max_fetch_prev_events; + if amount > limit { + debug_warn!("Max prev event limit reached! 
Limit: {limit}"); graph.insert(prev_event_id.clone(), HashSet::new()); + continue; } - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed, so this was not fetched over federation + if json_opt.is_none() { + json_opt = self + .services + .outlier + .get_outlier_pdu_json(&prev_event_id) + .await + .ok(); + } + + if let Some(json) = json_opt { + if pdu.origin_server_ts > first_ts_in_room { + amount = amount.saturating_add(1); + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push_back(prev_prev.clone()); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); + } + }, + | _ => { + // Fetch and handle failed graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); + }, } } diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 4f2580db..0f9e093b 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -1,10 +1,10 @@ -use std::collections::{hash_map, HashMap}; +use std::collections::{HashMap, hash_map}; -use conduwuit::{debug, debug_warn, implement, Err, Error, PduEvent, Result}; +use conduwuit::{Err, Error, PduEvent, Result, debug, debug_warn, implement}; use futures::FutureExt; use ruma::{ - api::federation::event::get_room_state_ids, events::StateEventType, EventId, OwnedEventId, - RoomId, ServerName, + EventId, OwnedEventId, RoomId, ServerName, api::federation::event::get_room_state_ids, + events::StateEventType, }; use 
crate::rooms::short::ShortStateKey; @@ -58,10 +58,11 @@ pub(super) async fn fetch_state( | hash_map::Entry::Vacant(v) => { v.insert(pdu.event_id.clone()); }, - | hash_map::Entry::Occupied(_) => + | hash_map::Entry::Occupied(_) => { return Err!(Database( "State event's type and state_key combination exists multiple times.", - )), + )); + }, } } diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 31c7762d..77cae41d 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -1,14 +1,17 @@ use std::{ - collections::{hash_map, BTreeMap}, + collections::{BTreeMap, hash_map}, time::Instant, }; -use conduwuit::{debug, debug::INFO_SPAN_LEVEL, err, implement, warn, Err, Result}; -use futures::{ - future::{try_join5, OptionFuture}, - FutureExt, +use conduwuit::{ + Err, Result, debug, debug::INFO_SPAN_LEVEL, defer, err, implement, utils::stream::IterStream, + warn, }; -use ruma::{events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId}; +use futures::{ + FutureExt, TryFutureExt, TryStreamExt, + future::{OptionFuture, try_join5}, +}; +use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UserId, events::StateEventType}; use crate::rooms::timeline::RawPduId; @@ -86,7 +89,7 @@ pub async fn handle_incoming_pdu<'a>( .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, ""); - let (meta_exists, is_disabled, (), (), create_event) = try_join5( + let (meta_exists, is_disabled, (), (), ref create_event) = try_join5( meta_exists, is_disabled, origin_acl_check, @@ -104,7 +107,7 @@ pub async fn handle_incoming_pdu<'a>( } let (incoming_pdu, val) = self - .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false) + .handle_outlier_pdu(origin, create_event, event_id, room_id, value, false) .await?; // 8. 
if not timeline event: stop @@ -129,66 +132,71 @@ pub async fn handle_incoming_pdu<'a>( let (sorted_prev_events, mut eventid_info) = self .fetch_prev( origin, - &create_event, + create_event, room_id, first_ts_in_room, incoming_pdu.prev_events.clone(), ) .await?; - debug!(events = ?sorted_prev_events, "Got previous events"); - for prev_id in sorted_prev_events { - self.services.server.check_running()?; - if let Err(e) = self - .handle_prev_pdu( + debug!( + events = ?sorted_prev_events, + "Handling previous events" + ); + + sorted_prev_events + .iter() + .try_stream() + .map_ok(AsRef::as_ref) + .try_for_each(|prev_id| { + self.handle_prev_pdu( origin, event_id, room_id, - &mut eventid_info, - &create_event, + eventid_info.remove(prev_id), + create_event, first_ts_in_room, - &prev_id, + prev_id, ) - .await - { - use hash_map::Entry; - - let now = Instant::now(); - warn!("Prev event {prev_id} failed: {e}"); - - match self - .services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry(prev_id) - { - | Entry::Vacant(e) => { - e.insert((now, 1)); - }, - | Entry::Occupied(mut e) => { - *e.get_mut() = (now, e.get().1.saturating_add(1)); - }, - }; - } - } + .inspect_err(move |e| { + warn!("Prev {prev_id} failed: {e}"); + match self + .services + .globals + .bad_event_ratelimiter + .write() + .expect("locked") + .entry(prev_id.into()) + { + | hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + | hash_map::Entry::Occupied(mut e) => { + let tries = e.get().1.saturating_add(1); + *e.get_mut() = (Instant::now(), tries); + }, + } + }) + .map(|_| self.services.server.check_running()) + }) + .boxed() + .await?; // Done with prev events, now handling the incoming event let start_time = Instant::now(); self.federation_handletime .write() .expect("locked") - .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + .insert(room_id.into(), (event_id.to_owned(), start_time)); - let r = self - 
.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id) - .await; + defer! {{ + self.federation_handletime + .write() + .expect("locked") + .remove(room_id); + }}; - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - r + self.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, create_event, origin, room_id) + .boxed() + .await } diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index a35aabe0..5339249d 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -1,15 +1,12 @@ -use std::{ - collections::{hash_map, BTreeMap, HashMap}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashMap, hash_map}; -use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; -use futures::{future::ready, TryFutureExt}; +use conduwuit::{ + Err, Error, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, +}; +use futures::future::ready; use ruma::{ - api::client::error::ErrorKind, - events::StateEventType, - state_res::{self, EventTypeExt}, CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, + api::client::error::ErrorKind, events::StateEventType, }; use super::{check_room_id, get_room_version_id, to_room_version}; @@ -24,7 +21,7 @@ pub(super) async fn handle_outlier_pdu<'a>( room_id: &'a RoomId, mut value: CanonicalJsonObject, auth_events_known: bool, -) -> Result<(Arc, BTreeMap)> { +) -> Result<(PduEvent, BTreeMap)> { // 1. 
Remove unsigned field value.remove("unsigned"); @@ -56,10 +53,11 @@ pub(super) async fn handle_outlier_pdu<'a>( obj }, - | Err(e) => + | Err(e) => { return Err!(Request(InvalidParam(debug_error!( "Signature verification failed for {event_id}: {e}" - )))), + )))); + }, }; // Now that we have checked the signature and hashes we can add the eventID and @@ -94,7 +92,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // Build map of auth events let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); for id in &incoming_pdu.auth_events { - let Ok(auth_event) = self.services.timeline.get_pdu(id).map_ok(Arc::new).await else { + let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { warn!("Could not find auth event {id}"); continue; }; @@ -122,19 +120,14 @@ pub(super) async fn handle_outlier_pdu<'a>( // The original create event must be in the auth events if !matches!( - auth_events - .get(&(StateEventType::RoomCreate, String::new())) - .map(AsRef::as_ref), + auth_events.get(&(StateEventType::RoomCreate, String::new().into())), Some(_) | None ) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Incoming event refers to wrong create event.", - )); + return Err!(Request(InvalidParam("Incoming event refers to wrong create event."))); } - let state_fetch = |ty: &'static StateEventType, sk: &str| { - let key = ty.with_state_key(sk); + let state_fetch = |ty: &StateEventType, sk: &str| { + let key = (ty.to_owned(), sk.into()); ready(auth_events.get(&key)) }; @@ -160,5 +153,5 @@ pub(super) async fn handle_outlier_pdu<'a>( trace!("Added pdu as outlier."); - Ok((Arc::new(incoming_pdu), val)) + Ok((incoming_pdu, val)) } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index f911f1fd..d612b2bf 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -1,14 +1,10 @@ -use std::{ - collections::{BTreeMap, 
HashMap}, - sync::Arc, - time::Instant, -}; +use std::{collections::BTreeMap, time::Instant}; use conduwuit::{ - debug, debug::INFO_SPAN_LEVEL, implement, utils::continue_exponential_backoff_secs, Err, - PduEvent, Result, + Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, defer, implement, + utils::continue_exponential_backoff_secs, }; -use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; +use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UInt}; #[implement(super::Service)] #[allow(clippy::type_complexity)] @@ -24,13 +20,10 @@ pub(super) async fn handle_prev_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - eventid_info: &mut HashMap< - OwnedEventId, - (Arc, BTreeMap), - >, - create_event: &PduEvent, + eventid_info: Option<(PduEvent, BTreeMap)>, + create_event: &'a PduEvent, first_ts_in_room: UInt, - prev_id: &EventId, + prev_id: &'a EventId, ) -> Result { // Check for disabled again because it might have changed if self.services.metadata.is_disabled(room_id).await { @@ -61,31 +54,35 @@ pub(super) async fn handle_prev_pdu<'a>( } } - if let Some((pdu, json)) = eventid_info.remove(prev_id) { - // Skip old events - if pdu.origin_server_ts < first_ts_in_room { - return Ok(()); - } + let Some((pdu, json)) = eventid_info else { + return Ok(()); + }; - let start_time = Instant::now(); - self.federation_handletime - .write() - .expect("locked") - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - - self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) - .await?; - - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - debug!( - elapsed = ?start_time.elapsed(), - "Handled prev_event", - ); + // Skip old events + if pdu.origin_server_ts < first_ts_in_room { + return Ok(()); } + let start_time = Instant::now(); + self.federation_handletime + .write() + .expect("locked") + .insert(room_id.into(), ((*prev_id).to_owned(), 
start_time)); + + defer! {{ + self.federation_handletime + .write() + .expect("locked") + .remove(room_id); + }}; + + self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) + .await?; + + debug!( + elapsed = ?start_time.elapsed(), + "Handled prev_event", + ); + Ok(()) } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 8bcbc48b..45675da8 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -17,17 +17,14 @@ use std::{ time::Instant, }; -use conduwuit::{ - utils::{MutexMap, TryFutureExtExt}, - Err, PduEvent, Result, Server, -}; -use futures::TryFutureExt; +use async_trait::async_trait; +use conduwuit::{Err, PduEvent, Result, RoomVersion, Server, utils::MutexMap}; use ruma::{ - events::room::create::RoomCreateEventContent, state_res::RoomVersion, OwnedEventId, - OwnedRoomId, RoomId, RoomVersionId, + OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, + events::room::create::RoomCreateEventContent, }; -use crate::{globals, rooms, sending, server_keys, Dep}; +use crate::{Dep, globals, rooms, sending, server_keys}; pub struct Service { pub mutex_federation: RoomMutexMap, @@ -54,6 +51,7 @@ struct Services { type RoomMutexMap = MutexMap; type HandleTimeMap = HashMap; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -79,7 +77,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex_federation = self.mutex_federation.len(); writeln!(out, "federation_mutex: {mutex_federation}")?; @@ -101,13 +99,8 @@ impl Service { self.services.timeline.pdu_exists(&event_id).await } - async fn event_fetch(&self, event_id: OwnedEventId) -> Option> { - self.services - .timeline - .get_pdu(&event_id) - .map_ok(Arc::new) - .ok() - .await + async fn event_fetch(&self, event_id: 
OwnedEventId) -> Option { + self.services.timeline.get_pdu(&event_id).await.ok() } } diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 9b130763..a49fc541 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -1,4 +1,4 @@ -use conduwuit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; +use conduwuit::{Result, err, implement, pdu::gen_event_id_canonical_json, result::FlatOk}; use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 1fd91ac6..b3a7a71b 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,17 +5,15 @@ use std::{ }; use conduwuit::{ - debug, err, implement, - utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, - Result, -}; -use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{ + Error, Result, err, implement, state_res::{self, StateMap}, - OwnedEventId, RoomId, RoomVersionId, + trace, + utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt, automatic_width}, }; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; +use ruma::{OwnedEventId, RoomId, RoomVersionId}; -use crate::rooms::state_compressor::CompressedStateEvent; +use crate::rooms::state_compressor::CompressedState; #[implement(super::Service)] #[tracing::instrument(name = "resolve", level = "debug", skip_all)] @@ -24,14 +22,14 @@ pub async fn resolve_state( room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap, -) -> Result>> { - debug!("Loading current room state ids"); +) -> Result> { + trace!("Loading current room state ids"); let current_sstatehash = 
self .services .state .get_room_shortstatehash(room_id) - .await - .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}"))))?; + .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}")))) + .await?; let current_state_ids: HashMap<_, _> = self .services @@ -40,53 +38,44 @@ pub async fn resolve_state( .collect() .await; + trace!("Loading fork states"); let fork_states = [current_state_ids, incoming_state]; - let auth_chain_sets: Vec> = fork_states + let auth_chain_sets = fork_states .iter() .try_stream() - .wide_and_then(|state| async move { - let starting_events = state.values().map(Borrow::borrow); - - let auth_chain = self - .services + .wide_and_then(|state| { + self.services .auth_chain - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .collect(); - - Ok(auth_chain) + .event_ids_iter(room_id, state.values().map(Borrow::borrow)) + .try_collect() }) - .try_collect() - .await?; + .try_collect::>>(); - debug!("Loading fork states"); - let fork_states: Vec> = fork_states - .into_iter() + let fork_states = fork_states + .iter() .stream() - .wide_then(|fork_state| async move { + .wide_then(|fork_state| { let shortstatekeys = fork_state.keys().copied().stream(); - - let event_ids = fork_state.values().cloned().stream().boxed(); - + let event_ids = fork_state.values().cloned().stream(); self.services .short .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) .collect() - .await }) - .collect() - .await; + .map(Ok::<_, Error>) + .try_collect::>>(); - debug!("Resolving state"); + let (fork_states, auth_chain_sets) = try_join(fork_states, auth_chain_sets).await?; + + trace!("Resolving state"); let state = self - .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) .boxed() .await?; - debug!("State resolution done."); + trace!("State resolution done."); let state_events: 
Vec<_> = state .iter() .stream() @@ -99,15 +88,11 @@ pub async fn resolve_state( .collect() .await; - debug!("Compressing state..."); - let new_room_state: HashSet<_> = self + trace!("Compressing state..."); + let new_room_state: CompressedState = self .services .state_compressor - .compress_state_events( - state_events - .iter() - .map(|(ref ssk, eid)| (ssk, (*eid).borrow())), - ) + .compress_state_events(state_events.iter().map(|(ssk, eid)| (ssk, (*eid).borrow()))) .collect() .await; @@ -116,20 +101,25 @@ pub async fn resolve_state( #[implement(super::Service)] #[tracing::instrument(name = "ruma", level = "debug", skip_all)] -pub async fn state_resolution( - &self, - room_version: &RoomVersionId, - state_sets: &[StateMap], - auth_chain_sets: &[HashSet], -) -> Result> { +pub async fn state_resolution<'a, StateSets>( + &'a self, + room_version: &'a RoomVersionId, + state_sets: StateSets, + auth_chain_sets: &'a [HashSet], +) -> Result> +where + StateSets: Iterator> + Clone + Send, +{ + let event_fetch = |event_id| self.event_fetch(event_id); + let event_exists = |event_id| self.event_exists(event_id); state_res::resolve( room_version, - state_sets.iter(), + state_sets, auth_chain_sets, - &|event_id| self.event_fetch(event_id), - &|event_id| self.event_exists(event_id), + &event_fetch, + &event_exists, automatic_width(), ) - .await .map_err(|e| err!(error!("State resolution failed: {e:?}"))) + .await } diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 7ef047ab..eb38c2c3 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -1,17 +1,19 @@ use std::{ borrow::Borrow, collections::{HashMap, HashSet}, - sync::Arc, + iter::Iterator, }; use conduwuit::{ - debug, err, implement, - result::LogErr, - utils::stream::{BroadbandExt, IterStream}, - PduEvent, Result, + Result, debug, err, implement, + matrix::{PduEvent, StateMap}, + 
trace, + utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, }; -use futures::{FutureExt, StreamExt}; -use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; +use ruma::{OwnedEventId, RoomId, RoomVersionId}; + +use crate::rooms::short::ShortStateHash; // TODO: if we know the prev_events of the incoming event we can avoid the #[implement(super::Service)] @@ -19,7 +21,7 @@ use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; #[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_degree_one( &self, - incoming_pdu: &Arc, + incoming_pdu: &PduEvent, ) -> Result>> { let prev_event = &incoming_pdu.prev_events[0]; let Ok(prev_event_sstatehash) = self @@ -66,91 +68,48 @@ pub(super) async fn state_at_incoming_degree_one( #[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_resolved( &self, - incoming_pdu: &Arc, + incoming_pdu: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, ) -> Result>> { - debug!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len()); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let Ok(prev_event) = self.services.timeline.get_pdu(prev_eventid).await else { - okay = false; - break; - }; - - let Ok(sstatehash) = self - .services - .state_accessor - .pdu_shortstatehash(prev_eventid) - .await - else { - okay = false; - break; - }; - - extremity_sstatehashes.insert(sstatehash, prev_event); - } - - if !okay { + trace!("Calculating extremity statehashes..."); + let Ok(extremity_sstatehashes) = incoming_pdu + .prev_events + .iter() + .try_stream() + .broad_and_then(|prev_eventid| { + self.services + .timeline + .get_pdu(prev_eventid) + .map_ok(move |prev_event| (prev_eventid, prev_event)) + }) 
+ .broad_and_then(|(prev_eventid, prev_event)| { + self.services + .state_accessor + .pdu_shortstatehash(prev_eventid) + .map_ok(move |sstatehash| (sstatehash, prev_event)) + }) + .try_collect::>() + .await + else { return Ok(None); - } + }; - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: HashMap<_, _> = self - .services - .state_accessor - .state_full_ids(sstatehash) - .collect() - .await; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) - .await; - - let event_id = &prev_event.event_id; - leaf_state.insert(shortstatekey, event_id.clone()); - // Now it's the state after the pdu - } - - let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); - for (k, id) in &leaf_state { - if let Ok((ty, st_key)) = self - .services - .short - .get_statekey_from_short(*k) - .await - .log_err() - { - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); - } - - starting_events.push(id.borrow()); - } - - let auth_chain: HashSet = self - .services - .auth_chain - .get_event_ids(room_id, starting_events.into_iter()) - .await? 
+ trace!("Calculating fork states..."); + let (fork_states, auth_chain_sets): (Vec>, Vec>) = + extremity_sstatehashes .into_iter() - .collect(); - - auth_chain_sets.push(auth_chain); - fork_states.push(state); - } + .try_stream() + .wide_and_then(|(sstatehash, prev_event)| { + self.state_at_incoming_fork(room_id, sstatehash, prev_event) + }) + .try_collect() + .map_ok(Vec::into_iter) + .map_ok(Iterator::unzip) + .await?; let Ok(new_state) = self - .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) .boxed() .await else { @@ -158,16 +117,65 @@ pub(super) async fn state_at_incoming_resolved( }; new_state - .iter() + .into_iter() .stream() - .broad_then(|((event_type, state_key), event_id)| { + .broad_then(|((event_type, state_key), event_id)| async move { self.services .short - .get_or_create_shortstatekey(event_type, state_key) - .map(move |shortstatekey| (shortstatekey, event_id.clone())) + .get_or_create_shortstatekey(&event_type, &state_key) + .map(move |shortstatekey| (shortstatekey, event_id)) + .await }) .collect() .map(Some) .map(Ok) .await } + +#[implement(super::Service)] +async fn state_at_incoming_fork( + &self, + room_id: &RoomId, + sstatehash: ShortStateHash, + prev_event: PduEvent, +) -> Result<(StateMap, HashSet)> { + let mut leaf_state: HashMap<_, _> = self + .services + .state_accessor + .state_full_ids(sstatehash) + .collect() + .await; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) + .await; + + let event_id = &prev_event.event_id; + leaf_state.insert(shortstatekey, event_id.clone()); + // Now it's the state after the pdu + } + + let auth_chain = self + .services + .auth_chain + .event_ids_iter(room_id, leaf_state.values().map(Borrow::borrow)) + .try_collect(); + + let fork_state = leaf_state + .iter() + .stream() + 
.broad_then(|(k, id)| { + self.services + .short + .get_statekey_from_short(*k) + .map_ok(|(ty, sk)| ((ty, sk), id.clone())) + }) + .ready_filter_map(Result::ok) + .collect() + .map(Ok); + + try_join(fork_state, auth_chain).await +} diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index f0c8f0c5..97d3df97 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,26 +1,25 @@ -use std::{ - borrow::Borrow, - collections::{BTreeMap, HashSet}, - sync::Arc, - time::Instant, -}; +use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; -use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; -use futures::{future::ready, StreamExt}; -use ruma::{ - api::client::error::ErrorKind, - events::{room::redaction::RoomRedactionEventContent, StateEventType, TimelineEventType}, - state_res::{self, EventTypeExt}, - CanonicalJsonValue, RoomId, RoomVersionId, ServerName, +use conduwuit::{ + Err, Result, debug, debug_info, err, implement, + matrix::{EventTypeExt, PduEvent, StateKey, state_res}, + trace, + utils::stream::{BroadbandExt, ReadyExt}, + warn, }; +use futures::{FutureExt, StreamExt, future::ready}; +use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType}; use super::{get_room_version_id, to_room_version}; -use crate::rooms::{state_compressor::HashSetCompressStateEvent, timeline::RawPduId}; +use crate::rooms::{ + state_compressor::{CompressedState, HashSetCompressStateEvent}, + timeline::RawPduId, +}; #[implement(super::Service)] pub(super) async fn upgrade_outlier_to_timeline_pdu( &self, - incoming_pdu: Arc, + incoming_pdu: PduEvent, val: BTreeMap, create_event: &PduEvent, origin: &ServerName, @@ -74,8 +73,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( debug!("Performing auth check"); // 11. 
Check the auth of the event passes based on the state of the event let state_fetch_state = &state_at_incoming_event; - let state_fetch = |k: &'static StateEventType, s: String| async move { - let shortstatekey = self.services.short.get_shortstatekey(k, &s).await.ok()?; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = self.services.short.get_shortstatekey(&k, &s).await.ok()?; let event_id = state_fetch_state.get(&shortstatekey)?; self.services.timeline.get_pdu(event_id).await.ok() @@ -85,7 +84,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( &room_version, &incoming_pdu, None, // TODO: third party invite - |k, s| state_fetch(k, s.to_owned()), + |ty, sk| state_fetch(ty.clone(), sk.into()), ) .await .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; @@ -107,7 +106,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( ) .await?; - let state_fetch = |k: &'static StateEventType, s: &str| { + let state_fetch = |k: &StateEventType, s: &str| { let key = k.with_state_key(s); ready(auth_events.get(&key).cloned()) }; @@ -123,46 +122,15 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Soft fail check before doing state res debug!("Performing soft-fail check"); - let soft_fail = { - use RoomVersionId::*; - - !auth_check - || incoming_pdu.kind == TimelineEventType::RoomRedaction - && match room_version_id { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = &incoming_pdu.redacts { - !self - .services - .state_accessor - .user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - ) - .await? - } else { - false - } - }, - | _ => { - let content: RoomRedactionEventContent = incoming_pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - !self - .services - .state_accessor - .user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - ) - .await? 
- } else { - false - } - }, - } + let soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_id)) { + | (false, _) => true, + | (true, None) => false, + | (true, Some(redact_id)) => + !self + .services + .state_accessor + .user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .await?, }; // 13. Use state resolution to find new room state @@ -174,42 +142,34 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Now we calculate the set of extremities this room has after the incoming // event has been applied. We start with the previous extremities (aka leaves) trace!("Calculating extremities"); - let mut extremities: HashSet<_> = self + let extremities: Vec<_> = self .services .state .get_forward_extremities(room_id) .map(ToOwned::to_owned) + .ready_filter(|event_id| { + // Remove any that are referenced by this incoming event's prev_events + !incoming_pdu.prev_events.contains(event_id) + }) + .broad_filter_map(|event_id| async move { + // Only keep those extremities were not referenced yet + self.services + .pdu_metadata + .is_event_referenced(room_id, &event_id) + .await + .eq(&false) + .then_some(event_id) + }) .collect() .await; - // Remove any forward extremities that are referenced by this incoming event's - // prev_events - trace!( - "Calculated {} extremities; checking against {} prev_events", + debug!( + "Retained {} extremities checked against {} prev_events", extremities.len(), incoming_pdu.prev_events.len() ); - for prev_event in &incoming_pdu.prev_events { - extremities.remove(&(**prev_event)); - } - // Only keep those extremities were not referenced yet - let mut retained = HashSet::new(); - for id in &extremities { - if !self - .services - .pdu_metadata - .is_event_referenced(room_id, id) - .await - { - retained.insert(id.clone()); - } - } - - extremities.retain(|id| retained.contains(id)); - debug!("Retained {} extremities. 
Compressing state", extremities.len()); - - let state_ids_compressed: HashSet<_> = self + let state_ids_compressed: Arc = self .services .state_compressor .compress_state_events( @@ -218,10 +178,9 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .map(|(ssk, eid)| (ssk, eid.borrow())), ) .collect() + .map(Arc::new) .await; - let state_ids_compressed = Arc::new(state_ids_compressed); - if incoming_pdu.state_key.is_some() { debug!("Event is a state-event. Deriving new room state"); @@ -260,12 +219,14 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // if not soft fail it if soft_fail { debug!("Soft failing event"); + let extremities = extremities.iter().map(Borrow::borrow); + self.services .timeline .append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(|e| (**e).to_owned()).collect(), + extremities, state_ids_compressed, soft_fail, &state_lock, @@ -273,27 +234,30 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .await?; // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {incoming_pdu:?}"); self.services .pdu_metadata .mark_event_soft_failed(&incoming_pdu.event_id); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + warn!("Event was soft failed: {incoming_pdu:?}"); + return Err!(Request(InvalidParam("Event has been soft failed"))); } - trace!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone()); - // Now that the event has passed all auth it is added into the timeline. // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. 
+ trace!("Appending pdu to timeline"); + let extremities = extremities + .iter() + .map(Borrow::borrow) + .chain(once(incoming_pdu.event_id.borrow())); + let pdu_id = self .services .timeline .append_incoming_pdu( &incoming_pdu, val, - extremities.into_iter().collect(), + extremities, state_ids_compressed, soft_fail, &state_lock, diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index a6e00271..346314d1 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -3,13 +3,12 @@ use std::{collections::HashSet, sync::Arc}; use conduwuit::{ - implement, - utils::{stream::TryIgnore, IterStream, ReadyExt}, - Result, + Result, implement, + utils::{IterStream, ReadyExt, stream::TryIgnore}, }; use database::{Database, Deserialized, Handle, Interfix, Map, Qry}; -use futures::{pin_mut, Stream, StreamExt}; -use ruma::{api::client::filter::LazyLoadOptions, DeviceId, OwnedUserId, RoomId, UserId}; +use futures::{Stream, StreamExt, pin_mut}; +use ruma::{DeviceId, OwnedUserId, RoomId, UserId, api::client::filter::LazyLoadOptions}; pub struct Service { db: Data, diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 6d5a85a0..54eef47d 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,11 +1,11 @@ use std::sync::Arc; -use conduwuit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, implement, utils::stream::TryIgnore}; use database::Map; use futures::{Stream, StreamExt}; use ruma::RoomId; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { db: Data, diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 9cd3d805..12b56935 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,11 +1,9 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; -use database::{Deserialized, Json, Map}; +use conduwuit::{Result, implement, 
matrix::pdu::PduEvent}; +use conduwuit_database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; -use crate::PduEvent; - pub struct Service { db: Data, } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 2e6ecbb5..f0beab5a 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,25 +1,25 @@ use std::{mem::size_of, sync::Arc}; -use arrayvec::ArrayVec; use conduwuit::{ + PduCount, PduEvent, + arrayvec::ArrayVec, result::LogErr, utils::{ + ReadyExt, stream::{TryIgnore, WidebandExt}, - u64_from_u8, ReadyExt, + u64_from_u8, }, - PduCount, PduEvent, }; use database::Map; use futures::{Stream, StreamExt}; -use ruma::{api::Direction, EventId, RoomId, UserId}; +use ruma::{EventId, RoomId, UserId, api::Direction}; use crate::{ - rooms, + Dep, rooms, rooms::{ short::{ShortEventId, ShortRoomId}, timeline::{PduId, RawPduId}, }, - Dep, }; pub(super) struct Data { diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 4cb14ebc..18221c2d 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,11 +2,11 @@ mod data; use std::sync::Arc; use conduwuit::{PduCount, Result}; -use futures::StreamExt; -use ruma::{api::Direction, EventId, RoomId, UserId}; +use futures::{StreamExt, future::try_join}; +use ruma::{EventId, RoomId, UserId, api::Direction}; use self::data::{Data, PdusIterItem}; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { services: Services, @@ -54,10 +54,16 @@ impl Service { max_depth: u8, dir: Direction, ) -> Vec { - let room_id = self.services.short.get_or_create_shortroomid(room_id).await; + let room_id = self.services.short.get_shortroomid(room_id); - let target = match self.services.timeline.get_pdu_count(target).await { - | Ok(PduCount::Normal(c)) => c, + let target = self.services.timeline.get_pdu_count(target); + + let Ok((room_id, target)) = 
try_join(room_id, target).await else { + return Vec::new(); + }; + + let target = match target { + | PduCount::Normal(c) => c, // TODO: Support backfilled relations | _ => 0, // This will result in an empty iterator }; @@ -68,10 +74,14 @@ impl Service { .collect() .await; - let mut stack: Vec<_> = pdus.iter().map(|pdu| (pdu.clone(), 1)).collect(); + let mut stack: Vec<_> = pdus + .iter() + .filter(|_| max_depth > 0) + .map(|pdu| (pdu.clone(), 1)) + .collect(); 'limit: while let Some(stack_pdu) = stack.pop() { - let target = match stack_pdu.0 .0 { + let target = match stack_pdu.0.0 { | PduCount::Normal(c) => c, // TODO: Support backfilled relations | PduCount::Backfilled(_) => 0, // This will result in an empty iterator diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index c21ad36c..62f87948 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -1,18 +1,18 @@ use std::sync::Arc; use conduwuit::{ - utils::{stream::TryIgnore, ReadyExt}, Result, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Json, Map}; use futures::{Stream, StreamExt}; use ruma::{ - events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent}, - serde::Raw, CanonicalJsonObject, RoomId, UserId, + events::{AnySyncEphemeralRoomEvent, receipt::ReceiptEvent}, + serde::Raw, }; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub(super) struct Data { roomuserid_privateread: Arc, diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 2bc21355..69e859c4 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,19 +2,23 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; -use futures::{try_join, Stream, TryFutureExt}; +use conduwuit::{ + Result, debug, err, + matrix::pdu::{PduCount, PduId, RawPduId}, + warn, +}; +use 
futures::{Stream, TryFutureExt, try_join}; use ruma::{ + OwnedEventId, OwnedUserId, RoomId, UserId, events::{ - receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, + receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, }, serde::Raw, - OwnedEventId, OwnedUserId, RoomId, UserId, }; use self::data::{Data, ReceiptItem}; -use crate::{rooms, sending, Dep}; +use crate::{Dep, rooms, sending}; pub struct Service { services: Services, @@ -145,12 +149,14 @@ where let receipt = serde_json::from_str::>( value.json().get(), ); - if let Ok(value) = receipt { - for (event, receipt) in value.content { - json.insert(event, receipt); - } - } else { - debug!("failed to parse receipt: {:?}", receipt); + match receipt { + | Ok(value) => + for (event, receipt) in value.content { + json.insert(event, receipt); + }, + | _ => { + debug!("failed to parse receipt: {:?}", receipt); + }, } } let content = ReceiptEventContent::from_iter(json); diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 35cfd444..4100dd75 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,26 +1,24 @@ use std::sync::Arc; -use arrayvec::ArrayVec; use conduwuit::{ + PduCount, PduEvent, Result, + arrayvec::ArrayVec, implement, utils::{ - set, + ArrayVecExt, IterStream, ReadyExt, set, stream::{TryIgnore, WidebandExt}, - ArrayVecExt, IterStream, ReadyExt, }, - PduCount, PduEvent, Result, }; -use database::{keyval::Val, Map}; +use database::{Map, keyval::Val}; use futures::{Stream, StreamExt}; -use ruma::{api::client::search::search_events::v3::Criteria, RoomId, UserId}; +use ruma::{RoomId, UserId, api::client::search::search_events::v3::Criteria}; use crate::{ - rooms, + Dep, rooms, rooms::{ short::ShortRoomId, timeline::{PduId, RawPduId}, }, - Dep, }; pub struct Service { @@ -140,7 +138,7 @@ pub async fn search_pdus<'a>( pub async fn search_pdu_ids( &self, query: &RoomQuery<'_>, -) -> Result + Send 
+ '_> { +) -> Result + Send + '_ + use<'_>> { let shortroomid = self.services.short.get_shortroomid(query.room_id).await?; let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await; @@ -187,7 +185,7 @@ fn search_pdu_ids_query_word( &self, shortroomid: ShortRoomId, word: &str, -) -> impl Stream> + Send + '_ { +) -> impl Stream> + Send + '_ + use<'_> { // rustc says const'ing this not yet stable let end_id: RawPduId = PduId { shortroomid, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index dd586d02..06ff6493 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,13 +1,13 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; -pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId}; -use conduwuit::{err, implement, utils, utils::IterStream, Result}; +pub use conduwuit::matrix::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; +use conduwuit::{Result, err, implement, matrix::StateKey, utils, utils::IterStream}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; -use ruma::{events::StateEventType, EventId, RoomId}; +use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { db: Data, @@ -28,7 +28,6 @@ struct Services { } pub type ShortStateHash = ShortId; -pub type ShortStateKey = ShortId; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -181,7 +180,7 @@ where pub async fn get_statekey_from_short( &self, shortstatekey: ShortStateKey, -) -> Result<(StateEventType, String)> { +) -> Result<(StateEventType, StateKey)> { const BUFSIZE: usize = size_of::(); self.db @@ -200,7 +199,7 @@ pub async fn get_statekey_from_short( pub fn multi_get_statekey_from_short<'a, S>( &'a self, shortstatekey: S, -) -> impl Stream> + Send + 'a +) -> impl Stream> + Send + 'a where S: Stream + Send + 'a, { diff --git 
a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1ee2727c..ea9756ba 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,144 +1,76 @@ +mod pagination_token; +#[cfg(test)] mod tests; -use std::{ - collections::{HashMap, VecDeque}, - fmt::{Display, Formatter}, - str::FromStr, - sync::Arc, -}; +use std::{fmt::Write, sync::Arc}; +use async_trait::async_trait; use conduwuit::{ - checked, debug_info, err, - utils::{math::usize_from_f64, IterStream}, - Error, Result, + Err, Error, PduEvent, Result, implement, + utils::{ + IterStream, + future::{BoolExt, TryExtExt}, + math::usize_from_f64, + stream::{BroadbandExt, ReadyExt}, + }, }; -use futures::{StreamExt, TryFutureExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, pin_mut, stream::FuturesUnordered}; use lru_cache::LruCache; use ruma::{ + OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, api::{ - client::{self, error::ErrorKind, space::SpaceHierarchyRoomsChunk}, + client::space::SpaceHierarchyRoomsChunk, federation::{ self, space::{SpaceHierarchyChildSummary, SpaceHierarchyParentSummary}, }, }, events::{ - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, - space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, StateEventType, + space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, }, serde::Raw, space::SpaceRoomJoinRule, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UInt, UserId, }; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, MutexGuard}; -use crate::{rooms, rooms::short::ShortRoomId, sending, Dep}; - -pub struct CachedSpaceHierarchySummary { - summary: SpaceHierarchyParentSummary, -} - -pub enum SummaryAccessibility { - Accessible(Box), - Inaccessible, -} - -// TODO: perhaps use some better form of token rather than just room count -#[derive(Debug, Eq, PartialEq)] -pub struct PaginationToken { - /// Path down the hierarchy of the room to start the response at, - /// excluding the 
root space. - pub short_room_ids: Vec, - pub limit: UInt, - pub max_depth: UInt, - pub suggested_only: bool, -} - -impl FromStr for PaginationToken { - type Err = Error; - - fn from_str(value: &str) -> Result { - let mut values = value.split('_'); - - let mut pag_tok = || { - let rooms = values - .next()? - .split(',') - .filter_map(|room_s| u64::from_str(room_s).ok()) - .collect(); - - Some(Self { - short_room_ids: rooms, - limit: UInt::from_str(values.next()?).ok()?, - max_depth: UInt::from_str(values.next()?).ok()?, - suggested_only: { - let slice = values.next()?; - - if values.next().is_none() { - if slice == "true" { - true - } else if slice == "false" { - false - } else { - None? - } - } else { - None? - } - }, - }) - }; - - if let Some(token) = pag_tok() { - Ok(token) - } else { - Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) - } - } -} - -impl Display for PaginationToken { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}_{}_{}_{}", - self.short_room_ids - .iter() - .map(ToString::to_string) - .collect::>() - .join(","), - self.limit, - self.max_depth, - self.suggested_only - ) - } -} - -/// Identifier used to check if rooms are accessible -/// -/// None is used if you want to return the room, no matter if accessible or not -enum Identifier<'a> { - UserId(&'a UserId), - ServerName(&'a ServerName), -} +pub use self::pagination_token::PaginationToken; +use crate::{Dep, rooms, sending}; pub struct Service { services: Services, - pub roomid_spacehierarchy_cache: - Mutex>>, + pub roomid_spacehierarchy_cache: Mutex, } struct Services { state_accessor: Dep, state_cache: Dep, state: Dep, - short: Dep, event_handler: Dep, timeline: Dep, sending: Dep, } +pub struct CachedSpaceHierarchySummary { + summary: SpaceHierarchyParentSummary, +} + +#[allow(clippy::large_enum_variant)] +pub enum SummaryAccessibility { + Accessible(SpaceHierarchyParentSummary), + Inaccessible, +} + +/// Identifier used to check if rooms are 
accessible. None is used if you want +/// to return the room, no matter if accessible or not +pub enum Identifier<'a> { + UserId(&'a UserId), + ServerName(&'a ServerName), +} + +type Cache = LruCache>; + +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -150,7 +82,6 @@ impl crate::Service for Service { .depend::("rooms::state_accessor"), state_cache: args.depend::("rooms::state_cache"), state: args.depend::("rooms::state"), - short: args.depend::("rooms::short"), event_handler: args .depend::("rooms::event_handler"), timeline: args.depend::("rooms::timeline"), @@ -160,562 +91,434 @@ impl crate::Service for Service { })) } + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { + let roomid_spacehierarchy_cache = self.roomid_spacehierarchy_cache.lock().await.len(); + + writeln!(out, "roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}")?; + + Ok(()) + } + + async fn clear_cache(&self) { self.roomid_spacehierarchy_cache.lock().await.clear(); } + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Gets the response for the space hierarchy over federation request - /// - /// Errors if the room does not exist, so a check if the room exists should - /// be done - pub async fn get_federation_hierarchy( - &self, - room_id: &RoomId, - server_name: &ServerName, - suggested_only: bool, - ) -> Result { - match self - .get_summary_and_children_local( - &room_id.to_owned(), - Identifier::ServerName(server_name), - ) - .await? 
- { - | Some(SummaryAccessibility::Accessible(room)) => { - let mut children = Vec::new(); - let mut inaccessible_children = Vec::new(); +/// Gets the summary of a space using solely local information +#[implement(Service)] +pub async fn get_summary_and_children_local( + &self, + current_room: &RoomId, + identifier: &Identifier<'_>, +) -> Result> { + match self + .roomid_spacehierarchy_cache + .lock() + .await + .get_mut(current_room) + .as_ref() + { + | None => (), // cache miss + | Some(None) => return Ok(None), + | Some(Some(cached)) => { + let allowed_rooms = cached.summary.allowed_room_ids.iter().map(AsRef::as_ref); - for (child, _via) in get_parent_children_via(&room, suggested_only) { - match self - .get_summary_and_children_local( - &child, - Identifier::ServerName(server_name), - ) - .await? - { - | Some(SummaryAccessibility::Accessible(summary)) => { - children.push((*summary).into()); - }, - | Some(SummaryAccessibility::Inaccessible) => { - inaccessible_children.push(child); - }, - | None => (), - } - } - - Ok(federation::space::get_hierarchy::v1::Response { - room: *room, - children, - inaccessible_children, - }) - }, - | Some(SummaryAccessibility::Inaccessible) => - Err(Error::BadRequest(ErrorKind::NotFound, "The requested room is inaccessible")), - | None => - Err(Error::BadRequest(ErrorKind::NotFound, "The requested room was not found")), - } - } - - /// Gets the summary of a space using solely local information - async fn get_summary_and_children_local( - &self, - current_room: &OwnedRoomId, - identifier: Identifier<'_>, - ) -> Result> { - if let Some(cached) = self - .roomid_spacehierarchy_cache - .lock() - .await - .get_mut(¤t_room.to_owned()) - .as_ref() - { - return Ok(if let Some(cached) = cached { - if self - .is_accessible_child( - current_room, - &cached.summary.join_rule, - &identifier, - &cached.summary.allowed_room_ids, - ) - .await - { - Some(SummaryAccessibility::Accessible(Box::new(cached.summary.clone()))) - } else { - 
Some(SummaryAccessibility::Inaccessible) - } - } else { - None - }); - } - - if let Some(children_pdus) = self.get_stripped_space_child_events(current_room).await? { - let summary = self - .get_room_summary(current_room, children_pdus, &identifier) - .await; - if let Ok(summary) = summary { - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), - ); - - Ok(Some(SummaryAccessibility::Accessible(Box::new(summary)))) - } else { - Ok(None) - } - } else { - Ok(None) - } - } - - /// Gets the summary of a space using solely federation - #[tracing::instrument(level = "debug", skip(self))] - async fn get_summary_and_children_federation( - &self, - current_room: &OwnedRoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], - ) -> Result> { - for server in via { - debug_info!("Asking {server} for /hierarchy"); - let Ok(response) = self - .services - .sending - .send_federation_request(server, federation::space::get_hierarchy::v1::Request { - room_id: current_room.to_owned(), - suggested_only, - }) - .await - else { - continue; - }; - - debug_info!("Got response from {server} for /hierarchy\n{response:?}"); - let summary = response.room.clone(); - - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), + let is_accessible_child = self.is_accessible_child( + current_room, + &cached.summary.join_rule, + identifier, + allowed_rooms, ); - for child in response.children { - let mut guard = self.roomid_spacehierarchy_cache.lock().await; - if !guard.contains_key(current_room) { - guard.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { - summary: { - let SpaceHierarchyChildSummary { - canonical_alias, - name, - num_joined_members, - room_id, - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - allowed_room_ids, - } = child; + let 
accessibility = if is_accessible_child.await { + SummaryAccessibility::Accessible(cached.summary.clone()) + } else { + SummaryAccessibility::Inaccessible + }; - SpaceHierarchyParentSummary { - canonical_alias, - name, - num_joined_members, - room_id: room_id.clone(), - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - children_state: self - .get_stripped_space_child_events(&room_id) - .await? - .unwrap(), - allowed_room_ids, - } - }, - }), - ); - } - } - if self - .is_accessible_child( - current_room, - &response.room.join_rule, - &Identifier::UserId(user_id), - &response.room.allowed_room_ids, - ) - .await - { - return Ok(Some(SummaryAccessibility::Accessible(Box::new(summary.clone())))); - } + return Ok(Some(accessibility)); + }, + } - return Ok(Some(SummaryAccessibility::Inaccessible)); - } + let children_pdus: Vec<_> = self + .get_space_child_events(current_room) + .map(PduEvent::into_stripped_spacechild_state_event) + .collect() + .await; + let Ok(summary) = self + .get_room_summary(current_room, children_pdus, identifier) + .boxed() + .await + else { + return Ok(None); + }; + + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.to_owned(), + Some(CachedSpaceHierarchySummary { summary: summary.clone() }), + ); + + Ok(Some(SummaryAccessibility::Accessible(summary))) +} + +/// Gets the summary of a space using solely federation +#[implement(Service)] +#[tracing::instrument(level = "debug", skip(self))] +async fn get_summary_and_children_federation( + &self, + current_room: &RoomId, + suggested_only: bool, + user_id: &UserId, + via: &[OwnedServerName], +) -> Result> { + let request = federation::space::get_hierarchy::v1::Request { + room_id: current_room.to_owned(), + suggested_only, + }; + + let mut requests: FuturesUnordered<_> = via + .iter() + .map(|server| { + self.services + .sending + .send_federation_request(server, request.clone()) + }) + .collect(); + + let Some(Ok(response)) = requests.next().await 
else { self.roomid_spacehierarchy_cache .lock() .await - .insert(current_room.clone(), None); + .insert(current_room.to_owned(), None); - Ok(None) - } + return Ok(None); + }; - /// Gets the summary of a space using either local or remote (federation) - /// sources - async fn get_summary_and_children_client( - &self, - current_room: &OwnedRoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], - ) -> Result> { - if let Ok(Some(response)) = self - .get_summary_and_children_local(current_room, Identifier::UserId(user_id)) - .await - { - Ok(Some(response)) - } else { - self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) - .await - } - } + let summary = response.room; + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.to_owned(), + Some(CachedSpaceHierarchySummary { summary: summary.clone() }), + ); - async fn get_room_summary( - &self, - current_room: &OwnedRoomId, - children_state: Vec>, - identifier: &Identifier<'_>, - ) -> Result { - let room_id: &RoomId = current_room; - - let join_rule = self - .services - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); - - let allowed_room_ids = self - .services - .state_accessor - .allowed_room_ids(join_rule.clone()); - - if !self - .is_accessible_child( - current_room, - &join_rule.clone().into(), - identifier, - &allowed_room_ids, - ) - .await - { - debug_info!("User is not allowed to see room {room_id}"); - // This error will be caught later - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to see the room", - )); - } - - Ok(SpaceHierarchyParentSummary { - canonical_alias: self - .services - .state_accessor - .get_canonical_alias(room_id) - .await - .ok(), - name: self.services.state_accessor.get_name(room_id).await.ok(), - num_joined_members: self - .services - .state_cache - 
.room_joined_count(room_id) - .await - .unwrap_or(0) - .try_into() - .expect("user count should not be that big"), - room_id: room_id.to_owned(), - topic: self - .services - .state_accessor - .get_room_topic(room_id) - .await - .ok(), - world_readable: self - .services - .state_accessor - .is_world_readable(room_id) - .await, - guest_can_join: self.services.state_accessor.guest_can_join(room_id).await, - avatar_url: self - .services - .state_accessor - .get_avatar(room_id) - .await - .into_option() - .unwrap_or_default() - .url, - join_rule: join_rule.into(), - room_type: self - .services - .state_accessor - .get_room_type(room_id) - .await - .ok(), - children_state, - allowed_room_ids, + response + .children + .into_iter() + .stream() + .then(|child| { + self.roomid_spacehierarchy_cache + .lock() + .map(|lock| (child, lock)) }) - } - - pub async fn get_client_hierarchy( - &self, - sender_user: &UserId, - room_id: &RoomId, - limit: usize, - short_room_ids: Vec, - max_depth: u64, - suggested_only: bool, - ) -> Result { - let mut parents = VecDeque::new(); - - // Don't start populating the results if we have to start at a specific room. 
- let mut populate_results = short_room_ids.is_empty(); - - let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { - | Some(server_name) => vec![server_name.into()], - | None => vec![], - })]]; - - let mut results = Vec::with_capacity(limit); - - while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } - { - if results.len() >= limit { - break; - } - - match ( - self.get_summary_and_children_client( - ¤t_room, - suggested_only, - sender_user, - &via, - ) - .await?, - current_room == room_id, - ) { - | (Some(SummaryAccessibility::Accessible(summary)), _) => { - let mut children: Vec<(OwnedRoomId, Vec)> = - get_parent_children_via(&summary, suggested_only) - .into_iter() - .filter(|(room, _)| parents.iter().all(|parent| parent != room)) - .rev() - .collect(); - - if populate_results { - results.push(summary_to_chunk(*summary.clone())); - } else { - children = children - .iter() - .rev() - .stream() - .skip_while(|(room, _)| { - self.services - .short - .get_shortroomid(room) - .map_ok(|short| { - Some(&short) != short_room_ids.get(parents.len()) - }) - .unwrap_or_else(|_| false) - }) - .map(Clone::clone) - .collect::)>>() - .await - .into_iter() - .rev() - .collect(); - - if children.is_empty() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room IDs in token were not found.", - )); - } - - // We have reached the room after where we last left off - let parents_len = parents.len(); - if checked!(parents_len + 1)? == short_room_ids.len() { - populate_results = true; - } - } - - let parents_len: u64 = parents.len().try_into()?; - if !children.is_empty() && parents_len < max_depth { - parents.push_back(current_room.clone()); - stack.push(children); - } - // Root room in the space hierarchy, we return an error - // if this one fails. 
- }, - | (Some(SummaryAccessibility::Inaccessible), true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room is inaccessible", - )); - }, - | (None, true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room was not found", - )); - }, - // Just ignore other unavailable rooms - | (None | Some(SummaryAccessibility::Inaccessible), false) => (), - } - } - - Ok(client::space::get_hierarchy::v1::Response { - next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { - parents.pop_front(); - parents.push_back(room); - - let short_room_ids: Vec<_> = parents - .iter() - .stream() - .filter_map(|room_id| async move { - self.services.short.get_shortroomid(room_id).await.ok() - }) - .collect() - .await; - - Some( - PaginationToken { - short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string(), - ) - } else { - None - }, - rooms: results, + .ready_filter_map(|(child, mut cache)| { + (!cache.contains_key(current_room)).then_some((child, cache)) }) - } + .for_each(|(child, cache)| self.cache_insert(cache, current_room, child)) + .await; - /// Simply returns the stripped m.space.child events of a room - async fn get_stripped_space_child_events( - &self, - room_id: &RoomId, - ) -> Result>>, Error> { - let Ok(current_shortstatehash) = - self.services.state.get_room_shortstatehash(room_id).await - else { - return Ok(None); - }; + let identifier = Identifier::UserId(user_id); + let allowed_room_ids = summary.allowed_room_ids.iter().map(AsRef::as_ref); - let state: HashMap<_, Arc<_>> = self - .services - .state_accessor - .state_full_ids(current_shortstatehash) - .collect() - .await; + let is_accessible_child = self + .is_accessible_child(current_room, &summary.join_rule, &identifier, allowed_room_ids) + 
.await; - let mut children_pdus = Vec::with_capacity(state.len()); - for (key, id) in state { - let (event_type, state_key) = - self.services.short.get_statekey_from_short(key).await?; + let accessibility = if is_accessible_child { + SummaryAccessibility::Accessible(summary) + } else { + SummaryAccessibility::Inaccessible + }; - if event_type != StateEventType::SpaceChild { - continue; - } - - let pdu = - self.services.timeline.get_pdu(&id).await.map_err(|e| { - err!(Database("Event {id:?} in space state not found: {e:?}")) - })?; + Ok(Some(accessibility)) +} +/// Simply returns the stripped m.space.child events of a room +#[implement(Service)] +fn get_space_child_events<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|current_shortstatehash| { + self.services + .state_accessor + .state_keys_with_ids(current_shortstatehash, &StateEventType::SpaceChild) + .boxed() + }) + .map(Result::into_iter) + .map(IterStream::stream) + .map(StreamExt::flatten) + .flatten_stream() + .broad_filter_map(move |(state_key, event_id): (_, OwnedEventId)| async move { + self.services + .timeline + .get_pdu(&event_id) + .map_ok(move |pdu| (state_key, pdu)) + .ok() + .await + }) + .ready_filter_map(move |(state_key, pdu)| { if let Ok(content) = pdu.get_content::() { if content.via.is_empty() { - continue; + return None; } } - if OwnedRoomId::try_from(state_key).is_ok() { - children_pdus.push(pdu.to_stripped_spacechild_state_event()); + if RoomId::parse(&state_key).is_err() { + return None; } - } - Ok(Some(children_pdus)) + Some(pdu) + }) +} + +/// Gets the summary of a space using either local or remote (federation) +/// sources +#[implement(Service)] +pub async fn get_summary_and_children_client( + &self, + current_room: &OwnedRoomId, + suggested_only: bool, + user_id: &UserId, + via: &[OwnedServerName], +) -> Result> { + let identifier = Identifier::UserId(user_id); + + if let 
Ok(Some(response)) = self + .get_summary_and_children_local(current_room, &identifier) + .await + { + return Ok(Some(response)); } - /// With the given identifier, checks if a room is accessable - async fn is_accessible_child( - &self, - current_room: &OwnedRoomId, - join_rule: &SpaceRoomJoinRule, - identifier: &Identifier<'_>, - allowed_room_ids: &Vec, - ) -> bool { - match identifier { - | Identifier::ServerName(server_name) => { - // Checks if ACLs allow for the server to participate - if self - .services - .event_handler - .acl_check(server_name, current_room) - .await - .is_err() - { - return false; - } - }, - | Identifier::UserId(user_id) => { - if self - .services - .state_cache - .is_joined(user_id, current_room) - .await || self - .services - .state_cache - .is_invited(user_id, current_room) - .await - { - return true; - } - }, - } - match &join_rule { - | SpaceRoomJoinRule::Public - | SpaceRoomJoinRule::Knock - | SpaceRoomJoinRule::KnockRestricted => true, - | SpaceRoomJoinRule::Restricted => { - for room in allowed_room_ids { - match identifier { - | Identifier::UserId(user) => { - if self.services.state_cache.is_joined(user, room).await { - return true; - } - }, - | Identifier::ServerName(server) => { - if self.services.state_cache.server_in_room(server, room).await { - return true; - } - }, - } - } - false - }, - // Invite only, Private, or Custom join rule - | _ => false, + self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) + .await +} + +#[implement(Service)] +async fn get_room_summary( + &self, + room_id: &RoomId, + children_state: Vec>, + identifier: &Identifier<'_>, +) -> Result { + let join_rule = self.services.state_accessor.get_join_rules(room_id).await; + + let is_accessible_child = self + .is_accessible_child( + room_id, + &join_rule.clone().into(), + identifier, + join_rule.allowed_rooms(), + ) + .await; + + if !is_accessible_child { + return Err!(Request(Forbidden("User is not allowed to see the room"))); + 
} + + let name = self.services.state_accessor.get_name(room_id).ok(); + + let topic = self.services.state_accessor.get_room_topic(room_id).ok(); + + let room_type = self.services.state_accessor.get_room_type(room_id).ok(); + + let world_readable = self.services.state_accessor.is_world_readable(room_id); + + let guest_can_join = self.services.state_accessor.guest_can_join(room_id); + + let num_joined_members = self + .services + .state_cache + .room_joined_count(room_id) + .unwrap_or(0); + + let canonical_alias = self + .services + .state_accessor + .get_canonical_alias(room_id) + .ok(); + + let avatar_url = self + .services + .state_accessor + .get_avatar(room_id) + .map(|res| res.into_option().unwrap_or_default().url); + + let room_version = self.services.state.get_room_version(room_id).ok(); + + let encryption = self + .services + .state_accessor + .get_room_encryption(room_id) + .ok(); + + let ( + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type, + room_version, + encryption, + ) = futures::join!( + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type, + room_version, + encryption, + ); + + let summary = SpaceHierarchyParentSummary { + canonical_alias, + name, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type, + children_state, + encryption, + room_version, + room_id: room_id.to_owned(), + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), + join_rule: join_rule.clone().into(), + }; + + Ok(summary) +} + +/// With the given identifier, checks if a room is accessable +#[implement(Service)] +async fn is_accessible_child<'a, I>( + &self, + current_room: &RoomId, + join_rule: &SpaceRoomJoinRule, + identifier: &Identifier<'_>, + allowed_rooms: I, +) -> bool +where + I: Iterator + Send, +{ + if let 
Identifier::ServerName(server_name) = identifier { + // Checks if ACLs allow for the server to participate + if self + .services + .event_handler + .acl_check(server_name, current_room) + .await + .is_err() + { + return false; } } + + if let Identifier::UserId(user_id) = identifier { + let is_joined = self.services.state_cache.is_joined(user_id, current_room); + + let is_invited = self.services.state_cache.is_invited(user_id, current_room); + + pin_mut!(is_joined, is_invited); + if is_joined.or(is_invited).await { + return true; + } + } + + match *join_rule { + | SpaceRoomJoinRule::Public + | SpaceRoomJoinRule::Knock + | SpaceRoomJoinRule::KnockRestricted => true, + | SpaceRoomJoinRule::Restricted => + allowed_rooms + .stream() + .any(async |room| match identifier { + | Identifier::UserId(user) => + self.services.state_cache.is_joined(user, room).await, + | Identifier::ServerName(server) => + self.services.state_cache.server_in_room(server, room).await, + }) + .await, + + // Invite only, Private, or Custom join rule + | _ => false, + } +} + +/// Returns the children of a SpaceHierarchyParentSummary, making use of the +/// children_state field +pub fn get_parent_children_via( + parent: &SpaceHierarchyParentSummary, + suggested_only: bool, +) -> impl DoubleEndedIterator + use<>)> ++ Send ++ '_ { + parent + .children_state + .iter() + .map(Raw::deserialize) + .filter_map(Result::ok) + .filter_map(move |ce| { + (!suggested_only || ce.content.suggested) + .then_some((ce.state_key, ce.content.via.into_iter())) + }) +} + +#[implement(Service)] +async fn cache_insert( + &self, + mut cache: MutexGuard<'_, Cache>, + current_room: &RoomId, + child: SpaceHierarchyChildSummary, +) { + let SpaceHierarchyChildSummary { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + encryption, + room_version, + } = child; + + let summary = SpaceHierarchyParentSummary { + 
canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + room_id: room_id.clone(), + children_state: self + .get_space_child_events(&room_id) + .map(PduEvent::into_stripped_spacechild_state_event) + .collect() + .await, + encryption, + room_version, + }; + + cache.insert(current_room.to_owned(), Some(CachedSpaceHierarchySummary { summary })); } // Here because cannot implement `From` across ruma-federation-api and @@ -734,7 +537,9 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, - .. + allowed_room_ids, + encryption, + room_version, } = value.summary; Self { @@ -749,13 +554,17 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, + encryption, + room_version, + allowed_room_ids, } } } /// Here because cannot implement `From` across ruma-federation-api and /// ruma-client-api types -fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { +#[must_use] +pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { let SpaceHierarchyParentSummary { canonical_alias, name, @@ -768,7 +577,9 @@ fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRooms join_rule, room_type, children_state, - .. 
+ allowed_room_ids, + encryption, + room_version, } = summary; SpaceHierarchyRoomsChunk { @@ -783,38 +594,8 @@ fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRooms join_rule, room_type, children_state, + encryption, + room_version, + allowed_room_ids, } } - -/// Returns the children of a SpaceHierarchyParentSummary, making use of the -/// children_state field -fn get_parent_children_via( - parent: &SpaceHierarchyParentSummary, - suggested_only: bool, -) -> Vec<(OwnedRoomId, Vec)> { - parent - .children_state - .iter() - .filter_map(|raw_ce| { - raw_ce.deserialize().map_or(None, |ce| { - if suggested_only && !ce.content.suggested { - None - } else { - Some((ce.state_key, ce.content.via)) - } - }) - }) - .collect() -} - -fn next_room_to_traverse( - stack: &mut Vec)>>, - parents: &mut VecDeque, -) -> Option<(OwnedRoomId, Vec)> { - while stack.last().is_some_and(Vec::is_empty) { - stack.pop(); - parents.pop_back(); - } - - stack.last_mut().and_then(Vec::pop) -} diff --git a/src/service/rooms/spaces/pagination_token.rs b/src/service/rooms/spaces/pagination_token.rs new file mode 100644 index 00000000..d97b7a2f --- /dev/null +++ b/src/service/rooms/spaces/pagination_token.rs @@ -0,0 +1,76 @@ +use std::{ + fmt::{Display, Formatter}, + str::FromStr, +}; + +use conduwuit::{Error, Result}; +use ruma::{UInt, api::client::error::ErrorKind}; + +use crate::rooms::short::ShortRoomId; + +// TODO: perhaps use some better form of token rather than just room count +#[derive(Debug, Eq, PartialEq)] +pub struct PaginationToken { + /// Path down the hierarchy of the room to start the response at, + /// excluding the root space. + pub short_room_ids: Vec, + pub limit: UInt, + pub max_depth: UInt, + pub suggested_only: bool, +} + +impl FromStr for PaginationToken { + type Err = Error; + + fn from_str(value: &str) -> Result { + let mut values = value.split('_'); + let mut pag_tok = || { + let short_room_ids = values + .next()? 
+ .split(',') + .filter_map(|room_s| u64::from_str(room_s).ok()) + .collect(); + + let limit = UInt::from_str(values.next()?).ok()?; + let max_depth = UInt::from_str(values.next()?).ok()?; + let slice = values.next()?; + let suggested_only = if values.next().is_none() { + if slice == "true" { + true + } else if slice == "false" { + false + } else { + None? + } + } else { + None? + }; + + Some(Self { + short_room_ids, + limit, + max_depth, + suggested_only, + }) + }; + + if let Some(token) = pag_tok() { + Ok(token) + } else { + Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) + } + } +} + +impl Display for PaginationToken { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let short_room_ids = self + .short_room_ids + .iter() + .map(ToString::to_string) + .collect::>() + .join(","); + + write!(f, "{short_room_ids}_{}_{}_{}", self.limit, self.max_depth, self.suggested_only) + } +} diff --git a/src/service/rooms/spaces/tests.rs b/src/service/rooms/spaces/tests.rs index b4c387d7..d0395fdd 100644 --- a/src/service/rooms/spaces/tests.rs +++ b/src/service/rooms/spaces/tests.rs @@ -1,15 +1,13 @@ -#![cfg(test)] - use std::str::FromStr; use ruma::{ + UInt, api::federation::space::{SpaceHierarchyParentSummary, SpaceHierarchyParentSummaryInit}, owned_room_id, owned_server_name, space::SpaceRoomJoinRule, - UInt, }; -use crate::rooms::spaces::{get_parent_children_via, PaginationToken}; +use crate::rooms::spaces::{PaginationToken, get_parent_children_via}; #[test] fn get_summary_children() { @@ -69,15 +67,22 @@ fn get_summary_children() { } .into(); - assert_eq!(get_parent_children_via(&summary, false), vec![ - (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) - ]); - assert_eq!(get_parent_children_via(&summary, true), vec![( - owned_room_id!("!bar:example.org"), - 
vec![owned_server_name!("example.org")] - )]); + assert_eq!( + get_parent_children_via(&summary, false) + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) + ] + ); + assert_eq!( + get_parent_children_via(&summary, true) + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![(owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")])] + ); } #[test] diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index fd303667..803ba9d7 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,41 +1,35 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Write, - iter::once, - sync::Arc, -}; +use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; +use async_trait::async_trait; use conduwuit::{ - err, + PduEvent, Result, err, result::FlatOk, + state_res::{self, StateMap}, utils::{ - calculate_hash, + IterStream, MutexMap, MutexMapGuard, ReadyExt, calculate_hash, stream::{BroadbandExt, TryIgnore}, - IterStream, MutexMap, MutexMapGuard, ReadyExt, }, - warn, PduEvent, Result, + warn, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{ - future::join_all, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, + FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future::join_all, pin_mut, }; use ruma::{ + EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, events::{ - room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, AnyStrippedStateEvent, StateEventType, TimelineEventType, + room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, }, serde::Raw, - state_res::{self, StateMap}, - EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, }; use 
crate::{ - globals, rooms, + Dep, globals, rooms, rooms::{ short::{ShortEventId, ShortStateHash}, - state_compressor::{parse_compressed_state_event, CompressedStateEvent}, + state_compressor::{CompressedState, parse_compressed_state_event}, }, - Dep, }; pub struct Service { @@ -63,6 +57,7 @@ struct Data { type RoomMutexMap = MutexMap; pub type RoomMutexGuard = MutexMapGuard; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -86,7 +81,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex = self.mutex.len(); writeln!(out, "state_mutex: {mutex}")?; @@ -102,10 +97,9 @@ impl Service { &self, room_id: &RoomId, shortstatehash: u64, - statediffnew: Arc>, - _statediffremoved: Arc>, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ + statediffnew: Arc, + _statediffremoved: Arc, + state_lock: &RoomMutexGuard, ) -> Result { let event_ids = statediffnew .iter() @@ -176,7 +170,7 @@ impl Service { &self, event_id: &EventId, room_id: &RoomId, - state_ids_compressed: Arc>, + state_ids_compressed: Arc, ) -> Result { const KEY_LEN: usize = size_of::(); const VAL_LEN: usize = size_of::(); @@ -198,23 +192,23 @@ impl Service { .await; if !already_existed { - let states_parents = if let Ok(p) = previous_shortstatehash { - self.services - .state_compressor - .load_shortstatehash_info(p) - .await? 
- } else { - Vec::new() + let states_parents = match previous_shortstatehash { + | Ok(p) => + self.services + .state_compressor + .load_shortstatehash_info(p) + .await?, + | _ => Vec::new(), }; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed + let statediffnew: CompressedState = state_ids_compressed .difference(&parent_stateinfo.full_state) .copied() .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo + let statediffremoved: CompressedState = parent_stateinfo .full_state .difference(&state_ids_compressed) .copied() @@ -222,7 +216,7 @@ impl Service { (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (state_ids_compressed, Arc::new(HashSet::new())) + (state_ids_compressed, Arc::new(CompressedState::new())) }; self.services.state_compressor.save_state_from_diff( shortstatehash, @@ -262,63 +256,65 @@ impl Service { .aput::(shorteventid, p); } - if let Some(state_key) = &new_pdu.state_key { - let states_parents = if let Ok(p) = previous_shortstatehash { - self.services + match &new_pdu.state_key { + | Some(state_key) => { + let states_parents = match previous_shortstatehash { + | Ok(p) => + self.services + .state_compressor + .load_shortstatehash_info(p) + .await?, + | _ => Vec::new(), + }; + + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) + .await; + + let new = self + .services .state_compressor - .load_shortstatehash_info(p) - .await? 
- } else { - Vec::new() - }; + .compress_state_event(shortstatekey, &new_pdu.event_id) + .await; - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) - .await; + let replaces = states_parents + .last() + .map(|info| { + info.full_state + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + }) + .unwrap_or_default(); - let new = self - .services - .state_compressor - .compress_state_event(shortstatekey, &new_pdu.event_id) - .await; + if Some(&new) == replaces { + return Ok(previous_shortstatehash.expect("must exist")); + } - let replaces = states_parents - .last() - .map(|info| { - info.full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); + // TODO: statehash with deterministic inputs + let shortstatehash = self.services.globals.next_count()?; - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } + let mut statediffnew = CompressedState::new(); + statediffnew.insert(new); - // TODO: statehash with deterministic inputs - let shortstatehash = self.services.globals.next_count()?; + let mut statediffremoved = CompressedState::new(); + if let Some(replaces) = replaces { + statediffremoved.insert(*replaces); + } - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); + self.services.state_compressor.save_state_from_diff( + shortstatehash, + Arc::new(statediffnew), + Arc::new(statediffremoved), + 2, + states_parents, + )?; - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.services.state_compressor.save_state_from_diff( - shortstatehash, - Arc::new(statediffnew), - Arc::new(statediffremoved), - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) + Ok(shortstatehash) + }, + | _ => + 
Ok(previous_shortstatehash.expect("first event in room must be a state event")), } } @@ -345,7 +341,7 @@ impl Service { .await .into_iter() .filter_map(Result::ok) - .map(|e| e.to_stripped_state_event()) + .map(PduEvent::into_stripped_state_event) .chain(once(event.to_stripped_state_event())) .collect() } @@ -398,13 +394,14 @@ impl Service { .ignore_err() } - pub async fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: Vec, - _state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room - * state mutex */ - ) { + pub async fn set_forward_extremities<'a, I>( + &'a self, + room_id: &'a RoomId, + event_ids: I, + _state_lock: &'a RoomMutexGuard, + ) where + I: Iterator + Send + 'a, + { let prefix = (room_id, Interfix); self.db .roomid_pduleaves @@ -413,7 +410,7 @@ impl Service { .ready_for_each(|key| self.db.roomid_pduleaves.remove(key)) .await; - for event_id in &event_ids { + for event_id in event_ids { let key = (room_id, event_id); self.db.roomid_pduleaves.put_raw(key, event_id); } @@ -428,60 +425,54 @@ impl Service { sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, - ) -> Result>> { + ) -> Result> { let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else { return Ok(HashMap::new()); }; - let mut sauthevents: HashMap<_, _> = - state_res::auth_types_for_event(kind, sender, state_key, content)? 
- .iter() - .stream() - .broad_filter_map(|(event_type, state_key)| { - self.services - .short - .get_shortstatekey(event_type, state_key) - .map_ok(move |ssk| (ssk, (event_type, state_key))) - .map(Result::ok) - }) - .map(|(ssk, (event_type, state_key))| { - (ssk, (event_type.to_owned(), state_key.to_owned())) - }) - .collect() - .await; + let auth_types = state_res::auth_types_for_event(kind, sender, state_key, content)?; + + let sauthevents: HashMap<_, _> = auth_types + .iter() + .stream() + .broad_filter_map(|(event_type, state_key)| { + self.services + .short + .get_shortstatekey(event_type, state_key) + .map_ok(move |ssk| (ssk, (event_type, state_key))) + .map(Result::ok) + }) + .collect() + .await; let (state_keys, event_ids): (Vec<_>, Vec<_>) = self .services .state_accessor .state_full_shortids(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?room_id, ?shortstatehash, "{e:?}"))))? - .into_iter() - .filter_map(|(shortstatekey, shorteventid)| { + .ready_filter_map(Result::ok) + .ready_filter_map(|(shortstatekey, shorteventid)| { sauthevents - .remove(&shortstatekey) - .map(|(event_type, state_key)| ((event_type, state_key), shorteventid)) + .get(&shortstatekey) + .map(|(ty, sk)| ((ty, sk), shorteventid)) }) - .unzip(); + .unzip() + .await; - let auth_pdus = self - .services + self.services .short .multi_get_eventid_from_short(event_ids.into_iter().stream()) .zip(state_keys.into_iter().stream()) - .ready_filter_map(|(event_id, tsk)| Some((tsk, event_id.ok()?))) - .broad_filter_map(|(tsk, event_id): (_, OwnedEventId)| async move { + .ready_filter_map(|(event_id, (ty, sk))| Some(((ty, sk), event_id.ok()?))) + .broad_filter_map(|((ty, sk), event_id): (_, OwnedEventId)| async move { self.services .timeline .get_pdu(&event_id) .await - .map(Arc::new) - .map(move |pdu| (tsk, pdu)) + .map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu)) .ok() }) .collect() - .await; - - Ok(auth_pdus) + .map(Ok) + .await } } diff --git 
a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 0f5520bb..f719fc7b 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,25 +1,17 @@ -use std::{ - borrow::Borrow, - fmt::Write, - sync::{Arc, Mutex as StdMutex, Mutex}, -}; +mod room_state; +mod server_can; +mod state; +mod user_can; -use conduwuit::{ - at, err, error, - pdu::PduBuilder, - utils, - utils::{ - math::{usize_from_f64, Expected}, - stream::BroadbandExt, - IterStream, ReadyExt, - }, - Err, Error, PduEvent, Result, -}; -use database::{Deserialized, Map}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; -use lru_cache::LruCache; +use std::sync::Arc; + +use async_trait::async_trait; +use conduwuit::{Result, err}; +use database::Map; use ruma::{ + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, RoomId, UserId, events::{ + StateEventType, room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, @@ -27,34 +19,18 @@ use ruma::{ encryption::RoomEncryptionEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent, RoomMembership}, - member::{MembershipState, RoomMemberEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::RoomMemberEventContent, name::RoomNameEventContent, - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, topic::RoomTopicEventContent, }, - StateEventType, TimelineEventType, }, room::RoomType, - space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, EventId, JsOption, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; -use serde::Deserialize; -use crate::{ - rooms, - rooms::{ - short::{ShortEventId, ShortStateHash, ShortStateKey}, - state::RoomMutexGuard, - state_compressor::parse_compressed_state_event, - }, - Dep, -}; 
+use crate::{Dep, rooms}; pub struct Service { - pub server_visibility_cache: Mutex>, - pub user_visibility_cache: Mutex>, services: Services, db: Data, } @@ -71,21 +47,10 @@ struct Data { shorteventid_shortstatehash: Arc, } +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { - let config = &args.server.config; - let server_visibility_cache_capacity = - f64::from(config.server_visibility_cache_capacity) * config.cache_capacity_modifier; - let user_visibility_cache_capacity = - f64::from(config.user_visibility_cache_capacity) * config.cache_capacity_modifier; - Ok(Arc::new(Self { - server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( - server_visibility_cache_capacity, - )?)), - user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64( - user_visibility_cache_capacity, - )?)), services: Services { state_cache: args.depend::("rooms::state_cache"), timeline: args.depend::("rooms::timeline"), @@ -100,474 +65,10 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { - use utils::bytes::pretty; - - let (svc_count, svc_bytes) = self.server_visibility_cache.lock()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, _)| { - ( - count.expected_add(1), - bytes - .expected_add(key.0.capacity()) - .expected_add(size_of_val(&key.1)), - ) - }, - ); - - let (uvc_count, uvc_bytes) = self.user_visibility_cache.lock()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, _)| { - ( - count.expected_add(1), - bytes - .expected_add(key.0.capacity()) - .expected_add(size_of_val(&key.1)), - ) - }, - ); - - writeln!(out, "server_visibility_cache: {svc_count} ({})", pretty(svc_bytes))?; - writeln!(out, "user_visibility_cache: {uvc_count} ({})", pretty(uvc_bytes))?; - - Ok(()) - } - - fn clear_cache(&self) { - self.server_visibility_cache.lock().expect("locked").clear(); - self.user_visibility_cache.lock().expect("locked").clear(); - } - fn name(&self) -> &str { 
crate::service::make_name(std::module_path!()) } } impl Service { - pub fn state_full( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + '_ { - self.state_full_pdus(shortstatehash) - .ready_filter_map(|pdu| { - Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) - }) - } - - pub fn state_full_pdus( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + '_ { - let short_ids = self - .state_full_shortids(shortstatehash) - .map(|result| result.expect("missing shortstatehash")) - .map(Vec::into_iter) - .map(|iter| iter.map(at!(1))) - .map(IterStream::stream) - .flatten_stream() - .boxed(); - - self.services - .short - .multi_get_eventid_from_short(short_ids) - .ready_filter_map(Result::ok) - .broad_filter_map(move |event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await.ok() - }) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self), level = "debug")] - pub fn state_full_ids<'a, Id>( - &'a self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + 'a - where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, - ::Owned: Borrow, - { - let shortids = self - .state_full_shortids(shortstatehash) - .map(|result| result.expect("missing shortstatehash")) - .map(|vec| vec.into_iter().unzip()) - .boxed() - .shared(); - - let shortstatekeys = shortids - .clone() - .map(at!(0)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - let shorteventids = shortids - .map(at!(1)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - self.services - .short - .multi_get_eventid_from_short(shorteventids) - .zip(shortstatekeys) - .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). 
- #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let full_state = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? - .pop() - .expect("there is always one layer") - .full_state; - - let compressed = full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .ok_or(err!(Database("No shortstatekey in compressed state")))?; - - let (_, shorteventid) = parse_compressed_state_event(*compressed); - - self.services - .short - .get_eventid_from_short(shorteventid) - .await - } - - #[inline] - pub async fn state_full_shortids( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - let shortids = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database("Missing state IDs: {e}")))? - .pop() - .expect("there is always one layer") - .full_state - .iter() - .copied() - .map(parse_compressed_state_event) - .collect(); - - Ok(shortids) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub async fn state_get( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await - }) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
- pub async fn state_get_content( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.state_get(shortstatehash, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - /// Get membership for given user in state - async fn user_membership( - &self, - shortstatehash: ShortStateHash, - user_id: &UserId, - ) -> MembershipState { - self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .await - .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) - } - - /// The user was a joined member at this state (potentially in the past) - #[inline] - async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id).await == MembershipState::Join - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - #[inline] - async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - let s = self.user_membership(shortstatehash, user_id).await; - s == MembershipState::Join || s == MembershipState::Invite - } - - /// Whether a server is allowed to see an event through federation, based on - /// the room's history_visibility at that event's state. 
- #[tracing::instrument(skip_all, level = "trace")] - pub async fn server_can_see_event( - &self, - origin: &ServerName, - room_id: &RoomId, - event_id: &EventId, - ) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - if let Some(visibility) = self - .server_visibility_cache - .lock() - .expect("locked") - .get_mut(&(origin.to_owned(), shortstatehash)) - { - return *visibility; - } - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - let current_server_members = self - .services - .state_cache - .room_members(room_id) - .ready_filter(|member| member.server_name() == origin); - - let visibility = match history_visibility { - | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - current_server_members - .any(|member| self.user_was_invited(shortstatehash, member)) - .await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - current_server_members - .any(|member| self.user_was_joined(shortstatehash, member)) - .await - }, - | _ => { - error!("Unknown history visibility {history_visibility}"); - false - }, - }; - - self.server_visibility_cache - .lock() - .expect("locked") - .insert((origin.to_owned(), shortstatehash), visibility); - - visibility - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. 
- #[tracing::instrument(skip_all, level = "trace")] - pub async fn user_can_see_event( - &self, - user_id: &UserId, - room_id: &RoomId, - event_id: &EventId, - ) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - if let Some(visibility) = self - .user_visibility_cache - .lock() - .expect("locked") - .get_mut(&(user_id.to_owned(), shortstatehash)) - { - return *visibility; - } - - let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - let visibility = match history_visibility { - | HistoryVisibility::WorldReadable => true, - | HistoryVisibility::Shared => currently_member, - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, user_id).await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, user_id).await - }, - | _ => { - error!("Unknown history visibility {history_visibility}"); - false - }, - }; - - self.user_visibility_cache - .lock() - .expect("locked") - .insert((user_id.to_owned(), shortstatehash), visibility); - - visibility - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. 
- #[tracing::instrument(skip_all, level = "trace")] - pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { - if self.services.state_cache.is_joined(user_id, room_id).await { - return true; - } - - let history_visibility = self - .room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - match history_visibility { - | HistoryVisibility::Invited => - self.services.state_cache.is_invited(user_id, room_id).await, - | HistoryVisibility::WorldReadable => true, - | _ => false, - } - } - - /// Returns the state hash for this pdu. - pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.db - .shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() - } - - /// Returns the full room state. - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns the full room state pdus - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full_pdus<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). 
- #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub async fn room_state_get_content( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.room_state_get(room_id, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - pub async fn get_name(&self, room_id: &RoomId) -> Result { self.room_state_get_content(room_id, &StateEventType::RoomName, "") .await @@ -592,28 +93,6 @@ impl Service { .await } - pub async fn user_can_invite( - &self, - room_id: &RoomId, - sender: &UserId, - target_user: &UserId, - state_lock: &RoomMutexGuard, - ) -> bool { - self.services - .timeline - .create_hash_and_sign_event( - PduBuilder::state( - target_user.into(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - sender, - room_id, - state_lock, - ) - .await - .is_ok() - } - /// Checks if guests are able to view room content without joining pub async fn is_world_readable(&self, room_id: &RoomId) -> bool { self.room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") @@ -649,98 
+128,12 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Checks if a given user can redact a given event - /// - /// If federation is true, it allows redaction events from any user of the - /// same server as the original event sender - pub async fn user_can_redact( - &self, - redacts: &EventId, - sender: &UserId, - room_id: &RoomId, - federation: bool, - ) -> Result { - let redacting_event = self.services.timeline.get_pdu(redacts).await; - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomCreate) - { - return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); - } - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) - { - return Err!(Request(Forbidden( - "Redacting m.room.server_acl will result in the room being inaccessible for \ - everyone (empty allow key), forbidding." - ))); - } - - if let Ok(pl_event_content) = self - .room_state_get_content::( - room_id, - &StateEventType::RoomPowerLevels, - "", - ) - .await - { - let pl_event: RoomPowerLevels = pl_event_content.into(); - Ok(pl_event.user_can_redact_event_of_other(sender) - || pl_event.user_can_redact_own_event(sender) - && if let Ok(redacting_event) = redacting_event { - if federation { - redacting_event.sender.server_name() == sender.server_name() - } else { - redacting_event.sender == sender - } - } else { - false - }) - } else { - // Falling back on m.room.create to judge power level - if let Ok(room_create) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - Ok(room_create.sender == sender - || redacting_event - .as_ref() - .is_ok_and(|redacting_event| redacting_event.sender == sender)) - } else { - Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", - )) - } - } - } - - /// Returns the join rule (`SpaceRoomJoinRule`) for a given room - pub async fn get_join_rule( - &self, - room_id: &RoomId, - ) -> 
Result<(SpaceRoomJoinRule, Vec)> { + /// Returns the join rules for a given room (`JoinRule` type). Will default + /// to Invite if doesnt exist or invalid + pub async fn get_join_rules(&self, room_id: &RoomId) -> JoinRule { self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") .await - .map(|c: RoomJoinRulesEventContent| { - (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) - }) - .or_else(|_| Ok((SpaceRoomJoinRule::Invite, vec![]))) - } - - /// Returns an empty vec if not a restricted room - pub fn allowed_room_ids(&self, join_rule: JoinRule) -> Vec { - let mut room_ids = Vec::with_capacity(1); - if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { - for rule in r.allow { - if let AllowRule::RoomMembership(RoomMembership { room_id: membership }) = rule { - room_ids.push(membership.clone()); - } - } - } - room_ids + .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule) } pub async fn get_room_type(&self, room_id: &RoomId) -> Result { diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs new file mode 100644 index 00000000..89fa2a83 --- /dev/null +++ b/src/service/rooms/state_accessor/room_state.rs @@ -0,0 +1,93 @@ +use std::borrow::Borrow; + +use conduwuit::{ + Result, err, implement, + matrix::{PduEvent, StateKey}, +}; +use futures::{Stream, StreamExt, TryFutureExt}; +use ruma::{EventId, RoomId, events::StateEventType}; +use serde::Deserialize; + +/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). +#[implement(super::Service)] +pub async fn room_state_get_content( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.room_state_get(room_id, event_type, state_key) + .await + .and_then(|event| event.get_content()) +} + +/// Returns the full room state. 
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_state_full<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok).boxed()) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() +} + +/// Returns the full room state pdus +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_state_full_pdus<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok).boxed()) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, +{ + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) + .await +} + +/// Returns a single PDU from `room_id` with key (`event_type`, +/// `state_key`). 
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await +} diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs new file mode 100644 index 00000000..2befec22 --- /dev/null +++ b/src/service/rooms/state_accessor/server_can.rs @@ -0,0 +1,53 @@ +use conduwuit::{implement, utils::stream::ReadyExt}; +use futures::StreamExt; +use ruma::{ + EventId, RoomId, ServerName, + events::{ + StateEventType, + room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + }, +}; + +/// Whether a server is allowed to see an event through federation, based on +/// the room's history_visibility at that event's state. +#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn server_can_see_event( + &self, + origin: &ServerName, + room_id: &RoomId, + event_id: &EventId, +) -> bool { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { + return true; + }; + + let history_visibility = self + .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + let current_server_members = self + .services + .state_cache + .room_members(room_id) + .ready_filter(|member| member.server_name() == origin); + + match history_visibility { + | HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + current_server_members + .any(|member| self.user_was_invited(shortstatehash, member)) + .await + }, + | HistoryVisibility::Joined => { + // Allow if any member on requested server was 
joined, else deny + current_server_members + .any(|member| self.user_was_joined(shortstatehash, member)) + .await + }, + | HistoryVisibility::WorldReadable | HistoryVisibility::Shared | _ => true, + } +} diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs new file mode 100644 index 00000000..169e69e9 --- /dev/null +++ b/src/service/rooms/state_accessor/state.rs @@ -0,0 +1,428 @@ +use std::{borrow::Borrow, ops::Deref, sync::Arc}; + +use conduwuit::{ + Result, at, err, implement, + matrix::{PduEvent, StateKey}, + pair_of, + utils::{ + result::FlatOk, + stream::{BroadbandExt, IterStream, ReadyExt, TryIgnore}, + }, +}; +use conduwuit_database::Deserialized; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, future::try_join, pin_mut}; +use ruma::{ + EventId, OwnedEventId, UserId, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, +}; +use serde::Deserialize; + +use crate::rooms::{ + short::{ShortEventId, ShortStateHash, ShortStateKey}, + state_compressor::{CompressedState, compress_state_event, parse_compressed_state_event}, +}; + +/// The user was a joined member at this state (potentially in the past) +#[implement(super::Service)] +#[inline] +pub async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id).await == MembershipState::Join +} + +/// The user was an invited or joined room member at this state (potentially +/// in the past) +#[implement(super::Service)] +#[inline] +pub async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + let s = self.user_membership(shortstatehash, user_id).await; + s == MembershipState::Join || s == MembershipState::Invite +} + +/// Get membership for given user in state +#[implement(super::Service)] +pub async fn user_membership( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, +) -> MembershipState { + 
self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .await + .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) +} + +/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). +#[implement(super::Service)] +pub async fn state_get_content( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.state_get(shortstatehash, event_type, state_key) + .await + .and_then(|event| event.get_content()) +} + +#[implement(super::Service)] +pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await +} + +#[implement(super::Service)] +pub async fn state_contains_type( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, +) -> bool { + let state_keys = self.state_keys(shortstatehash, event_type); + + pin_mut!(state_keys); + state_keys.next().await.is_some() +} + +#[implement(super::Service)] +pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, +) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.load_full_state(shortstatehash) + .map_ok(|full_state| full_state.range(start..=end).next().copied()) + .await + .flat_ok() + .is_some() +} + +/// Returns a single PDU from `room_id` with key (`event_type`, +/// `state_key`). 
+#[implement(super::Service)] +pub async fn state_get( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result { + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) + .await +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +pub async fn state_get_id( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, +{ + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) + .await?; + + self.services + .short + .get_eventid_from_short(shorteventid) + .await +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .range(start..=end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? +} + +/// Iterates the state_keys for an event_type in the state; current state +/// event_id included. 
+#[implement(super::Service)] +pub fn state_keys_with_ids<'a, Id>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a +where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, + ::Owned: Borrow, +{ + let state_keys_with_short_ids = self + .state_keys_with_shortids(shortstatehash, event_type) + .unzip() + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .shared(); + + let state_keys = state_keys_with_short_ids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = state_keys_with_short_ids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(state_keys) + .ready_filter_map(|(eid, sk)| eid.map(move |eid| (sk, eid)).ok()) +} + +/// Iterates the state_keys for an event_type in the state; current state +/// event_id included. +#[implement(super::Service)] +pub fn state_keys_with_shortids<'a>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a { + let short_ids = self + .state_full_shortids(shortstatehash) + .ignore_err() + .unzip() + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .boxed() + .shared(); + + let shortstatekeys = short_ids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = short_ids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(shorteventids) + .ready_filter_map(|(res, id)| res.map(|res| (res, id)).ok()) + .ready_filter_map(move |((event_type_, state_key), event_id)| { + event_type_.eq(event_type).then_some((state_key, event_id)) + }) +} + +/// Iterates the state_keys for an event_type in the state +#[implement(super::Service)] +pub fn state_keys<'a>( + &'a self, 
+ shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a { + let short_ids = self + .state_full_shortids(shortstatehash) + .ignore_err() + .map(at!(0)); + + self.services + .short + .multi_get_statekey_from_short(short_ids) + .ready_filter_map(Result::ok) + .ready_filter_map(move |(event_type_, state_key)| { + event_type_.eq(event_type).then_some(state_key) + }) +} + +/// Returns the state events removed between the interval (present in .0 but +/// not in .1) +#[implement(super::Service)] +#[inline] +pub fn state_removed( + &self, + shortstatehash: pair_of!(ShortStateHash), +) -> impl Stream + Send + '_ { + self.state_added((shortstatehash.1, shortstatehash.0)) +} + +/// Returns the state events added between the interval (present in .1 but +/// not in .0) +#[implement(super::Service)] +pub fn state_added( + &self, + shortstatehash: pair_of!(ShortStateHash), +) -> impl Stream + Send + '_ { + let a = self.load_full_state(shortstatehash.0); + let b = self.load_full_state(shortstatehash.1); + try_join(a, b) + .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) + .map_ok(IterStream::try_stream) + .try_flatten_stream() + .ignore_err() + .map(parse_compressed_state_event) +} + +#[implement(super::Service)] +pub fn state_full( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + '_ { + self.state_full_pdus(shortstatehash) + .ready_filter_map(|pdu| { + Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) + }) +} + +#[implement(super::Service)] +pub fn state_full_pdus( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + '_ { + let short_ids = self + .state_full_shortids(shortstatehash) + .ignore_err() + .map(at!(1)); + + self.services + .short + .multi_get_eventid_from_short(short_ids) + .ready_filter_map(Result::ok) + .broad_filter_map(move |event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await.ok() + }) +} + +/// Builds a StateMap by iterating 
over all keys that start +/// with state_hash, this gives the full state for the given state_hash. +#[implement(super::Service)] +pub fn state_full_ids<'a, Id>( + &'a self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + 'a +where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, + ::Owned: Borrow, +{ + let shortids = self + .state_full_shortids(shortstatehash) + .ignore_err() + .unzip() + .shared(); + + let shortstatekeys = shortids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = shortids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(shortstatekeys) + .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) +} + +#[implement(super::Service)] +pub fn state_full_shortids( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream> + Send + '_ { + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .deref() + .iter() + .copied() + .map(parse_compressed_state_event) + .collect() + }) + .map_ok(Vec::into_iter) + .map_ok(IterStream::try_stream) + .try_flatten_stream() + .boxed() +} + +#[implement(super::Service)] +#[tracing::instrument(name = "load", level = "debug", skip(self))] +async fn load_full_state(&self, shortstatehash: ShortStateHash) -> Result> { + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .await +} + +/// Returns the state hash for this pdu. 
+#[implement(super::Service)] +pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.db + .shorteventid_shortstatehash + .aqry::(&shorteventid) + }) + .await + .deserialized() +} diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs new file mode 100644 index 00000000..67e0b52b --- /dev/null +++ b/src/service/rooms/state_accessor/user_can.rs @@ -0,0 +1,169 @@ +use conduwuit::{Err, Result, implement, pdu::PduBuilder}; +use ruma::{ + EventId, RoomId, UserId, + events::{ + StateEventType, TimelineEventType, + room::{ + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + member::{MembershipState, RoomMemberEventContent}, + power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, + }, +}; + +use crate::rooms::state::RoomMutexGuard; + +/// Checks if a given user can redact a given event +/// +/// If federation is true, it allows redaction events from any user of the +/// same server as the original event sender +#[implement(super::Service)] +pub async fn user_can_redact( + &self, + redacts: &EventId, + sender: &UserId, + room_id: &RoomId, + federation: bool, +) -> Result { + let redacting_event = self.services.timeline.get_pdu(redacts).await; + + if redacting_event + .as_ref() + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomCreate) + { + return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); + } + + if redacting_event + .as_ref() + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) + { + return Err!(Request(Forbidden( + "Redacting m.room.server_acl will result in the room being inaccessible for \ + everyone (empty allow key), forbidding." 
+ ))); + } + + match self + .room_state_get_content::( + room_id, + &StateEventType::RoomPowerLevels, + "", + ) + .await + { + | Ok(pl_event_content) => { + let pl_event: RoomPowerLevels = pl_event_content.into(); + Ok(pl_event.user_can_redact_event_of_other(sender) + || pl_event.user_can_redact_own_event(sender) + && match redacting_event { + | Ok(redacting_event) => + if federation { + redacting_event.sender.server_name() == sender.server_name() + } else { + redacting_event.sender == sender + }, + | _ => false, + }) + }, + | _ => { + // Falling back on m.room.create to judge power level + match self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + | Ok(room_create) => Ok(room_create.sender == sender + || redacting_event + .as_ref() + .is_ok_and(|redacting_event| redacting_event.sender == sender)), + | _ => Err!(Database( + "No m.room.power_levels or m.room.create events in database for room" + )), + } + }, + } +} + +/// Whether a user is allowed to see an event, based on +/// the room's history_visibility at that event's state. 
+#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn user_can_see_event( + &self, + user_id: &UserId, + room_id: &RoomId, + event_id: &EventId, +) -> bool { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { + return true; + }; + + let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; + + let history_visibility = self + .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + match history_visibility { + | HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + self.user_was_invited(shortstatehash, user_id).await + }, + | HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + self.user_was_joined(shortstatehash, user_id).await + }, + | HistoryVisibility::WorldReadable => true, + | HistoryVisibility::Shared | _ => currently_member, + } +} + +/// Whether a user is allowed to see an event, based on +/// the room's history_visibility at that event's state. 
+#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { + if self.services.state_cache.is_joined(user_id, room_id).await { + return true; + } + + let history_visibility = self + .room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + match history_visibility { + | HistoryVisibility::Invited => + self.services.state_cache.is_invited(user_id, room_id).await, + | HistoryVisibility::WorldReadable => true, + | _ => false, + } +} + +#[implement(super::Service)] +pub async fn user_can_invite( + &self, + room_id: &RoomId, + sender: &UserId, + target_user: &UserId, + state_lock: &RoomMutexGuard, +) -> bool { + self.services + .timeline + .create_hash_and_sign_event( + PduBuilder::state( + target_user.as_str(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + sender, + room_id, + state_lock, + ) + .await + .is_ok() +} diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 0d25142d..d3dbc143 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -4,31 +4,31 @@ use std::{ }; use conduwuit::{ - is_not_empty, + Result, is_not_empty, result::LogErr, - utils::{stream::TryIgnore, ReadyExt, StreamTools}, - warn, Result, + utils::{ReadyExt, StreamTools, stream::TryIgnore}, + warn, }; -use database::{serialize_key, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{future::join5, pin_mut, stream::iter, Stream, StreamExt}; +use database::{Deserialized, Ignore, Interfix, Json, Map, serialize_key}; +use futures::{Stream, StreamExt, future::join5, pin_mut, stream::iter}; use itertools::Itertools; use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, events::{ + AnyStrippedStateEvent, AnySyncStateEvent, 
GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, direct::DirectEvent, room::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, }, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, StateEventType, }, int, serde::Raw, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; -use crate::{account_data, appservice::RegistrationInfo, globals, rooms, users, Dep}; +use crate::{Dep, account_data, appservice::RegistrationInfo, config, globals, rooms, users}; pub struct Service { appservice_in_room_cache: AppServiceInRoomCache, @@ -38,7 +38,9 @@ pub struct Service { struct Services { account_data: Dep, + config: Dep, globals: Dep, + metadata: Dep, state_accessor: Dep, users: Dep, } @@ -70,7 +72,9 @@ impl crate::Service for Service { appservice_in_room_cache: RwLock::new(HashMap::new()), services: Services { account_data: args.depend::("account_data"), + config: args.depend::("config"), globals: args.depend::("globals"), + metadata: args.depend::("rooms::metadata"), state_accessor: args .depend::("rooms::state_accessor"), users: args.depend::("users"), @@ -218,7 +222,7 @@ impl Service { ) .await .ok(); - }; + } // Copy direct chat flag if let Ok(mut direct_event) = self @@ -250,7 +254,7 @@ impl Service { ) .await?; } - }; + } } } @@ -267,6 +271,14 @@ impl Service { }, | MembershipState::Leave | MembershipState::Ban => { self.mark_as_left(user_id, room_id); + + if self.services.globals.user_is_local(user_id) + && (self.services.config.forget_forced_upon_leave + || self.services.metadata.is_banned(room_id).await + || self.services.metadata.is_disabled(room_id).await) + { + self.forget(room_id, user_id); + } }, | _ => {}, } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 532df360..56a91d0e 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ 
b/src/service/rooms/state_compressor/mod.rs @@ -1,15 +1,16 @@ use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeSet, HashMap}, fmt::{Debug, Write}, mem::size_of, sync::{Arc, Mutex}, }; -use arrayvec::ArrayVec; +use async_trait::async_trait; use conduwuit::{ + Result, + arrayvec::ArrayVec, at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, - Result, }; use database::Map; use futures::{Stream, StreamExt}; @@ -17,9 +18,8 @@ use lru_cache::LruCache; use ruma::{EventId, RoomId}; use crate::{ - rooms, + Dep, rooms, rooms::short::{ShortEventId, ShortId, ShortStateHash, ShortStateKey}, - Dep, }; pub struct Service { @@ -63,9 +63,10 @@ type StateInfoLruCache = LruCache; type ShortStateInfoVec = Vec; type ParentStatesVec = Vec; -pub(crate) type CompressedState = HashSet; -pub(crate) type CompressedStateEvent = [u8; 2 * size_of::()]; +pub type CompressedState = BTreeSet; +pub type CompressedStateEvent = [u8; 2 * size_of::()]; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -83,7 +84,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let (cache_len, ents) = { let cache = self.stateinfo_cache.lock().expect("locked"); let ents = cache.iter().map(at!(1)).flat_map(|vec| vec.iter()).fold( @@ -109,7 +110,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } + async fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } @@ -249,8 +250,8 @@ impl Service { pub fn save_state_from_diff( &self, shortstatehash: ShortStateHash, - statediffnew: Arc>, - statediffremoved: Arc>, + statediffnew: Arc, + statediffremoved: Arc, diff_to_sibling: usize, mut 
parent_states: ParentStatesVec, ) -> Result { @@ -304,7 +305,7 @@ impl Service { }); return Ok(()); - }; + } // Else we have two options. // 1. We add the current diff on top of the parent layer. @@ -363,7 +364,7 @@ impl Service { pub async fn save_state( &self, room_id: &RoomId, - new_state_ids_compressed: Arc>, + new_state_ids_compressed: Arc, ) -> Result { let previous_shortstatehash = self .services @@ -396,12 +397,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = new_state_ids_compressed + let statediffnew: CompressedState = new_state_ids_compressed .difference(&parent_stateinfo.full_state) .copied() .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo + let statediffremoved: CompressedState = parent_stateinfo .full_state .difference(&new_state_ids_compressed) .copied() @@ -409,7 +410,7 @@ impl Service { (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (new_state_ids_compressed, Arc::new(HashSet::new())) + (new_state_ids_compressed, Arc::new(CompressedState::new())) }; if !already_existed { @@ -420,7 +421,7 @@ impl Service { 2, // every state change is 2 event changes on average states_parents, )?; - }; + } Ok(HashSetCompressStateEvent { shortstatehash: new_shortstatehash, @@ -448,11 +449,11 @@ impl Service { .take_if(|parent| *parent != 0); debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride"); - let num_values = value.len() / STRIDE; + let _num_values = value.len() / STRIDE; let mut add_mode = true; - let mut added = HashSet::with_capacity(num_values); - let mut removed = HashSet::with_capacity(num_values); + let mut added = CompressedState::new(); + let mut removed = CompressedState::new(); let mut i = STRIDE; while let Some(v) = value.get(i..expected!(i + 2 * STRIDE)) { @@ -469,8 +470,6 @@ impl Service { i = expected!(i + 2 * STRIDE); } - added.shrink_to_fit(); - removed.shrink_to_fit(); Ok(StateDiff { parent, 
added: Arc::new(added), @@ -507,7 +506,7 @@ impl Service { #[inline] #[must_use] -fn compress_state_event( +pub(crate) fn compress_state_event( shortstatekey: ShortStateKey, shorteventid: ShortEventId, ) -> CompressedStateEvent { @@ -523,7 +522,7 @@ fn compress_state_event( #[inline] #[must_use] -pub fn parse_compressed_state_event( +pub(crate) fn parse_compressed_state_event( compressed_event: CompressedStateEvent, ) -> (ShortStateKey, ShortEventId) { use utils::u64_from_u8; diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index bc995e27..a680df55 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -1,22 +1,22 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - err, + Result, err, + matrix::pdu::{PduCount, PduEvent, PduId, RawPduId}, utils::{ - stream::{TryIgnore, WidebandExt}, ReadyExt, + stream::{TryIgnore, WidebandExt}, }, - PduCount, PduEvent, PduId, RawPduId, Result, }; -use database::{Deserialized, Map}; +use conduwuit_database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{ - api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, + api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, }; use serde_json::json; -use crate::{rooms, rooms::short::ShortRoomId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortRoomId}; pub struct Service { db: Data, @@ -121,10 +121,13 @@ impl Service { } let mut users = Vec::new(); - if let Ok(userids) = self.get_participants(&root_id).await { - users.extend_from_slice(&userids); - } else { - users.push(root_pdu.sender); + match self.get_participants(&root_id).await { + | Ok(userids) => { + users.extend_from_slice(&userids); + }, + | _ => { + users.push(root_pdu.sender); + }, } users.push(pdu.sender.clone()); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs 
index 457c1e8d..94c78bb0 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,18 +1,17 @@ use std::{borrow::Borrow, sync::Arc}; use conduwuit::{ - at, err, + Err, PduCount, PduEvent, Result, at, err, result::{LogErr, NotFound}, utils, utils::stream::TryReadyExt, - Err, PduCount, PduEvent, Result, }; use database::{Database, Deserialized, Json, KeyVal, Map}; -use futures::{future::select_ok, pin_mut, FutureExt, Stream, TryFutureExt, TryStreamExt}; -use ruma::{api::Direction, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; +use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut}; +use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, api::Direction}; use super::{PduId, RawPduId}; -use crate::{rooms, rooms::short::ShortRoomId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortRoomId}; pub(super) struct Data { eventid_outlierpdu: Arc, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index bf585a6b..947e1c38 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,6 +1,7 @@ mod data; use std::{ + borrow::Borrow, cmp, collections::{BTreeMap, HashSet}, fmt::Write, @@ -8,22 +9,30 @@ use std::{ sync::Arc, }; +use async_trait::async_trait; +pub use conduwuit::matrix::pdu::{PduId, RawPduId}; use conduwuit::{ - at, debug, debug_warn, err, error, implement, info, - pdu::{gen_event_id, EventHash, PduBuilder, PduCount, PduEvent}, - utils::{ - self, future::TryExtExt, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt, + Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, + matrix::{ + Event, + pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, + state_res::{self, RoomVersion}, }, - validated, warn, Err, Error, Result, Server, + utils::{ + self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, + }, + validated, warn, }; 
-pub use conduwuit::{PduId, RawPduId}; use futures::{ - future, future::ready, pin_mut, Future, FutureExt, Stream, StreamExt, TryStreamExt, + Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, }; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + RoomId, RoomVersionId, ServerName, UserId, api::federation, canonical_json::to_canonical_value, events::{ + GlobalAccountDataEventType, StateEventType, TimelineEventType, push_rules::PushRulesEvent, room::{ create::RoomCreateEventContent, @@ -32,24 +41,21 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent, }, - GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - state_res::{self, Event, RoomVersion}, - uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + uint, }; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use self::data::Data; pub use self::data::PdusIterItem; use crate::{ - account_data, admin, appservice, + Dep, account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, - rooms::{short::ShortRoomId, state_compressor::CompressedStateEvent}, - sending, server_keys, users, Dep, + rooms::{short::ShortRoomId, state_compressor::CompressedState}, + sending, server_keys, users, }; // Update Relationships @@ -107,6 +113,7 @@ struct Services { type RoomMutexMap = MutexMap; pub type RoomMutexGuard = MutexMapGuard; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -140,7 +147,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> 
Result { let mutex_insert = self.mutex_insert.len(); writeln!(out, "insert_mutex: {mutex_insert}")?; @@ -260,14 +267,16 @@ impl Service { /// /// Returns pdu id #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_pdu( - &self, - pdu: &PduEvent, + pub async fn append_pdu<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: Vec, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ - ) -> Result { + leafs: Leafs, + state_lock: &'a RoomMutexGuard, + ) -> Result + where + Leafs: Iterator + Send + 'a, + { // Coalesce database writes for the remainder of this scope. let _cork = self.db.db.cork_and_flush(); @@ -335,12 +344,12 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, leaves, state_lock) + .set_forward_extremities(&pdu.room_id, leafs, state_lock) .await; let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; - let count1 = self.services.globals.next_count()?; + let count1 = self.services.globals.next_count().unwrap(); // Mark as read first so the sending client doesn't get a notification even if // appending fails self.services @@ -358,13 +367,12 @@ impl Service { drop(insert_lock); - // See if the event matches any known pushers + // See if the event matches any known pushers via power level let power_levels: RoomPowerLevelsEventContent = self .services .state_accessor .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") .await - .map_err(|_| err!(Database("invalid m.room.power_levels event"))) .unwrap_or_default(); let sync_pdu = pdu.to_sync_room_event(); @@ -373,9 +381,10 @@ impl Service { .services .state_cache .active_local_users_in_room(&pdu.room_id) - // Don't notify the sender of their own events - .ready_filter(|user| user != &pdu.sender) .map(ToOwned::to_owned) + // Don't notify the sender of their own events, and dont send from ignored users + .ready_filter(|user| *user != pdu.sender) + 
.filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, &recipient_user).await).then_some(recipient_user) }) .collect() .await; @@ -384,10 +393,10 @@ impl Service { if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { - let target_user_id = OwnedUserId::parse(state_key)?; + let target_user_id = UserId::parse(state_key)?; - if self.services.users.is_active_local(&target_user_id).await { - push_target.insert(target_user_id); + if self.services.users.is_active_local(target_user_id).await { + push_target.insert(target_user_id.to_owned()); } } } @@ -418,7 +427,7 @@ impl Service { highlight = true; }, | _ => {}, - }; + } // Break early if both conditions are true if notify && highlight { @@ -480,7 +489,7 @@ impl Service { } } }, - }; + } }, | TimelineEventType::SpaceChild => if let Some(_state_key) = &pdu.state_key { @@ -744,7 +753,7 @@ impl Service { }; let auth_fetch = |k: &StateEventType, s: &str| { - let key = (k.clone(), s.to_owned()); + let key = (k.clone(), s.into()); ready(auth_events.get(&key)) }; @@ -772,7 +781,7 @@ impl Service { | _ => { pdu_json.remove("event_id"); }, - }; + } // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( @@ -819,8 +828,7 @@ impl Service { pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ + state_lock: &RoomMutexGuard, ) -> Result { let (pdu, pdu_json) = self .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) @@ -844,7 +852,7 @@ impl Service { { return Err!(Request(Forbidden("User cannot redact this event."))); } - }; + } }, | _ => { let content: RoomRedactionEventContent = pdu.get_content()?; @@ -860,7 +868,7 @@ impl Service { } }, } - }; + } if pdu.kind == TimelineEventType::RoomMember { let content: RoomMemberEventContent = pdu.get_content()?; @@ -896,7 +904,7 @@ impl Service { pdu_json, // 
Since this PDU references all pdu_leaves we can update the leaves // of the room - vec![(*pdu.event_id).to_owned()], + once(pdu.event_id.borrow()), state_lock, ) .boxed() @@ -943,16 +951,18 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from /// the server that sent the event. #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_incoming_pdu( - &self, - pdu: &PduEvent, + pub async fn append_incoming_pdu<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: Vec, - state_ids_compressed: Arc>, + new_room_leafs: Leafs, + state_ids_compressed: Arc, soft_fail: bool, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ - ) -> Result> { + state_lock: &'a RoomMutexGuard, + ) -> Result> + where + Leafs: Iterator + Send + 'a, + { // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. 
@@ -968,14 +978,14 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) + .set_forward_extremities(&pdu.room_id, new_room_leafs, state_lock) .await; return Ok(None); } let pdu_id = self - .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) + .append_pdu(pdu, pdu_json, new_room_leafs, state_lock) .await?; Ok(Some(pdu_id)) @@ -1288,10 +1298,10 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res } }, | _ => {}, - }; + } }, | _ => {}, - }; + } Ok(()) } diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index c710b33a..a81ee95c 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -1,19 +1,18 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - debug_info, trace, + Result, Server, debug_info, trace, utils::{self, IterStream}, - Result, Server, }; use futures::StreamExt; use ruma::{ + OwnedRoomId, OwnedUserId, RoomId, UserId, api::federation::transactions::edu::{Edu, TypingContent}, events::SyncEphemeralRoomEvent, - OwnedRoomId, OwnedUserId, RoomId, UserId, }; -use tokio::sync::{broadcast, RwLock}; +use tokio::sync::{RwLock, broadcast}; -use crate::{globals, sending, sending::EduBuf, users, Dep}; +use crate::{Dep, globals, sending, sending::EduBuf, users}; pub struct Service { server: Arc, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 6a0c6aa1..bd76f1f4 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,10 +1,10 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Database, Deserialized, Map}; use ruma::{RoomId, UserId}; -use crate::{globals, rooms, rooms::short::ShortStateHash, Dep}; +use crate::{Dep, globals, rooms, rooms::short::ShortStateHash}; pub struct Service { db: Data, diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index 
6b58d964..c7fae11f 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -1,10 +1,10 @@ use std::{fmt::Debug, mem}; use bytes::BytesMut; -use conduwuit::{debug_error, err, trace, utils, warn, Err, Result}; +use conduwuit::{Err, Result, debug_error, err, trace, utils, warn}; use reqwest::Client; use ruma::api::{ - appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, appservice::Registration, }; /// Sends a request to an appservice @@ -25,6 +25,10 @@ where return Ok(None); }; + if dest == *"null" || dest.is_empty() { + return Ok(None); + } + trace!("Appservice URL \"{dest}\", Appservice ID: {}", registration.id); let hs_token = registration.hs_token.as_str(); @@ -34,7 +38,11 @@ where SendAccessToken::IfRequired(hs_token), &VERSIONS, ) - .map_err(|e| err!(BadServerResponse(warn!("Failed to find destination {dest}: {e}"))))? + .map_err(|e| { + err!(BadServerResponse( + warn!(appservice = %registration.id, "Failed to find destination {dest}: {e:?}") + )) + })? 
.map(BytesMut::freeze); let mut parts = http_request.uri().clone().into_parts(); @@ -51,7 +59,7 @@ where let reqwest_request = reqwest::Request::try_from(http_request)?; let mut response = client.execute(reqwest_request).await.map_err(|e| { - warn!("Could not send request to appservice \"{}\" at {dest}: {e}", registration.id); + warn!("Could not send request to appservice \"{}\" at {dest}: {e:?}", registration.id); e })?; @@ -71,7 +79,7 @@ where if !status.is_success() { debug_error!("Appservice response bytes: {:?}", utils::string_from_bytes(&body)); - return Err!(BadServerResponse(error!( + return Err!(BadServerResponse(warn!( "Appservice \"{}\" returned unsuccessful HTTP response {status} at {dest}", registration.id ))); @@ -84,8 +92,8 @@ where ); response.map(Some).map_err(|e| { - err!(BadServerResponse(error!( - "Appservice \"{}\" returned invalid response bytes {dest}: {e}", + err!(BadServerResponse(warn!( + "Appservice \"{}\" returned invalid/malformed response bytes {dest}: {e}", registration.id ))) }) diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 4dd2d5aa..a6bcc2b2 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -1,16 +1,15 @@ use std::{fmt::Debug, sync::Arc}; use conduwuit::{ - at, utils, - utils::{stream::TryIgnore, ReadyExt}, - Error, Result, + Error, Result, at, utils, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Database, Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{OwnedServerName, ServerName, UserId}; use super::{Destination, SendingEvent}; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub(super) type OutgoingItem = (Key, SendingEvent, Destination); pub(super) type SendingItem = (Key, SendingEvent); @@ -102,7 +101,7 @@ impl Data { pub fn active_requests_for( &self, destination: &Destination, - ) -> impl Stream + Send + '_ { + ) -> impl Stream + Send + '_ + use<'_> { let prefix = destination.get_prefix(); self.servercurrentevent_data 
.raw_stream_from(&prefix) @@ -156,7 +155,7 @@ impl Data { pub fn queued_requests( &self, destination: &Destination, - ) -> impl Stream + Send + '_ { + ) -> impl Stream + Send + '_ + use<'_> { let prefix = destination.get_prefix(); self.servernameevent_data .raw_stream_from(&prefix) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b146ad49..08ca7010 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -12,17 +12,17 @@ use std::{ use async_trait::async_trait; use conduwuit::{ - debug, debug_warn, err, error, - utils::{available_parallelism, math::usize_from_u64_truncated, ReadyExt, TryReadyExt}, - warn, Result, Server, + Result, Server, debug, debug_warn, err, error, + smallvec::SmallVec, + utils::{ReadyExt, TryReadyExt, available_parallelism, math::usize_from_u64_truncated}, + warn, }; use futures::{FutureExt, Stream, StreamExt}; use ruma::{ - api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, + api::{OutgoingRequest, appservice::Registration}, }; -use smallvec::SmallVec; -use tokio::task::JoinSet; +use tokio::{task, task::JoinSet}; use self::data::Data; pub use self::{ @@ -30,8 +30,8 @@ pub use self::{ sender::{EDU_LIMIT, PDU_LIMIT}, }; use crate::{ - account_data, client, federation, globals, presence, pusher, rooms, - rooms::timeline::RawPduId, users, Dep, + Dep, account_data, client, federation, globals, presence, pusher, rooms, + rooms::timeline::RawPduId, users, }; pub struct Service { @@ -111,8 +111,15 @@ impl crate::Service for Service { .enumerate() .fold(JoinSet::new(), |mut joinset, (id, _)| { let self_ = self.clone(); + let worker = self_.sender(id); + let worker = if self.unconstrained() { + task::unconstrained(worker).boxed() + } else { + worker.boxed() + }; + let runtime = self.server.runtime(); - let _abort = joinset.spawn_on(self_.sender(id).boxed(), runtime); + let _abort = joinset.spawn_on(worker, runtime); joinset }); @@ -124,7 +131,7 @@ impl crate::Service for Service { 
| Err(error) => { error!(id = ?error.id(), ?error, "sender worker finished"); }, - }; + } } Ok(()) @@ -139,6 +146,8 @@ impl crate::Service for Service { } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + + fn unconstrained(&self) -> bool { true } } impl Service { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 363bb994..fab02f6b 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -2,27 +2,33 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, + atomic::{AtomicU64, AtomicUsize, Ordering}, }, time::{Duration, Instant}, }; -use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; +use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD}; use conduwuit::{ - debug, err, error, + Error, Result, debug, err, error, result::LogErr, trace, - utils::{calculate_hash, continue_exponential_backoff_secs, stream::IterStream, ReadyExt}, - warn, Error, Result, + utils::{ + ReadyExt, calculate_hash, continue_exponential_backoff_secs, + future::TryExtExt, + stream::{BroadbandExt, IterStream, WidebandExt}, + }, + warn, }; use futures::{ + FutureExt, StreamExt, future::{BoxFuture, OptionFuture}, join, pin_mut, stream::FuturesUnordered, - FutureExt, StreamExt, }; use ruma::{ + CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId, + RoomId, RoomVersionId, ServerName, UInt, api::{ appservice::event::push_events::v1::EphemeralData, federation::transactions::{ @@ -35,18 +41,17 @@ use ruma::{ }, device_id, events::{ - push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, - GlobalAccountDataEventType, + AnySyncEphemeralRoomEvent, GlobalAccountDataEventType, push_rules::PushRulesEvent, + receipt::ReceiptType, }, push, serde::Raw, - uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, 
RoomVersionId, ServerName, UInt, + uint, }; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use super::{ - appservice, data::QueueItem, Destination, EduBuf, EduVec, Msg, SendingEvent, Service, + Destination, EduBuf, EduVec, Msg, SendingEvent, Service, appservice, data::QueueItem, }; #[derive(Debug)] @@ -62,8 +67,6 @@ type SendingFuture<'a> = BoxFuture<'a, SendingResult>; type SendingFutures<'a> = FuturesUnordered>; type CurTransactionStatus = HashMap; -const CLEANUP_TIMEOUT_MS: u64 = 3500; - const SELECT_PRESENCE_LIMIT: usize = 256; const SELECT_RECEIPT_LIMIT: usize = 256; const SELECT_EDU_LIMIT: usize = EDU_LIMIT - 2; @@ -135,7 +138,7 @@ impl Service { match response { | Ok(dest) => self.handle_response_ok(&dest, futures, statuses).await, | Err((dest, e)) => Self::handle_response_err(dest, statuses, &e), - }; + } } fn handle_response_err(dest: Destination, statuses: &mut CurTransactionStatus, e: &Error) { @@ -143,7 +146,7 @@ impl Service { statuses.entry(dest).and_modify(|e| { *e = match e { | TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - | TransactionStatus::Retrying(ref n) => + | &mut TransactionStatus::Retrying(ref n) => TransactionStatus::Failed(n.saturating_add(1), Instant::now()), | TransactionStatus::Failed(..) 
=> { panic!("Request that was not even running failed?!") @@ -208,11 +211,12 @@ impl Service { async fn finish_responses<'a>(&'a self, futures: &mut SendingFutures<'a>) { use tokio::{ select, - time::{sleep_until, Instant}, + time::{Instant, sleep_until}, }; + let timeout = self.server.config.sender_shutdown_timeout; + let timeout = Duration::from_secs(timeout); let now = Instant::now(); - let timeout = Duration::from_millis(CLEANUP_TIMEOUT_MS); let deadline = now.checked_add(timeout).unwrap_or(now); loop { trace!("Waiting for {} requests to complete...", futures.len()); @@ -315,10 +319,7 @@ impl Service { if let Destination::Federation(server_name) = dest { if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { debug_assert!(select_edus.len() <= EDU_LIMIT, "exceeded edus limit"); - let select_edus = select_edus - .into_iter() - .map(Into::into) - .map(SendingEvent::Edu); + let select_edus = select_edus.into_iter().map(SendingEvent::Edu); events.extend(select_edus); self.db.set_latest_educount(server_name, last_count); @@ -474,20 +475,25 @@ impl Service { since: (u64, u64), max_edu_count: &AtomicU64, ) -> Option { - let server_rooms = self.services.state_cache.server_rooms(server_name); - - pin_mut!(server_rooms); let mut num = 0; - let mut receipts = BTreeMap::::new(); - while let Some(room_id) = server_rooms.next().await { - let receipt_map = self - .select_edus_receipts_room(room_id, since, max_edu_count, &mut num) - .await; + let receipts: BTreeMap = self + .services + .state_cache + .server_rooms(server_name) + .map(ToOwned::to_owned) + .broad_filter_map(|room_id| async move { + let receipt_map = self + .select_edus_receipts_room(&room_id, since, max_edu_count, &mut num) + .await; - if !receipt_map.read.is_empty() { - receipts.insert(room_id.into(), receipt_map); - } - } + receipt_map + .read + .is_empty() + .eq(&false) + .then_some((room_id, receipt_map)) + }) + .collect() + .await; if receipts.is_empty() { return None; @@ -691,7 +697,7 
@@ impl Service { match event { | SendingEvent::Pdu(pdu_id) => { if let Ok(pdu) = self.services.timeline.get_pdu_from_id(pdu_id).await { - pdu_jsons.push(pdu.to_room_event()); + pdu_jsons.push(pdu.into_room_event()); } }, | SendingEvent::Edu(edu) => @@ -820,9 +826,8 @@ impl Service { | _ => None, }) .stream() - .then(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id)) - .ready_filter_map(Result::ok) - .then(|pdu| self.convert_to_outgoing_federation_event(pdu)) + .wide_filter_map(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id).ok()) + .wide_then(|pdu| self.convert_to_outgoing_federation_event(pdu)) .collect() .await; diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index 305cbfef..64b936b6 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -7,13 +7,13 @@ use std::{ use conduwuit::{ debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn, }; -use futures::{stream::FuturesUnordered, StreamExt}; +use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ - api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, - OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + CanonicalJsonObject, OwnedServerName, OwnedServerSigningKeyId, ServerName, + ServerSigningKeyId, api::federation::discovery::ServerSigningKeys, serde::Raw, }; use serde_json::value::RawValue as RawJsonValue; -use tokio::time::{timeout_at, Instant}; +use tokio::time::{Instant, timeout_at}; use super::key_exists; diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 5a027d64..f9c5bdaf 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -1,12 +1,12 @@ use std::borrow::Borrow; -use conduwuit::{implement, Err, Result}; +use conduwuit::{Err, Result, implement}; use ruma::{ - api::federation::discovery::VerifyKey, CanonicalJsonObject, RoomVersionId, ServerName, - 
ServerSigningKeyId, + CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId, + api::federation::discovery::VerifyKey, }; -use super::{extract_key, PubKeyMap, PubKeys}; +use super::{PubKeyMap, PubKeys, extract_key}; #[implement(super::Service)] pub async fn get_event_keys( @@ -18,8 +18,9 @@ pub async fn get_event_keys( let required = match required_keys(object, version) { | Ok(required) => required, - | Err(e) => - return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")), + | Err(e) => { + return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")); + }, }; let batch = required diff --git a/src/service/server_keys/keypair.rs b/src/service/server_keys/keypair.rs index 6f983c26..259c37fb 100644 --- a/src/service/server_keys/keypair.rs +++ b/src/service/server_keys/keypair.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{debug, debug_info, err, error, utils, utils::string_from_bytes, Result}; +use conduwuit::{Result, debug, debug_info, err, error, utils, utils::string_from_bytes}; use database::Database; use ruma::{api::federation::discovery::VerifyKey, serde::Base64, signatures::Ed25519KeyPair}; diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index 3f6a3039..bf6799ba 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -8,22 +8,21 @@ mod verify; use std::{collections::BTreeMap, sync::Arc, time::Duration}; use conduwuit::{ - implement, - utils::{timepoint_from_now, IterStream}, - Result, Server, + Result, Server, implement, + utils::{IterStream, timepoint_from_now}, }; use database::{Deserialized, Json, Map}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, + ServerName, ServerSigningKeyId, api::federation::discovery::{ServerSigningKeys, VerifyKey}, serde::Raw, signatures::{Ed25519KeyPair, PublicKeyMap, PublicKeySet}, - CanonicalJsonObject, 
MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, - ServerName, ServerSigningKeyId, }; use serde_json::value::RawValue as RawJsonValue; -use crate::{globals, sending, Dep}; +use crate::{Dep, globals, sending}; pub struct Service { keypair: Box, diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index afe8958b..d9907616 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -1,13 +1,13 @@ use std::{collections::BTreeMap, fmt::Debug}; -use conduwuit::{debug, implement, Err, Result}; +use conduwuit::{Err, Result, debug, implement}; use ruma::{ - api::federation::discovery::{ - get_remote_server_keys, - get_remote_server_keys_batch::{self, v2::QueryCriteria}, - get_server_keys, ServerSigningKeys, - }, OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + api::federation::discovery::{ + ServerSigningKeys, get_remote_server_keys, + get_remote_server_keys_batch::{self, v2::QueryCriteria}, + get_server_keys, + }, }; #[implement(super::Service)] @@ -43,7 +43,7 @@ where .keys() .rev() .take(self.services.server.config.trusted_server_batch_size) - .last() + .next_back() .cloned() { let request = Request { @@ -79,7 +79,7 @@ pub async fn notary_request( &self, notary: &ServerName, target: &ServerName, -) -> Result + Clone + Debug + Send> { +) -> Result + Clone + Debug + Send + use<>> { use get_remote_server_keys::v2::Request; let request = Request { diff --git a/src/service/server_keys/sign.rs b/src/service/server_keys/sign.rs index 8d6f108c..e8cc485d 100644 --- a/src/service/server_keys/sign.rs +++ b/src/service/server_keys/sign.rs @@ -1,4 +1,4 @@ -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use ruma::{CanonicalJsonObject, RoomVersionId}; #[implement(super::Service)] diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs index 0f03e59e..84433628 100644 --- a/src/service/server_keys/verify.rs +++ 
b/src/service/server_keys/verify.rs @@ -1,6 +1,6 @@ -use conduwuit::{implement, pdu::gen_event_id_canonical_json, Err, Result}; +use conduwuit::{Err, Result, implement, pdu::gen_event_id_canonical_json}; use ruma::{ - signatures::Verified, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified, }; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/service.rs b/src/service/service.rs index 7adb189e..574efd8f 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -7,7 +7,7 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{err, error::inspect_log, utils::string::SplitInfallible, Err, Result, Server}; +use conduwuit::{Err, Result, Server, err, error::inspect_log, utils::string::SplitInfallible}; use database::Database; /// Abstract interface for a Service @@ -31,14 +31,19 @@ pub(crate) trait Service: Any + Send + Sync { fn interrupt(&self) {} /// Clear any caches or similar runtime state. - fn clear_cache(&self) {} + async fn clear_cache(&self) {} /// Memory usage report in a markdown string. - fn memory_usage(&self, _out: &mut dyn Write) -> Result<()> { Ok(()) } + async fn memory_usage(&self, _out: &mut (dyn Write + Send)) -> Result { Ok(()) } /// Return the name of the service. /// i.e. `crate::service::make_name(std::module_path!())` fn name(&self) -> &str; + + /// Return true if the service worker opts out of the tokio cooperative + /// budgeting. This can reduce tail latency at the risk of event loop + /// starvation. + fn unconstrained(&self) -> bool { false } } /// Args are passed to `Service::build` when a service is constructed. 
This diff --git a/src/service/services.rs b/src/service/services.rs index fb334b96..daece245 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -1,20 +1,21 @@ use std::{ any::Any, collections::BTreeMap, - fmt::Write, sync::{Arc, RwLock}, }; -use conduwuit::{debug, debug_info, info, trace, Result, Server}; +use conduwuit::{Result, Server, debug, debug_info, info, trace, utils::stream::IterStream}; use database::Database; +use futures::{Stream, StreamExt, TryStreamExt}; use tokio::sync::Mutex; use crate::{ - account_data, admin, appservice, client, config, emergency, federation, globals, key_backups, + account_data, admin, announcements, appservice, client, config, emergency, federation, + globals, key_backups, manager::Manager, - media, presence, pusher, resolver, rooms, sending, server_keys, service, + media, moderation, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, - sync, transaction_ids, uiaa, updates, users, + sync, transaction_ids, uiaa, users, }; pub struct Services { @@ -37,8 +38,9 @@ pub struct Services { pub sync: Arc, pub transaction_ids: Arc, pub uiaa: Arc, - pub updates: Arc, pub users: Arc, + pub moderation: Arc, + pub announcements: Arc, manager: Mutex>>, pub(crate) service: Arc, @@ -104,8 +106,9 @@ impl Services { sync: build!(sync::Service), transaction_ids: build!(transaction_ids::Service), uiaa: build!(uiaa::Service), - updates: build!(updates::Service), users: build!(users::Service), + moderation: build!(moderation::Service), + announcements: build!(announcements::Service), manager: Mutex::new(None), service, @@ -171,40 +174,21 @@ impl Services { } pub async fn clear_cache(&self) { - for (service, ..) 
in self.service.read().expect("locked for reading").values() { - if let Some(service) = service.upgrade() { - service.clear_cache(); - } - } - - //TODO - self.rooms - .spaces - .roomid_spacehierarchy_cache - .lock() - .await - .clear(); + self.services() + .for_each(|service| async move { + service.clear_cache().await; + }) + .await; } pub async fn memory_usage(&self) -> Result { - let mut out = String::new(); - for (service, ..) in self.service.read().expect("locked for reading").values() { - if let Some(service) = service.upgrade() { - service.memory_usage(&mut out)?; - } - } - - //TODO - let roomid_spacehierarchy_cache = self - .rooms - .spaces - .roomid_spacehierarchy_cache - .lock() + self.services() + .map(Ok) + .try_fold(String::new(), |mut out, service| async move { + service.memory_usage(&mut out).await?; + Ok(out) + }) .await - .len(); - writeln!(out, "roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}")?; - - Ok(out) } fn interrupt(&self) { @@ -217,6 +201,18 @@ impl Services { } } + /// Iterate from snapshot of the services map + fn services(&self) -> impl Stream> + Send { + self.service + .read() + .expect("locked for reading") + .values() + .filter_map(|val| val.0.upgrade()) + .collect::>() + .into_iter() + .stream() + } + #[inline] pub fn try_get(&self, name: &str) -> Result> where diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index 0b86377a..b095d2c1 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -8,15 +8,15 @@ use std::{ use conduwuit::{Result, Server}; use database::Map; use ruma::{ + OwnedDeviceId, OwnedRoomId, OwnedUserId, api::client::sync::sync_events::{ self, v4::{ExtensionsConfig, SyncRequestList}, v5, }, - DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, }; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { db: Data, @@ -49,8 +49,8 @@ struct Services { struct SlidingSyncCache { lists: BTreeMap, subscriptions: BTreeMap, - known_rooms: BTreeMap>, /* For every room, 
the - * roomsince number */ + // For every room, the roomsince number + known_rooms: BTreeMap>, extensions: ExtensionsConfig, } @@ -98,79 +98,35 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -/// load params from cache if body doesn't contain it, as long as it's allowed -/// in some cases we may need to allow an empty list as an actual value -fn list_or_sticky(target: &mut Vec, cached: &Vec) { - if target.is_empty() { - target.clone_from(cached); - } -} -fn some_or_sticky(target: &mut Option, cached: Option) { - if target.is_none() { - *target = cached; - } -} - impl Service { - pub fn snake_connection_cached( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: Option, - ) -> bool { - self.snake_connections - .lock() - .unwrap() - .contains_key(&(user_id, device_id, conn_id)) - } - - pub fn forget_snake_sync_connection( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: Option, - ) { + pub fn snake_connection_cached(&self, key: &SnakeConnectionsKey) -> bool { self.snake_connections .lock() .expect("locked") - .remove(&(user_id, device_id, conn_id)); + .contains_key(key) } - pub fn remembered( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - ) -> bool { - self.connections - .lock() - .unwrap() - .contains_key(&(user_id, device_id, conn_id)) + pub fn forget_snake_sync_connection(&self, key: &SnakeConnectionsKey) { + self.snake_connections.lock().expect("locked").remove(key); } - pub fn forget_sync_request_connection( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - ) { - self.connections - .lock() - .expect("locked") - .remove(&(user_id, device_id, conn_id)); + pub fn remembered(&self, key: &DbConnectionsKey) -> bool { + self.connections.lock().expect("locked").contains_key(key) + } + + pub fn forget_sync_request_connection(&self, key: &DbConnectionsKey) { + 
self.connections.lock().expect("locked").remove(key); } pub fn update_snake_sync_request_with_cache( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, + snake_key: &SnakeConnectionsKey, request: &mut v5::Request, ) -> BTreeMap> { - let conn_id = request.conn_id.clone(); let mut cache = self.snake_connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id, device_id, conn_id)) + .entry(snake_key.clone()) .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), ); let cached = &mut cached.lock().expect("locked"); @@ -268,25 +224,23 @@ impl Service { pub fn update_sync_request_with_cache( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, + key: &SnakeConnectionsKey, request: &mut sync_events::v4::Request, ) -> BTreeMap> { let Some(conn_id) = request.conn_id.clone() else { return BTreeMap::new(); }; + let key = into_db_key(key.0.clone(), key.1.clone(), conn_id); let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone(cache.entry(key).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + })); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -371,22 +325,18 @@ impl Service { pub fn update_sync_subscriptions( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, + key: &DbConnectionsKey, subscriptions: BTreeMap, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - Arc::new(Mutex::new(SlidingSyncCache { - lists: 
BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + })); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -395,90 +345,81 @@ impl Service { pub fn update_sync_known_rooms( &self, - user_id: &UserId, - device_id: &DeviceId, - conn_id: String, + key: &DbConnectionsKey, list_id: String, new_cached_rooms: BTreeSet, globalsince: u64, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry((user_id.to_owned(), device_id.to_owned(), conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); + let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + })); let cached = &mut cached.lock().expect("locked"); drop(cache); - for (roomid, lastsince) in cached + for (room_id, lastsince) in cached .known_rooms .entry(list_id.clone()) .or_default() .iter_mut() { - if !new_cached_rooms.contains(roomid) { + if !new_cached_rooms.contains(room_id) { *lastsince = 0; } } let list = cached.known_rooms.entry(list_id).or_default(); - for roomid in new_cached_rooms { - list.insert(roomid, globalsince); + for room_id in new_cached_rooms { + list.insert(room_id, globalsince); } } pub fn update_snake_sync_known_rooms( &self, - user_id: &UserId, - device_id: &DeviceId, - conn_id: String, + key: &SnakeConnectionsKey, list_id: String, 
new_cached_rooms: BTreeSet, globalsince: u64, ) { + assert!(key.2.is_some(), "Some(conn_id) required for this call"); let mut cache = self.snake_connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id.to_owned(), device_id.to_owned(), Some(conn_id))) + .entry(key.clone()) .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), ); let cached = &mut cached.lock().expect("locked"); drop(cache); - for (roomid, lastsince) in cached + for (room_id, lastsince) in cached .known_rooms .entry(list_id.clone()) .or_default() .iter_mut() { - if !new_cached_rooms.contains(roomid) { + if !new_cached_rooms.contains(room_id) { *lastsince = 0; } } let list = cached.known_rooms.entry(list_id).or_default(); - for roomid in new_cached_rooms { - list.insert(roomid, globalsince); + for room_id in new_cached_rooms { + list.insert(room_id, globalsince); } } pub fn update_snake_sync_subscriptions( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: Option, + key: &SnakeConnectionsKey, subscriptions: BTreeMap, ) { let mut cache = self.snake_connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id, device_id, conn_id)) + .entry(key.clone()) .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), ); let cached = &mut cached.lock().expect("locked"); @@ -487,3 +428,37 @@ impl Service { cached.subscriptions = subscriptions; } } + +#[inline] +pub fn into_snake_key(user_id: U, device_id: D, conn_id: C) -> SnakeConnectionsKey +where + U: Into, + D: Into, + C: Into>, +{ + (user_id.into(), device_id.into(), conn_id.into()) +} + +#[inline] +pub fn into_db_key(user_id: U, device_id: D, conn_id: C) -> DbConnectionsKey +where + U: Into, + D: Into, + C: Into, +{ + (user_id.into(), device_id.into(), conn_id.into()) +} + +/// load params from cache if body doesn't contain it, as long as it's allowed +/// in some cases we may need to allow an empty list as an actual value +fn list_or_sticky(target: &mut Vec, 
cached: &Vec) { + if target.is_empty() { + target.clone_from(cached); + } +} + +fn some_or_sticky(target: &mut Option, cached: Option) { + if target.is_none() { + *target = cached; + } +} diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs index 0a9c5d15..96981472 100644 --- a/src/service/sync/watch.rs +++ b/src/service/sync/watch.rs @@ -1,5 +1,5 @@ -use conduwuit::{implement, trace, Result}; -use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; +use conduwuit::{Result, implement, trace}; +use futures::{FutureExt, StreamExt, pin_mut, stream::FuturesUnordered}; use ruma::{DeviceId, UserId}; #[implement(super::Service)] diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 912c0b49..9c284b70 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Handle, Map}; use ruma::{DeviceId, TransactionId, UserId}; diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index f7e55251..7803c736 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,23 +1,22 @@ use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashSet}, sync::{Arc, RwLock}, }; use conduwuit::{ - err, error, implement, utils, + Err, Error, Result, err, error, implement, utils, utils::{hash, string::EMPTY}, - Error, Result, }; use database::{Deserialized, Json, Map}; use ruma::{ + CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, api::client::{ error::ErrorKind, uiaa::{AuthData, AuthType, Password, UiaaInfo, UserIdentifier}, }, - CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, }; -use crate::{globals, users, Dep}; +use crate::{Dep, config, globals, users}; pub struct Service { userdevicesessionid_uiaarequest: RwLock, @@ -28,6 +27,7 @@ pub struct Service { struct Services { globals: Dep, users: Dep, + config: Dep, } 
struct Data { @@ -49,6 +49,7 @@ impl crate::Service for Service { services: Services { globals: args.depend::("globals"), users: args.depend::("users"), + config: args.depend::("config"), }, })) } @@ -56,6 +57,26 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +#[implement(Service)] +pub async fn read_tokens(&self) -> Result> { + let mut tokens = HashSet::new(); + if let Some(file) = &self.services.config.registration_token_file.as_ref() { + match std::fs::read_to_string(file) { + | Ok(text) => { + text.split_ascii_whitespace().for_each(|token| { + tokens.insert(token.to_owned()); + }); + }, + | Err(e) => error!("Failed to read the registration token file: {e}"), + } + } + if let Some(token) = &self.services.config.registration_token { + tokens.insert(token.to_owned()); + } + + Ok(tokens) +} + /// Creates a new Uiaa session. Make sure the session token is unique. #[implement(Service)] pub fn create( @@ -122,20 +143,25 @@ pub async fn try_auth( }; #[cfg(not(feature = "element_hacks"))] - let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier - else { + let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier else { return Err(Error::BadRequest( ErrorKind::Unrecognized, "Identifier type not recognized.", )); }; - let user_id = UserId::parse_with_server_name( + let user_id_from_username = UserId::parse_with_server_name( username.clone(), self.services.globals.server_name(), ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; + // Check if the access token being used matches the credentials used for UIAA + if user_id.localpart() != user_id_from_username.localpart() { + return Err!(Request(Forbidden("User ID and access token mismatch."))); + } + let user_id = user_id_from_username; + // Check if password is correct if let Ok(hash) = self.services.users.password_hash(&user_id).await { let hash_matches = hash::verify_password(password, &hash).is_ok(); @@ 
-152,13 +178,8 @@ pub async fn try_auth( uiaainfo.completed.push(AuthType::Password); }, | AuthData::RegistrationToken(t) => { - if self - .services - .globals - .registration_token - .as_ref() - .is_some_and(|reg_token| t.token.trim() == reg_token) - { + let tokens = self.read_tokens().await?; + if tokens.contains(t.token.trim()) { uiaainfo.completed.push(AuthType::RegistrationToken); } else { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs deleted file mode 100644 index 7fd93b6c..00000000 --- a/src/service/updates/mod.rs +++ /dev/null @@ -1,142 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use async_trait::async_trait; -use conduwuit::{debug, info, warn, Result, Server}; -use database::{Deserialized, Map}; -use ruma::events::room::message::RoomMessageEventContent; -use serde::Deserialize; -use tokio::{ - sync::Notify, - time::{interval, MissedTickBehavior}, -}; - -use crate::{admin, client, globals, Dep}; - -pub struct Service { - interval: Duration, - interrupt: Notify, - db: Arc, - services: Services, -} - -struct Services { - admin: Dep, - client: Dep, - globals: Dep, - server: Arc, -} - -#[derive(Debug, Deserialize)] -struct CheckForUpdatesResponse { - updates: Vec, -} - -#[derive(Debug, Deserialize)] -struct CheckForUpdatesResponseEntry { - id: u64, - date: String, - message: String, -} - -const CHECK_FOR_UPDATES_URL: &str = "https://pupbrain.dev/check-for-updates/stable"; -const CHECK_FOR_UPDATES_INTERVAL: u64 = 7200; // 2 hours -const LAST_CHECK_FOR_UPDATES_COUNT: &[u8; 1] = b"u"; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - interval: Duration::from_secs(CHECK_FOR_UPDATES_INTERVAL), - interrupt: Notify::new(), - db: args.db["global"].clone(), - services: Services { - globals: args.depend::("globals"), - admin: args.depend::("admin"), - client: args.depend::("client"), - server: 
args.server.clone(), - }, - })) - } - - #[tracing::instrument(skip_all, name = "updates", level = "debug")] - async fn worker(self: Arc) -> Result<()> { - if !self.services.globals.allow_check_for_updates() { - debug!("Disabling update check"); - return Ok(()); - } - - let mut i = interval(self.interval); - i.set_missed_tick_behavior(MissedTickBehavior::Delay); - i.reset_after(self.interval); - loop { - tokio::select! { - () = self.interrupt.notified() => break, - _ = i.tick() => (), - } - - if let Err(e) = self.check().await { - warn!(%e, "Failed to check for updates"); - } - } - - Ok(()) - } - - fn interrupt(&self) { self.interrupt.notify_waiters(); } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -impl Service { - #[tracing::instrument(skip_all)] - async fn check(&self) -> Result<()> { - debug_assert!(self.services.server.running(), "server must not be shutting down"); - - let response = self - .services - .client - .default - .get(CHECK_FOR_UPDATES_URL) - .send() - .await? 
- .text() - .await?; - - let response = serde_json::from_str::(&response)?; - for update in &response.updates { - if update.id > self.last_check_for_updates_id().await { - self.handle(update).await; - self.update_check_for_updates_id(update.id); - } - } - - Ok(()) - } - - #[tracing::instrument(skip_all)] - async fn handle(&self, update: &CheckForUpdatesResponseEntry) { - info!("{} {:#}", update.date, update.message); - self.services - .admin - .send_message(RoomMessageEventContent::text_markdown(format!( - "### the following is a message from the conduwuit puppy\n\nit was sent on \ - `{}`:\n\n@room: {}", - update.date, update.message - ))) - .await - .ok(); - } - - #[inline] - pub fn update_check_for_updates_id(&self, id: u64) { - self.db.raw_put(LAST_CHECK_FOR_UPDATES_COUNT, id); - } - - pub async fn last_check_for_updates_id(&self) -> u64 { - self.db - .get(LAST_CHECK_FOR_UPDATES_COUNT) - .await - .deserialized() - .unwrap_or(0_u64) - } -} diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b2d3a94a..701561a8 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,25 +1,24 @@ -use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; +use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - debug_warn, err, trace, - utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, - Err, Error, Result, Server, + Err, Error, Result, Server, at, debug_warn, err, trace, + utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted}, }; -use database::{Database, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; +use database::{Deserialized, Ignore, Interfix, Json, Map}; +use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ + DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, + OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, api::client::{device::Device, error::ErrorKind, 
filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{ - ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType, + AnyToDeviceEvent, GlobalAccountDataEventType, ignored_user_list::IgnoredUserListEvent, }, serde::Raw, - DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, - OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, }; use serde_json::json; -use crate::{account_data, admin, globals, rooms, Dep}; +use crate::{Dep, account_data, admin, globals, rooms}; pub struct Service { services: Services, @@ -28,7 +27,6 @@ pub struct Service { struct Services { server: Arc, - db: Arc, account_data: Dep, admin: Dep, globals: Dep, @@ -64,7 +62,6 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { server: args.server.clone(), - db: args.db.clone(), account_data: args.depend::("account_data"), admin: args.depend::("admin"), globals: args.depend::("globals"), @@ -248,10 +245,13 @@ impl Service { /// Sets a new avatar_url or removes it if avatar_url is None. pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) { - if let Some(avatar_url) = avatar_url { - self.db.userid_avatarurl.insert(user_id, &avatar_url); - } else { - self.db.userid_avatarurl.remove(user_id); + match avatar_url { + | Some(avatar_url) => { + self.db.userid_avatarurl.insert(user_id, &avatar_url); + }, + | _ => { + self.db.userid_avatarurl.remove(user_id); + }, } } @@ -278,11 +278,9 @@ impl Service { initial_device_display_name: Option, client_ip: Option, ) -> Result<()> { - // This method should never be called for nonexistent users. We shouldn't assert - // though... 
if !self.exists(user_id).await { return Err!(Request(InvalidParam(error!( - "Called create_device for non-existent {user_id}" + "Called create_device for non-existent user {user_id}" )))); } @@ -352,7 +350,6 @@ impl Service { token: &str, ) -> Result<()> { let key = (user_id, device_id); - // should not be None, but we shouldn't assert either lol... if self.db.userdeviceid_metadata.qry(&key).await.is_err() { return Err!(Database(error!( ?user_id, @@ -516,7 +513,7 @@ impl Service { pub async fn add_cross_signing_keys( &self, user_id: &UserId, - master_key: &Raw, + master_key: &Option>, self_signing_key: &Option>, user_signing_key: &Option>, notify: bool, @@ -525,15 +522,17 @@ impl Service { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xFF); - let (master_key_key, _) = parse_master_key(user_id, master_key)?; + if let Some(master_key) = master_key { + let (master_key_key, _) = parse_master_key(user_id, master_key)?; - self.db - .keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes()); + self.db + .keyid_key + .insert(&master_key_key, master_key.json().get().as_bytes()); - self.db - .userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key); + self.db + .userid_masterkeyid + .insert(user_id.as_bytes(), &master_key_key); + } // Self-signing key if let Some(self_signing_key) = self_signing_key { @@ -569,32 +568,16 @@ impl Service { // User-signing key if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key - .deserialize() - .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? - .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids - .next() - .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; - - if user_signing_key_ids.next().is_some() { - return Err!(Request(InvalidParam( - "User signing key contained more than one key." 
- ))); - } - - let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); + let user_signing_key_id = parse_user_signing_key(user_signing_key)?; + let user_signing_key_key = (user_id, &user_signing_key_id); self.db .keyid_key - .insert(&user_signing_key_key, user_signing_key.json().get().as_bytes()); + .put_raw(user_signing_key_key, user_signing_key.json().get().as_bytes()); self.db .userid_usersigningkeyid - .insert(user_id.as_bytes(), &user_signing_key_key); + .raw_put(user_id, user_signing_key_key); } if notify { @@ -610,7 +593,7 @@ impl Service { key_id: &str, signature: (String, String), sender_id: &UserId, - ) -> Result<()> { + ) -> Result { let key = (target_id, key_id); let mut cross_signing_key: serde_json::Value = self @@ -618,21 +601,27 @@ impl Service { .keyid_key .qry(&key) .await - .map_err(|_| err!(Request(InvalidParam("Tried to sign nonexistent key."))))? + .map_err(|_| err!(Request(InvalidParam("Tried to sign nonexistent key"))))? .deserialized() - .map_err(|e| err!(Database("key in keyid_key is invalid. {e:?}")))?; + .map_err(|e| err!(Database(debug_warn!("key in keyid_key is invalid: {e:?}"))))?; let signatures = cross_signing_key .get_mut("signatures") - .ok_or_else(|| err!(Database("key in keyid_key has no signatures field.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("key in keyid_key has no signatures field"))) + })? .as_object_mut() - .ok_or_else(|| err!(Database("key in keyid_key has invalid signatures field.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("key in keyid_key has invalid signatures field."))) + })? .entry(sender_id.to_string()) .or_insert_with(|| serde_json::Map::new().into()); signatures .as_object_mut() - .ok_or_else(|| err!(Database("signatures in keyid_key for a user is invalid.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("signatures in keyid_key for a user is invalid."))) + })? 
.insert(signature.0, signature.1.into()); let key = (target_id, key_id); @@ -792,44 +781,47 @@ impl Service { &'a self, user_id: &'a UserId, device_id: &'a DeviceId, + since: Option, + to: Option, ) -> impl Stream> + Send + 'a { - let prefix = (user_id, device_id, Interfix); + type Key<'a> = (&'a UserId, &'a DeviceId, u64); + + let from = (user_id, device_id, since.map_or(0, |since| since.saturating_add(1))); + self.db .todeviceid_events - .stream_prefix(&prefix) + .stream_from(&from) .ignore_err() - .map(|(_, val): (Ignore, Raw)| val) + .ready_take_while(move |((user_id_, device_id_, count), _): &(Key<'_>, _)| { + user_id == *user_id_ + && device_id == *device_id_ + && to.is_none_or(|to| *count <= to) + }) + .map(at!(1)) } - pub async fn remove_to_device_events( + pub async fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, - until: u64, - ) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); + until: Until, + ) where + Until: Into> + Send, + { + type Key<'a> = (&'a UserId, &'a DeviceId, u64); - let mut last = prefix.clone(); - last.extend_from_slice(&until.to_be_bytes()); - - let _cork = self.services.db.cork_and_flush(); + let until = until.into().unwrap_or(u64::MAX); + let from = (user_id, device_id, until); self.db .todeviceid_events - .rev_raw_keys_from(&last) // this includes last + .rev_keys_from(&from) .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix)) - .map(|key| { - let len = key.len(); - let start = len.saturating_sub(size_of::()); - let count = utils::u64_from_u8(&key[start..len]); - (key, count) + .ready_take_while(move |(user_id_, device_id_, _): &Key<'_>| { + user_id == *user_id_ && device_id == *device_id_ + }) + .ready_for_each(|key: Key<'_>| { + self.db.todeviceid_events.del(key); }) - .ready_take_while(move |(_, count)| *count <= until) - .ready_for_each(|(key, _)| self.db.todeviceid_events.remove(&key)) - 
.boxed() .await; } @@ -1078,6 +1070,24 @@ pub fn parse_master_key( Ok((master_key_key, master_key)) } +pub fn parse_user_signing_key(user_signing_key: &Raw) -> Result { + let mut user_signing_key_ids = user_signing_key + .deserialize() + .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? + .keys + .into_values(); + + let user_signing_key_id = user_signing_key_ids + .next() + .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; + + if user_signing_key_ids.next().is_some() { + return Err!(Request(InvalidParam("User signing key contained more than one key."))); + } + + Ok(user_signing_key_id) +} + /// Ensure that a user only sees signatures from themselves and the target user fn clean_signatures( mut cross_signing_key: serde_json::Value, diff --git a/tests/sytest/are-we-synapse-yet.list b/tests/sytest/are-we-synapse-yet.list deleted file mode 100644 index 99091989..00000000 --- a/tests/sytest/are-we-synapse-yet.list +++ /dev/null @@ -1,866 +0,0 @@ -reg GET /register yields a set of flows -reg POST /register can create a user -reg POST /register downcases capitals in usernames -reg POST /register returns the same device_id as that in the request -reg POST /register rejects registration of usernames with '!' -reg POST /register rejects registration of usernames with '"' -reg POST /register rejects registration of usernames with ':' -reg POST /register rejects registration of usernames with '?' 
-reg POST /register rejects registration of usernames with '\' -reg POST /register rejects registration of usernames with '@' -reg POST /register rejects registration of usernames with '[' -reg POST /register rejects registration of usernames with ']' -reg POST /register rejects registration of usernames with '{' -reg POST /register rejects registration of usernames with '|' -reg POST /register rejects registration of usernames with '}' -reg POST /register rejects registration of usernames with '£' -reg POST /register rejects registration of usernames with 'é' -reg POST /register rejects registration of usernames with '\n' -reg POST /register rejects registration of usernames with ''' -reg POST /r0/admin/register with shared secret -reg POST /r0/admin/register admin with shared secret -reg POST /r0/admin/register with shared secret downcases capitals -reg POST /r0/admin/register with shared secret disallows symbols -reg POST rejects invalid utf-8 in JSON -log GET /login yields a set of flows -log POST /login can log in as a user -log POST /login returns the same device_id as that in the request -log POST /login can log in as a user with just the local part of the id -log POST /login as non-existing user is rejected -log POST /login wrong password is rejected -log Interactive authentication types include SSO -log Can perform interactive authentication with SSO -log The user must be consistent through an interactive authentication session with SSO -log The operation must be consistent through an interactive authentication session -v1s GET /events initially -v1s GET /initialSync initially -csa Version responds 200 OK with valid structure -pro PUT /profile/:user_id/displayname sets my name -pro GET /profile/:user_id/displayname publicly accessible -pro PUT /profile/:user_id/avatar_url sets my avatar -pro GET /profile/:user_id/avatar_url publicly accessible -dev GET /device/{deviceId} -dev GET /device/{deviceId} gives a 404 for unknown devices -dev GET /devices -dev PUT 
/device/{deviceId} updates device fields -dev PUT /device/{deviceId} gives a 404 for unknown devices -dev DELETE /device/{deviceId} -dev DELETE /device/{deviceId} requires UI auth user to match device owner -dev DELETE /device/{deviceId} with no body gives a 401 -dev The deleted device must be consistent through an interactive auth session -dev Users receive device_list updates for their own devices -pre GET /presence/:user_id/status fetches initial status -pre PUT /presence/:user_id/status updates my presence -crm POST /createRoom makes a public room -crm POST /createRoom makes a private room -crm POST /createRoom makes a private room with invites -crm POST /createRoom makes a room with a name -crm POST /createRoom makes a room with a topic -syn Can /sync newly created room -crm POST /createRoom creates a room with the given version -crm POST /createRoom rejects attempts to create rooms with numeric versions -crm POST /createRoom rejects attempts to create rooms with unknown versions -crm POST /createRoom ignores attempts to set the room version via creation_content -mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -mem GET /rooms/:room_id/joined_members fetches my membership -v1s GET /rooms/:room_id/initialSync fetches initial sync state -pub GET /publicRooms lists newly-created room -ali GET /directory/room/:room_alias yields room ID -mem GET /joined_rooms lists newly-created room -rst POST /rooms/:room_id/state/m.room.name sets name -rst GET /rooms/:room_id/state/m.room.name gets name -rst POST /rooms/:room_id/state/m.room.topic sets topic -rst GET /rooms/:room_id/state/m.room.topic gets topic -rst GET /rooms/:room_id/state fetches entire room state -crm POST /createRoom with creation content -ali PUT /directory/room/:room_alias creates alias -nsp GET 
/rooms/:room_id/aliases lists aliases -jon POST /rooms/:room_id/join can join a room -jon POST /join/:room_alias can join a room -jon POST /join/:room_id can join a room -jon POST /join/:room_id can join a room with custom content -jon POST /join/:room_alias can join a room with custom content -lev POST /rooms/:room_id/leave can leave a room -inv POST /rooms/:room_id/invite can send an invite -ban POST /rooms/:room_id/ban can ban a user -snd POST /rooms/:room_id/send/:event_type sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -get GET /rooms/:room_id/messages returns a message -get GET /rooms/:room_id/messages lazy loads members correctly -typ PUT /rooms/:room_id/typing/:user_id sets typing notification -typ Typing notifications don't leak (3 subtests) -rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels -rst PUT /rooms/:room_id/state/m.room.power_levels can set levels -rst PUT power_levels should not explode if the old power levels were empty -rst Both GET and PUT work -rct POST /rooms/:room_id/receipt can create receipts -red POST /rooms/:room_id/read_markers can create read marker -med POST /media/r0/upload can create an upload -med GET /media/r0/download can fetch the value again -cap GET /capabilities is present and well formed for registered user -cap GET /r0/capabilities is not public -reg Register with a recaptcha -reg registration is idempotent, without username specified -reg registration is idempotent, with username specified -reg registration remembers parameters -reg registration accepts non-ascii passwords -reg registration with inhibit_login inhibits login -reg User signups are forbidden from starting with '_' -reg Can register using an email address -log Can login with 3pid and password using m.login.password -log login types include SSO -log /login/cas/redirect redirects if the old m.login.cas login type is listed -log Can 
login with new user via CAS -lox Can logout current device -lox Can logout all devices -lox Request to logout with invalid an access token is rejected -lox Request to logout without an access token is rejected -log After changing password, can't log in with old password -log After changing password, can log in with new password -log After changing password, existing session still works -log After changing password, a different session no longer works by default -log After changing password, different sessions can optionally be kept -psh Pushers created with a different access token are deleted on password change -psh Pushers created with a the same access token are not deleted on password change -acc Can deactivate account -acc Can't deactivate account with wrong password -acc After deactivating account, can't log in with password -acc After deactivating account, can't log in with an email -v1s initialSync sees my presence status -pre Presence change reports an event to myself -pre Friends presence changes reports events -crm Room creation reports m.room.create to myself -crm Room creation reports m.room.member to myself -rst Setting room topic reports m.room.topic to myself -v1s Global initialSync -v1s Global initialSync with limit=0 gives no messages -v1s Room initialSync -v1s Room initialSync with limit=0 gives no messages -rst Setting state twice is idempotent -jon Joining room twice is idempotent -syn New room members see their own join event -v1s New room members see existing users' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new members' presence -v1s All room members see all room members' presence in global initialSync -f,jon Remote users can join room by alias -syn New room members see their own join event -v1s New room members see existing members' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new member's presence -v1s New room 
members see first user's profile information in global initialSync -v1s New room members see first user's profile information in per-room initialSync -f,jon Remote users may not join unfederated rooms -syn Local room members see posted message events -v1s Fetching eventstream a second time doesn't yield the message again -syn Local non-members don't see posted message events -get Local room members can get room messages -f,syn Remote room members also see posted message events -f,get Remote room members can get room messages -get Message history can be paginated -f,get Message history can be paginated over federation -eph Ephemeral messages received from clients are correctly expired -ali Room aliases can contain Unicode -f,ali Remote room alias queries can handle Unicode -ali Canonical alias can be set -ali Canonical alias can include alt_aliases -ali Regular users can add and delete aliases in the default room configuration -ali Regular users can add and delete aliases when m.room.aliases is restricted -ali Deleting a non-existent alias should return a 404 -ali Users can't delete other's aliases -ali Users with sufficient power-level can delete other's aliases -ali Can delete canonical alias -ali Alias creators can delete alias with no ops -ali Alias creators can delete canonical alias with no ops -ali Only room members can list aliases of a room -inv Can invite users to invite-only rooms -inv Uninvited users cannot join the room -inv Invited user can reject invite -f,inv Invited user can reject invite over federation -f,inv Invited user can reject invite over federation several times -inv Invited user can reject invite for empty room -f,inv Invited user can reject invite over federation for empty room -inv Invited user can reject local invite after originator leaves -inv Invited user can see room metadata -f,inv Remote invited user can see room metadata -inv Users cannot invite themselves to a room -inv Users cannot invite a user that is already in the room -ban 
Banned user is kicked and may not rejoin until unbanned -f,ban Remote banned user is kicked and may not rejoin until unbanned -ban 'ban' event respects room powerlevel -plv setting 'm.room.name' respects room powerlevel -plv setting 'm.room.power_levels' respects room powerlevel (2 subtests) -plv Unprivileged users can set m.room.topic if it only needs level 0 -plv Users cannot set ban powerlevel higher than their own (2 subtests) -plv Users cannot set kick powerlevel higher than their own (2 subtests) -plv Users cannot set redact powerlevel higher than their own (2 subtests) -v1s Check that event streams started after a client joined a room work (SYT-1) -v1s Event stream catches up fully after many messages -xxx POST /rooms/:room_id/redact/:event_id as power user redacts message -xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message -xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message -xxx POST /redact disallows redaction of event in different room -xxx Redaction of a redaction redacts the redaction reason -v1s A departed room is still included in /initialSync (SPEC-216) -v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) -rst Can get rooms/{roomId}/state for a departed room (SPEC-216) -mem Can get rooms/{roomId}/members for a departed room (SPEC-216) -get Can get rooms/{roomId}/messages for a departed room (SPEC-216) -rst Can get 'm.room.name' state for a departed room (SPEC-216) -syn Getting messages going forward is limited for a departed room (SPEC-216) -3pd Can invite existing 3pid -3pd Can invite existing 3pid with no ops into a private room -3pd Can invite existing 3pid in createRoom -3pd Can invite unbound 3pid -f,3pd Can invite unbound 3pid over federation -3pd Can invite unbound 3pid with no ops into a private room -f,3pd Can invite unbound 3pid over federation with no ops into a private room -f,3pd Can invite unbound 3pid over federation with users from both servers -3pd Can 
accept unbound 3pid invite after inviter leaves -3pd Can accept third party invite with /join -3pd 3pid invite join with wrong but valid signature are rejected -3pd 3pid invite join valid signature but revoked keys are rejected -3pd 3pid invite join valid signature but unreachable ID server are rejected -gst Guest user cannot call /events globally -gst Guest users can join guest_access rooms -gst Guest users can send messages to guest_access rooms if joined -gst Guest user calling /events doesn't tightloop -gst Guest users are kicked from guest_access rooms on revocation of guest_access -gst Guest user can set display names -gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation -gst Guest user can upgrade to fully featured user -gst Guest user cannot upgrade other users -pub GET /publicRooms lists rooms -pub GET /publicRooms includes avatar URLs -gst Guest users can accept invites to private rooms over federation -gst Guest users denied access over federation if guest access prohibited -mem Room members can override their displayname on a room-specific basis -mem Room members can join a room with an overridden displayname -mem Users cannot kick users from a room they are not in -mem Users cannot kick users who have already left a room -typ Typing notification sent to local room members -f,typ Typing notifications also sent to remote room members -typ Typing can be explicitly stopped -rct Read receipts are visible to /initialSync -rct Read receipts are sent as events -rct Receipts must be m.read -pro displayname updates affect room member events -pro avatar_url updates affect room member events -gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "shared" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "joined" allows/forbids 
appropriately for Guest users -gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users -gst Guest non-joined user cannot call /events on shared room -gst Guest non-joined user cannot call /events on invited room -gst Guest non-joined user cannot call /events on joined room -gst Guest non-joined user cannot call /events on default room -gst Guest non-joined user can call /events on world_readable room -gst Guest non-joined users can get state for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms -gst Guest non-joined users cannot room initalSync for non-world_readable rooms -gst Guest non-joined users can room initialSync for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms after leaving -gst Guest non-joined users cannot send messages to guest_access rooms if not joined -gst Guest users can sync from world_readable guest_access rooms if joined -gst Guest users can sync from shared guest_access rooms if joined -gst Guest users can sync from invited guest_access rooms if joined -gst Guest users can sync from joined guest_access rooms if joined -gst Guest users can sync from default guest_access rooms if joined -ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users -ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users -ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users -ath m.room.history_visibility == "default" allows/forbids appropriately for Real users -ath Real non-joined user cannot call /events on shared room -ath Real non-joined user cannot call /events on invited room -ath Real non-joined user cannot call /events on joined room -ath Real non-joined user cannot call /events on default room -ath Real non-joined user can call /events on 
world_readable room -ath Real non-joined users can get state for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms -ath Real non-joined users cannot room initalSync for non-world_readable rooms -ath Real non-joined users can room initialSync for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms after leaving -ath Real non-joined users cannot send messages to guest_access rooms if not joined -ath Real users can sync from world_readable guest_access rooms if joined -ath Real users can sync from shared guest_access rooms if joined -ath Real users can sync from invited guest_access rooms if joined -ath Real users can sync from joined guest_access rooms if joined -ath Real users can sync from default guest_access rooms if joined -ath Only see history_visibility changes on boundaries -f,ath Backfill works correctly with history visibility set to joined -fgt Forgotten room messages cannot be paginated -fgt Forgetting room does not show up in v2 /sync -fgt Can forget room you've been kicked from -fgt Can't forget room you're still in -fgt Can re-join room if re-invited -ath Only original members of the room can see messages from erased users -mem /joined_rooms returns only joined rooms -mem /joined_members return joined members -ctx /context/ on joined room works -ctx /context/ on non world readable room does not work -ctx /context/ returns correct number of events -ctx /context/ with lazy_load_members filter works -get /event/ on joined room works -get /event/ on non world readable room does not work -get /event/ does not allow access to events before the user joined -mem Can get rooms/{roomId}/members -mem Can get rooms/{roomId}/members at a given point -mem Can filter rooms/{roomId}/members -upg /upgrade creates a new room -upg /upgrade should preserve room visibility for public rooms -upg /upgrade should preserve room visibility for private rooms -upg /upgrade copies >100 
power levels to the new room -upg /upgrade copies the power levels to the new room -upg /upgrade preserves the power level of the upgrading user in old and new rooms -upg /upgrade copies important state to the new room -upg /upgrade copies ban events to the new room -upg local user has push rules copied to upgraded room -f,upg remote user has push rules copied to upgraded room -upg /upgrade moves aliases to the new room -upg /upgrade moves remote aliases to the new room -upg /upgrade preserves direct room state -upg /upgrade preserves room federation ability -upg /upgrade restricts power levels in the old room -upg /upgrade restricts power levels in the old room when the old PLs are unusual -upg /upgrade to an unknown version is rejected -upg /upgrade is rejected if the user can't send state events -upg /upgrade of a bogus room fails gracefully -upg Cannot send tombstone event that points to the same room -f,upg Local and remote users' homeservers remove a room from their public directory on upgrade -rst Name/topic keys are correct -f,pub Can get remote public room list -pub Can paginate public room list -pub Can search public room list -syn Can create filter -syn Can download filter -syn Can sync -syn Can sync a joined room -syn Full state sync includes joined rooms -syn Newly joined room is included in an incremental sync -syn Newly joined room has correct timeline in incremental sync -syn Newly joined room includes presence in incremental sync -syn Get presence for newly joined members in incremental sync -syn Can sync a room with a single message -syn Can sync a room with a message with a transaction id -syn A message sent after an initial sync appears in the timeline of an incremental sync. 
-syn A filtered timeline reaches its limit -syn Syncing a new room with a large timeline limit isn't limited -syn A full_state incremental update returns only recent timeline -syn A prev_batch token can be used in the v1 messages API -syn A next_batch token can be used in the v1 messages API -syn User sees their own presence in a sync -syn User is offline if they set_presence=offline in their sync -syn User sees updates to presence from other users in the incremental sync. -syn State is included in the timeline in the initial sync -f,syn State from remote users is included in the state in the initial sync -syn Changes to state are included in an incremental sync -syn Changes to state are included in an gapped incremental sync -f,syn State from remote users is included in the timeline in an incremental sync -syn A full_state incremental update returns all state -syn When user joins a room the state is included in the next sync -syn A change to displayname should not result in a full state sync -syn A change to displayname should appear in incremental /sync -syn When user joins a room the state is included in a gapped sync -syn When user joins and leaves a room in the same batch, the full state is still included in the next sync -syn Current state appears in timeline in private history -syn Current state appears in timeline in private history with many messages before -syn Current state appears in timeline in private history with many messages after -syn Rooms a user is invited to appear in an initial sync -syn Rooms a user is invited to appear in an incremental sync -syn Newly joined room is included in an incremental sync after invite -syn Sync can be polled for updates -syn Sync is woken up for leaves -syn Left rooms appear in the leave section of sync -syn Newly left rooms appear in the leave section of incremental sync -syn We should see our own leave event, even if history_visibility is restricted (SYN-662) -syn We should see our own leave event when rejecting 
an invite, even if history_visibility is restricted (riot-web/3462) -syn Newly left rooms appear in the leave section of gapped sync -syn Previously left rooms don't appear in the leave section of sync -syn Left rooms appear in the leave section of full state sync -syn Archived rooms only contain history from before the user left -syn Banned rooms appear in the leave section of sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Typing events appear in initial sync -syn Typing events appear in incremental sync -syn Typing events appear in gapped sync -syn Read receipts appear in initial v2 /sync -syn New read receipts appear in incremental v2 /sync -syn Can pass a JSON filter as a query parameter -syn Can request federation format via the filter -syn Read markers appear in incremental v2 /sync -syn Read markers appear in initial v2 /sync -syn Read markers can be updated -syn Lazy loading parameters in the filter are strictly boolean -syn The only membership state included in an initial sync is for all the senders in the timeline -syn The only membership state included in an incremental sync is for senders in the timeline -syn The only membership state included in a gapped incremental sync is for senders in the timeline -syn Gapped incremental syncs include all state changes -syn Old leaves are present in gapped incremental syncs -syn Leaves are present in non-gapped incremental syncs -syn Old members are included in gappy incr LL sync if they start speaking -syn Members from the gap are included in gappy incr LL sync -syn We don't send redundant membership state across incremental syncs by default -syn We do send redundant membership state across incremental syncs if asked -syn Unnamed room comes with a name summary -syn Named room comes with just joined member count summary -syn Room summary only has 5 heroes -syn Room summary counts change when membership changes 
-rmv User can create and send/receive messages in a room with version 1 -rmv User can create and send/receive messages in a room with version 1 (2 subtests) -rmv local user can join room with version 1 -rmv User can invite local user to room with version 1 -rmv remote user can join room with version 1 -rmv User can invite remote user to room with version 1 -rmv Remote user can backfill in a room with version 1 -rmv Can reject invites over federation for rooms with version 1 -rmv Can receive redactions from regular users over federation in room version 1 -rmv User can create and send/receive messages in a room with version 2 -rmv User can create and send/receive messages in a room with version 2 (2 subtests) -rmv local user can join room with version 2 -rmv User can invite local user to room with version 2 -rmv remote user can join room with version 2 -rmv User can invite remote user to room with version 2 -rmv Remote user can backfill in a room with version 2 -rmv Can reject invites over federation for rooms with version 2 -rmv Can receive redactions from regular users over federation in room version 2 -rmv User can create and send/receive messages in a room with version 3 -rmv User can create and send/receive messages in a room with version 3 (2 subtests) -rmv local user can join room with version 3 -rmv User can invite local user to room with version 3 -rmv remote user can join room with version 3 -rmv User can invite remote user to room with version 3 -rmv Remote user can backfill in a room with version 3 -rmv Can reject invites over federation for rooms with version 3 -rmv Can receive redactions from regular users over federation in room version 3 -rmv User can create and send/receive messages in a room with version 4 -rmv User can create and send/receive messages in a room with version 4 (2 subtests) -rmv local user can join room with version 4 -rmv User can invite local user to room with version 4 -rmv remote user can join room with version 4 -rmv User can 
invite remote user to room with version 4 -rmv Remote user can backfill in a room with version 4 -rmv Can reject invites over federation for rooms with version 4 -rmv Can receive redactions from regular users over federation in room version 4 -rmv User can create and send/receive messages in a room with version 5 -rmv User can create and send/receive messages in a room with version 5 (2 subtests) -rmv local user can join room with version 5 -rmv User can invite local user to room with version 5 -rmv remote user can join room with version 5 -rmv User can invite remote user to room with version 5 -rmv Remote user can backfill in a room with version 5 -rmv Can reject invites over federation for rooms with version 5 -rmv Can receive redactions from regular users over federation in room version 5 -rmv User can create and send/receive messages in a room with version 6 -rmv User can create and send/receive messages in a room with version 6 (2 subtests) -rmv local user can join room with version 6 -rmv User can invite local user to room with version 6 -rmv remote user can join room with version 6 -rmv User can invite remote user to room with version 6 -rmv Remote user can backfill in a room with version 6 -rmv Can reject invites over federation for rooms with version 6 -rmv Can receive redactions from regular users over federation in room version 6 -rmv Inbound federation rejects invites which include invalid JSON for room version 6 -rmv Outbound federation rejects invite response which include invalid JSON for room version 6 -rmv Inbound federation rejects invite rejections which include invalid JSON for room version 6 -rmv Server rejects invalid JSON in a version 6 room -pre Presence changes are reported to local room members -f,pre Presence changes are also reported to remote room members -pre Presence changes to UNAVAILABLE are reported to local room members -f,pre Presence changes to UNAVAILABLE are reported to remote room members -v1s Newly created users see their 
own presence in /initialSync (SYT-34) -dvk Can upload device keys -dvk Should reject keys claiming to belong to a different user -dvk Can query device keys using POST -dvk Can query specific device keys using POST -dvk query for user with no keys returns empty key dict -dvk Can claim one time key using POST -f,dvk Can query remote device keys using POST -f,dvk Can claim remote one time key using POST -dvk Local device key changes appear in v2 /sync -dvk Local new device changes appear in v2 /sync -dvk Local delete device changes appear in v2 /sync -dvk Local update device changes appear in v2 /sync -dvk Can query remote device keys using POST after notification -f,dev Device deletion propagates over federation -f,dev If remote user leaves room, changes device and rejoins we see update in sync -f,dev If remote user leaves room we no longer receive device updates -dvk Local device key changes appear in /keys/changes -dvk New users appear in /keys/changes -f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes -dvk Get left notifs in sync and /keys/changes when other user leaves -dvk Get left notifs for other users in sync and /keys/changes when user leaves -f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes -dkb Can create backup version -dkb Can update backup version -dkb Responds correctly when backup is empty -dkb Can backup keys -dkb Can update keys with better versions -dkb Will not update keys with worse versions -dkb Will not back up to an old backup version -dkb Can delete backup -dkb Deleted & recreated backups are empty -dkb Can create more than 10 backup versions -xsk Can upload self-signing keys -xsk Fails to upload self-signing keys with no auth -xsk Fails to upload self-signing key without master key -xsk Changing master key notifies local users -xsk Changing user-signing key notifies local users -f,xsk can fetch self-signing keys over federation -f,xsk 
uploading self-signing key notifies over federation -f,xsk uploading signed devices gets propagated over federation -tag Can add tag -tag Can remove tag -tag Can list tags for a room -v1s Tags appear in the v1 /events stream -v1s Tags appear in the v1 /initalSync -v1s Tags appear in the v1 room initial sync -tag Tags appear in an initial v2 /sync -tag Newly updated tags appear in an incremental v2 /sync -tag Deleted tags appear in an incremental v2 /sync -tag local user has tags copied to the new room -f,tag remote user has tags copied to the new room -sch Can search for an event by body -sch Can get context around search results -sch Can back-paginate search results -sch Search works across an upgraded room and its predecessor -sch Search results with rank ordering do not include redacted events -sch Search results with recent ordering do not include redacted events -acc Can add account data -acc Can add account data to room -acc Can get account data without syncing -acc Can get room account data without syncing -v1s Latest account data comes down in /initialSync -v1s Latest account data comes down in room initialSync -v1s Account data appears in v1 /events stream -v1s Room account data appears in v1 /events stream -acc Latest account data appears in v2 /sync -acc New account data appears in incremental v2 /sync -oid Can generate a openid access_token that can be exchanged for information about a user -oid Invalid openid access tokens are rejected -oid Requests to userinfo without access tokens are rejected -std Can send a message directly to a device using PUT /sendToDevice -std Can recv a device message using /sync -std Can recv device messages until they are acknowledged -std Device messages with the same txn_id are deduplicated -std Device messages wake up /sync -std Can recv device messages over federation -fsd Device messages over federation wake up /sync -std Can send messages with a wildcard device id -std Can send messages with a wildcard device id to two 
devices -std Wildcard device messages wake up /sync -fsd Wildcard device messages over federation wake up /sync -adm /whois -nsp /purge_history -nsp /purge_history by ts -nsp Can backfill purged history -nsp Shutdown room -ign Ignore user in existing room -ign Ignore invite in full sync -ign Ignore invite in incremental sync -fky Checking local federation server -fky Federation key API allows unsigned requests for keys -fky Federation key API can act as a notary server via a GET request -fky Federation key API can act as a notary server via a POST request -fky Key notary server should return an expired key if it can't find any others -fky Key notary server must not overwrite a valid key with a spurious result from the origin server -fqu Non-numeric ports in server names are rejected -fqu Outbound federation can query profile data -fqu Inbound federation can query profile data -fqu Outbound federation can query room alias directory -fqu Inbound federation can query room alias directory -fsj Outbound federation can query v1 /send_join -fsj Outbound federation can query v2 /send_join -fmj Outbound federation passes make_join failures through to the client -fsj Inbound federation can receive v1 /send_join -fsj Inbound federation can receive v2 /send_join -fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms -fsj Inbound /v1/send_join rejects incorrectly-signed joins -fsj Inbound /v1/send_join rejects joins from other servers -fau Inbound federation rejects remote attempts to kick local users to rooms -frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support -frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support -frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -frv Inbound federation accepts attempts to join v2 rooms from servers with support -frv Outbound federation correctly handles unsupported room versions -frv A pair of servers 
can establish a join in a v2 room -fsj Outbound federation rejects send_join responses with no m.room.create event -frv Outbound federation rejects m.room.create events with an unknown room version -fsj Event with an invalid signature in the send_join response should not cause room join to fail -fsj Inbound: send_join rejects invalid JSON for room version 6 -fed Outbound federation can send events -fed Inbound federation can receive events -fed Inbound federation can receive redacted events -fed Ephemeral messages received from servers are correctly expired -fed Events whose auth_events are in the wrong room do not mess up the room state -fed Inbound federation can return events -fed Inbound federation redacts events from erased users -fme Outbound federation can request missing events -fme Inbound federation can return missing events for world_readable visibility -fme Inbound federation can return missing events for shared visibility -fme Inbound federation can return missing events for invite visibility -fme Inbound federation can return missing events for joined visibility -fme outliers whose auth_events are in a different room are correctly rejected -fbk Outbound federation can backfill events -fbk Inbound federation can backfill events -fbk Backfill checks the events requested belong to the room -fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -fiv Outbound federation can send invites via v1 API -fiv Outbound federation can send invites via v2 API -fiv Inbound federation can receive invites via v1 API -fiv Inbound federation can receive invites via v2 API -fiv Inbound federation can receive invite and reject when remote replies with a 403 -fiv Inbound federation can receive invite and reject when remote replies with a 500 -fiv Inbound federation can receive invite and reject when remote is unreachable -fiv Inbound federation rejects invites which are not signed by the sender -fiv Inbound federation can 
receive invite rejections -fiv Inbound federation rejects incorrectly-signed invite rejections -fsl Inbound /v1/send_leave rejects leaves from other servers -fst Inbound federation can get state for a room -fst Inbound federation of state requires event_id as a mandatory paramater -fst Inbound federation can get state_ids for a room -fst Inbound federation of state_ids requires event_id as a mandatory paramater -fst Federation rejects inbound events where the prev_events cannot be found -fst Room state at a rejected message event is the same as its predecessor -fst Room state at a rejected state event is the same as its predecessor -fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state -fst Federation handles empty auth_events in state_ids sanely -fst Getting state checks the events requested belong to the room -fst Getting state IDs checks the events requested belong to the room -fst Should not be able to take over the room by pretending there is no PL event -fpb Inbound federation can get public room list -fed Outbound federation sends receipts -fed Inbound federation rejects receipts from wrong remote -fed Inbound federation ignores redactions from invalid servers room > v3 -fed An event which redacts an event in a different room should be ignored -fed An event which redacts itself should be ignored -fed A pair of events which redact each other should be ignored -fdk Local device key changes get to remote servers -fdk Server correctly handles incoming m.device_list_update -fdk Server correctly resyncs when client query keys and there is no remote cache -fdk Server correctly resyncs when server leaves and rejoins a room -fdk Local device key changes get to remote servers with correct prev_id -fdk Device list doesn't change if remote server is down -fdk If a device list update goes missing, the server resyncs on the next one -fst Name/topic keys are correct -fau Remote servers cannot set power levels in rooms 
without existing powerlevels -fau Remote servers should reject attempts by non-creators to set the power levels -fau Inbound federation rejects typing notifications from wrong remote -fau Users cannot set notifications powerlevel higher than their own -fed Forward extremities remain so even after the next events are populated as outliers -fau Banned servers cannot send events -fau Banned servers cannot /make_join -fau Banned servers cannot /send_join -fau Banned servers cannot /make_leave -fau Banned servers cannot /send_leave -fau Banned servers cannot /invite -fau Banned servers cannot get room state -fau Banned servers cannot get room state ids -fau Banned servers cannot backfill -fau Banned servers cannot /event_auth -fau Banned servers cannot get missing events -fau Server correctly handles transactions that break edu limits -fau Inbound federation correctly soft fails events -fau Inbound federation accepts a second soft-failed event -fau Inbound federation correctly handles soft failed events as extremities -med Can upload with Unicode file name -med Can download with Unicode file name locally -f,med Can download with Unicode file name over federation -med Alternative server names do not cause a routing loop -med Can download specifying a different Unicode file name -med Can upload without a file name -med Can download without a file name locally -f,med Can download without a file name over federation -med Can upload with ASCII file name -med Can download file 'ascii' -med Can download file 'name with spaces' -med Can download file 'name;with;semicolons' -med Can download specifying a different ASCII file name -med Can send image in room message -med Can fetch images in room -med POSTed media can be thumbnailed -f,med Remote media can be thumbnailed -med Test URL preview -med Can read configuration endpoint -nsp Can quarantine media in rooms -udr User appears in user directory -udr User in private room doesn't appear in user directory -udr User joining then 
leaving public room appears and dissappears from directory -udr Users appear/disappear from directory when join_rules are changed -udr Users appear/disappear from directory when history_visibility are changed -udr Users stay in directory when join_rules are changed but history_visibility is world_readable -f,udr User in remote room doesn't appear in user directory after server left room -udr User directory correctly update on display name change -udr User in shared private room does appear in user directory -udr User in shared private room does appear in user directory until leave -udr User in dir while user still shares private rooms -nsp Create group -nsp Add group rooms -nsp Remove group rooms -nsp Get local group profile -nsp Get local group users -nsp Add/remove local group rooms -nsp Get local group summary -nsp Get remote group profile -nsp Get remote group users -nsp Add/remove remote group rooms -nsp Get remote group summary -nsp Add local group users -nsp Remove self from local group -nsp Remove other from local group -nsp Add remote group users -nsp Remove self from remote group -nsp Listing invited users of a remote group when not a member returns a 403 -nsp Add group category -nsp Remove group category -nsp Get group categories -nsp Add group role -nsp Remove group role -nsp Get group roles -nsp Add room to group summary -nsp Adding room to group summary keeps room_id when fetching rooms in group -nsp Adding multiple rooms to group summary have correct order -nsp Remove room from group summary -nsp Add room to group summary with category -nsp Remove room from group summary with category -nsp Add user to group summary -nsp Adding multiple users to group summary have correct order -nsp Remove user from group summary -nsp Add user to group summary with role -nsp Remove user from group summary with role -nsp Local group invites come down sync -nsp Group creator sees group in sync -nsp Group creator sees group in initial sync -nsp Get/set local group 
publicity -nsp Bulk get group publicity -nsp Joinability comes down summary -nsp Set group joinable and join it -nsp Group is not joinable by default -nsp Group is joinable over federation -nsp Room is transitioned on local and remote groups upon room upgrade -3pd Can bind 3PID via home server -3pd Can bind and unbind 3PID via homeserver -3pd Can unbind 3PID via homeserver when bound out of band -3pd 3PIDs are unbound after account deactivation -3pd Can bind and unbind 3PID via /unbind by specifying the identity server -3pd Can bind and unbind 3PID via /unbind without specifying the identity server -app AS can create a user -app AS can create a user with an underscore -app AS can create a user with inhibit_login -app AS cannot create users outside its own namespace -app Regular users cannot register within the AS namespace -app AS can make room aliases -app Regular users cannot create room aliases within the AS namespace -app AS-ghosted users can use rooms via AS -app AS-ghosted users can use rooms themselves -app Ghost user must register before joining room -app AS can set avatar for ghosted users -app AS can set displayname for ghosted users -app AS can't set displayname for random users -app Inviting an AS-hosted user asks the AS server -app Accesing an AS-hosted room alias asks the AS server -app Events in rooms with AS-hosted room aliases are sent to AS server -app AS user (not ghost) can join room without registering -app AS user (not ghost) can join room without registering, with user_id query param -app HS provides query metadata -app HS can provide query metadata on a single protocol -app HS will proxy request for 3PU mapping -app HS will proxy request for 3PL mapping -app AS can publish rooms in their own list -app AS and main public room lists are separate -app AS can deactivate a user -psh Test that a message is pushed -psh Invites are pushed -psh Rooms with names are correctly named in pushed -psh Rooms with canonical alias are correctly named in 
pushed -psh Rooms with many users are correctly pushed -psh Don't get pushed for rooms you've muted -psh Rejected events are not pushed -psh Can add global push rule for room -psh Can add global push rule for sender -psh Can add global push rule for content -psh Can add global push rule for override -psh Can add global push rule for underride -psh Can add global push rule for content -psh New rules appear before old rules by default -psh Can add global push rule before an existing rule -psh Can add global push rule after an existing rule -psh Can delete a push rule -psh Can disable a push rule -psh Adding the same push rule twice is idempotent -psh Messages that notify from another user increment unread notification count -psh Messages that highlight from another user increment unread highlight count -psh Can change the actions of default rules -psh Changing the actions of an unknown default rule fails with 404 -psh Can change the actions of a user specified rule -psh Changing the actions of an unknown rule fails with 404 -psh Can fetch a user's pushers -psh Push rules come down in an initial /sync -psh Adding a push rule wakes up an incremental /sync -psh Disabling a push rule wakes up an incremental /sync -psh Enabling a push rule wakes up an incremental /sync -psh Setting actions for a push rule wakes up an incremental /sync -psh Can enable/disable default rules -psh Enabling an unknown default rule fails with 404 -psh Test that rejected pushers are removed. 
-psh Notifications can be viewed with GET /notifications -psh Trying to add push rule with no scope fails with 400 -psh Trying to add push rule with invalid scope fails with 400 -psh Trying to add push rule with missing template fails with 400 -psh Trying to add push rule with missing rule_id fails with 400 -psh Trying to add push rule with empty rule_id fails with 400 -psh Trying to add push rule with invalid template fails with 400 -psh Trying to add push rule with rule_id with slashes fails with 400 -psh Trying to add push rule with override rule without conditions fails with 400 -psh Trying to add push rule with underride rule without conditions fails with 400 -psh Trying to add push rule with condition without kind fails with 400 -psh Trying to add push rule with content rule without pattern fails with 400 -psh Trying to add push rule with no actions fails with 400 -psh Trying to add push rule with invalid action fails with 400 -psh Trying to add push rule with invalid attr fails with 400 -psh Trying to add push rule with invalid value for enabled fails with 400 -psh Trying to get push rules with no trailing slash fails with 400 -psh Trying to get push rules with scope without trailing slash fails with 400 -psh Trying to get push rules with template without tailing slash fails with 400 -psh Trying to get push rules with unknown scope fails with 400 -psh Trying to get push rules with unknown template fails with 400 -psh Trying to get push rules with unknown attribute fails with 400 -psh Trying to get push rules with unknown rule_id fails with 404 -psh Rooms with names are correctly named in pushes -v1s GET /initialSync with non-numeric 'limit' -v1s GET /events with non-numeric 'limit' -v1s GET /events with negative 'limit' -v1s GET /events with non-numeric 'timeout' -ath Event size limits -syn Check creating invalid filters returns 4xx -f,pre New federated private chats get full presence information (SYN-115) -pre Left room members do not cause problems for 
presence -crm Rooms can be created with an initial invite list (SYN-205) (1 subtests) -typ Typing notifications don't leak -ban Non-present room members cannot ban others -psh Getting push rules doesn't corrupt the cache SYN-390 -inv Test that we can be reinvited to a room we created -syn Multiple calls to /sync should not cause 500 errors -gst Guest user can call /events on another world_readable room (SYN-606) -gst Real user can call /events on another world_readable room (SYN-606) -gst Events come down the correct room -pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -std Can send a to-device message to two users which both receive it using /sync -fme Outbound federation will ignore a missing event with bad JSON for room version 6 -fbk Outbound federation rejects backfill containing invalid JSON for events in room version 6 -jso Invalid JSON integers -jso Invalid JSON floats -jso Invalid JSON special values -inv Can invite users to invite-only rooms (2 subtests) -plv setting 'm.room.name' respects room powerlevel (2 subtests) -psh Messages that notify from another user increment notification_count -psh Messages that org.matrix.msc2625.mark_unread from another user increment org.matrix.msc2625.unread_count -dvk Can claim one time key using POST (2 subtests) -fdk Can query remote device keys using POST (1 subtests) -fdk Can claim remote one time key using POST (2 subtests) -fmj Inbound /make_join rejects attempts to join rooms where all users have left \ No newline at end of file diff --git a/tests/sytest/are-we-synapse-yet.py b/tests/sytest/are-we-synapse-yet.py deleted file mode 100755 index 3d21fa41..00000000 --- a/tests/sytest/are-we-synapse-yet.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import division -import argparse -import re -import sys - -# Usage: $ ./are-we-synapse-yet.py [-v] results.tap -# This script scans a results.tap file from Dendrite's CI process and 
spits out -# a rating of how close we are to Synapse parity, based purely on SyTests. -# The main complexity is grouping tests sensibly into features like 'Registration' -# and 'Federation'. Then it just checks the ones which are passing and calculates -# percentages for each group. Produces results like: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# -# or in verbose mode: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -# -# You can also tack `-v` on to see exactly which tests each category falls under. - -test_mappings = { - "nsp": "Non-Spec API", - "unk": "Unknown API (no group specified)", - "app": "Application Services API", - "f": "Federation", # flag to mark test involves federation - - "federation_apis": { - "fky": "Key API", - "fsj": "send_join API", - "fmj": "make_join API", - "fsl": "send_leave API", - "fiv": "Invite API", - "fqu": "Query API", - "frv": "room versions", - "fau": "Auth", - "fbk": "Backfill API", - "fme": "get_missing_events API", - "fst": "State APIs", - "fpb": "Public Room API", - "fdk": "Device Key APIs", - "fed": "Federation API", - "fsd": "Send-to-Device APIs", - }, - - "client_apis": { - "reg": "Registration", - "log": "Login", - "lox": "Logout", - "v1s": "V1 CS APIs", - "csa": "Misc CS APIs", - "pro": "Profile", - "dev": "Devices", - "dvk": "Device Keys", - "dkb": "Device Key Backup", - "xsk": "Cross-signing Keys", - "pre": "Presence", - "crm": "Create Room", - "syn": "Sync API", - "rmv": "Room Versions", - "rst": "Room State APIs", - "pub": "Public Room APIs", - "mem": "Room Membership", - "ali": "Room Aliases", - "jon": "Joining Rooms", - "lev": "Leaving Rooms", - "inv": "Inviting users to Rooms", - "ban": 
"Banning users", - "snd": "Sending events", - "get": "Getting events for Rooms", - "rct": "Receipts", - "red": "Read markers", - "med": "Media APIs", - "cap": "Capabilities API", - "typ": "Typing API", - "psh": "Push APIs", - "acc": "Account APIs", - "eph": "Ephemeral Events", - "plv": "Power Levels", - "xxx": "Redaction", - "3pd": "Third-Party ID APIs", - "gst": "Guest APIs", - "ath": "Room Auth", - "fgt": "Forget APIs", - "ctx": "Context APIs", - "upg": "Room Upgrade APIs", - "tag": "Tagging APIs", - "sch": "Search APIs", - "oid": "OpenID API", - "std": "Send-to-Device APIs", - "adm": "Server Admin API", - "ign": "Ignore Users", - "udr": "User Directory APIs", - "jso": "Enforced canonical JSON", - }, -} - -# optional 'not ' with test number then anything but '#' -re_testname = re.compile(r"^(not )?ok [0-9]+ ([^#]+)") - -# Parses lines like the following: -# -# SUCCESS: ok 3 POST /register downcases capitals in usernames -# FAIL: not ok 54 (expected fail) POST /createRoom creates a room with the given version -# SKIP: ok 821 Multiple calls to /sync should not cause 500 errors # skip lack of can_post_room_receipts -# EXPECT FAIL: not ok 822 (expected fail) Guest user can call /events on another world_readable room (SYN-606) # TODO expected fail -# -# Only SUCCESS lines are treated as success, the rest are not implemented. -# -# Returns a dict like: -# { name: "...", ok: True } -def parse_test_line(line): - if not line.startswith("ok ") and not line.startswith("not ok "): - return - re_match = re_testname.match(line) - test_name = re_match.groups()[1].replace("(expected fail) ", "").strip() - test_pass = False - if line.startswith("ok ") and not "# skip " in line: - test_pass = True - return { - "name": test_name, - "ok": test_pass, - } - -# Prints the stats for a complete section. 
-# header_name => "Client-Server APIs" -# gid_to_tests => { gid: { : True|False }} -# gid_to_name => { gid: "Group Name" } -# verbose => True|False -# Produces: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# or in verbose mode: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -def print_stats(header_name, gid_to_tests, gid_to_name, verbose): - subsections = [] # Registration: 100% (13/13 tests) - subsection_test_names = {} # 'subsection name': ["✓ Test 1", "✓ Test 2", "× Test 3"] - total_passing = 0 - total_tests = 0 - for gid, tests in gid_to_tests.items(): - group_total = len(tests) - if group_total == 0: - continue - group_passing = 0 - test_names_and_marks = [] - for name, passing in tests.items(): - if passing: - group_passing += 1 - test_names_and_marks.append(f"{'✓' if passing else '×'} {name}") - - total_tests += group_total - total_passing += group_passing - pct = "{0:.0f}%".format(group_passing/group_total * 100) - line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) - subsections.append(line) - subsection_test_names[line] = test_names_and_marks - - pct = "{0:.0f}%".format(total_passing/total_tests * 100) - print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) - print("-" * (len(header_name)+1)) - for line in subsections: - print(" %s" % (line,)) - if verbose: - for test_name_and_pass_mark in subsection_test_names[line]: - print(" %s" % (test_name_and_pass_mark,)) - print("") - print("") - -def main(results_tap_path, verbose): - # Load up test mappings - test_name_to_group_id = {} - fed_tests = set() - client_tests = set() - with 
open("./are-we-synapse-yet.list", "r") as f: - for line in f.readlines(): - test_name = " ".join(line.split(" ")[1:]).strip() - groups = line.split(" ")[0].split(",") - for gid in groups: - if gid == "f" or gid in test_mappings["federation_apis"]: - fed_tests.add(test_name) - else: - client_tests.add(test_name) - if gid == "f": - continue # we expect another group ID - test_name_to_group_id[test_name] = gid - - # parse results.tap - summary = { - "client": { - # gid: { - # test_name: OK - # } - }, - "federation": { - # gid: { - # test_name: OK - # } - }, - "appservice": { - "app": {}, - }, - "nonspec": { - "nsp": {}, - "unk": {} - }, - } - with open(results_tap_path, "r") as f: - for line in f.readlines(): - test_result = parse_test_line(line) - if not test_result: - continue - name = test_result["name"] - group_id = test_name_to_group_id.get(name) - if not group_id: - summary["nonspec"]["unk"][name] = test_result["ok"] - if group_id == "nsp": - summary["nonspec"]["nsp"][name] = test_result["ok"] - elif group_id == "app": - summary["appservice"]["app"][name] = test_result["ok"] - elif group_id in test_mappings["federation_apis"]: - group = summary["federation"].get(group_id, {}) - group[name] = test_result["ok"] - summary["federation"][group_id] = group - elif group_id in test_mappings["client_apis"]: - group = summary["client"].get(group_id, {}) - group[name] = test_result["ok"] - summary["client"][group_id] = group - - print("Are We Synapse Yet?") - print("===================") - print("") - print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose) - print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose) - print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose) - print_stats("Application Services APIs", summary["appservice"], test_mappings, verbose) - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("tap_file", help="path to 
results.tap") - parser.add_argument("-v", action="store_true", help="show individual test names in output") - args = parser.parse_args() - main(args.tap_file, args.v) \ No newline at end of file diff --git a/tests/sytest/show-expected-fail-tests.sh b/tests/sytest/show-expected-fail-tests.sh deleted file mode 100755 index 320d4ebd..00000000 --- a/tests/sytest/show-expected-fail-tests.sh +++ /dev/null @@ -1,105 +0,0 @@ -#! /bin/bash -# -# Parses a results.tap file from SyTest output and a file containing test names (a test whitelist) -# and checks whether a test name that exists in the whitelist (that should pass), failed or not. -# -# An optional blacklist file can be added, also containing test names, where if a test name is -# present, the script will not error even if the test is in the whitelist file and failed -# -# For each of these files, lines starting with '#' are ignored. -# -# Usage ./show-expected-fail-tests.sh results.tap whitelist [blacklist] - -results_file=$1 -whitelist_file=$2 -blacklist_file=$3 - -fail_build=0 - -if [ $# -lt 2 ]; then - echo "Usage: $0 results.tap whitelist [blacklist]" - exit 1 -fi - -if [ ! -f "$results_file" ]; then - echo "ERROR: Specified results file '${results_file}' doesn't exist." - fail_build=1 -fi - -if [ ! -f "$whitelist_file" ]; then - echo "ERROR: Specified test whitelist '${whitelist_file}' doesn't exist." - fail_build=1 -fi - -blacklisted_tests=() - -# Check if a blacklist file was provided -if [ $# -eq 3 ]; then - # Read test blacklist file - if [ ! -f "$blacklist_file" ]; then - echo "ERROR: Specified test blacklist file '${blacklist_file}' doesn't exist." 
- fail_build=1 - fi - - # Read each line, ignoring those that start with '#' - blacklisted_tests="" - search_non_comments=$(grep -v '^#' ${blacklist_file}) - while read -r line ; do - # Record the blacklisted test name - blacklisted_tests+=("${line}") - done <<< "${search_non_comments}" # This allows us to edit blacklisted_tests in the while loop -fi - -[ "$fail_build" = 0 ] || exit 1 - -passed_but_expected_fail=$(grep ' # TODO passed but expected fail' ${results_file} | sed -E 's/^ok [0-9]+ (\(expected fail\) )?//' | sed -E 's/( \([0-9]+ subtests\))? # TODO passed but expected fail$//') -tests_to_add="" -already_in_whitelist="" - -while read -r test_name; do - # Ignore empty lines - [ "${test_name}" = "" ] && continue - - grep "^${test_name}" "${whitelist_file}" > /dev/null 2>&1 - if [ "$?" != "0" ]; then - # Check if this test name is blacklisted - if printf '%s\n' "${blacklisted_tests[@]}" | grep -q -P "^${test_name}$"; then - # Don't notify about this test - continue - fi - - # Append this test_name to the existing list - tests_to_add="${tests_to_add}${test_name}\n" - fail_build=1 - else - already_in_whitelist="${already_in_whitelist}${test_name}\n" - fi -done <<< "${passed_but_expected_fail}" - -# TODO: Check that the same test doesn't exist in both the whitelist and blacklist -# TODO: Check that the same test doesn't appear twice in the whitelist|blacklist - -# Trim test output strings -tests_to_add=$(IFS=$'\n' echo "${tests_to_add[*]%%'\n'}") -already_in_whitelist=$(IFS=$'\n' echo "${already_in_whitelist[*]%%'\n'}") - -# Format output with markdown for buildkite annotation rendering purposes -if [ -n "${tests_to_add}" ] && [ -n "${already_in_whitelist}" ]; then - echo "### 📜 SyTest Whitelist Maintenance" -fi - -if [ -n "${tests_to_add}" ]; then - echo "**ERROR**: The following tests passed but are not present in \`$2\`. 
Please append them to the file:" - echo "\`\`\`" - echo -e "${tests_to_add}" - echo "\`\`\`" -fi - -if [ -n "${already_in_whitelist}" ]; then - echo "**WARN**: Tests in the whitelist still marked as **expected fail**:" - echo "\`\`\`" - echo -e "${already_in_whitelist}" - echo "\`\`\`" -fi - -exit ${fail_build} diff --git a/tests/sytest/sytest-blacklist b/tests/sytest/sytest-blacklist deleted file mode 100644 index 009de225..00000000 --- a/tests/sytest/sytest-blacklist +++ /dev/null @@ -1,7 +0,0 @@ -# This test checks for a room-alias key in the response which is not in the spec, we must add it back in whitelist when https://github.com/matrix-org/sytest/pull/880 is merged -POST /createRoom makes a public room -# These fails because they use a endpoint which is not in the spec, we must add them back in whitelist when https://github.com/matrix-org/sytest/issues/878 is closed -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -Can /sync newly created room -POST /createRoom ignores attempts to set the room version via creation_content \ No newline at end of file diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist deleted file mode 100644 index 1c969dba..00000000 --- a/tests/sytest/sytest-whitelist +++ /dev/null @@ -1,516 +0,0 @@ -/event/ does not allow access to events before the user joined -/event/ on joined room works -/event/ on non world readable room does not work -/joined_members return joined members -/joined_rooms returns only joined rooms -/whois -3pid invite join valid signature but revoked keys are rejected -3pid invite join valid signature but unreachable ID server are rejected -3pid invite join with wrong but valid signature are rejected -A change to displayname should appear in incremental /sync -A full_state incremental update returns all state -A full_state incremental update returns only recent timeline -A message sent after an initial sync appears in the timeline of an incremental sync. 
-A next_batch token can be used in the v1 messages API -A pair of events which redact each other should be ignored -A pair of servers can establish a join in a v2 room -A prev_batch token can be used in the v1 messages API -AS can create a user -AS can create a user with an underscore -AS can create a user with inhibit_login -AS can set avatar for ghosted users -AS can set displayname for ghosted users -AS can't set displayname for random users -AS cannot create users outside its own namespace -AS user (not ghost) can join room without registering -AS user (not ghost) can join room without registering, with user_id query param -After changing password, a different session no longer works by default -After changing password, can log in with new password -After changing password, can't log in with old password -After changing password, different sessions can optionally be kept -After changing password, existing session still works -After deactivating account, can't log in with an email -After deactivating account, can't log in with password -Alias creators can delete alias with no ops -Alias creators can delete canonical alias with no ops -Alternative server names do not cause a routing loop -An event which redacts an event in a different room should be ignored -An event which redacts itself should be ignored -Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -Backfill checks the events requested belong to the room -Backfill works correctly with history visibility set to joined -Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -Banned servers cannot /event_auth -Banned servers cannot /invite -Banned servers cannot /make_join -Banned servers cannot /make_leave -Banned servers cannot /send_join -Banned servers cannot /send_leave -Banned servers cannot backfill -Banned servers cannot get missing events -Banned servers cannot get room state -Banned servers cannot get room 
state ids -Banned servers cannot send events -Banned user is kicked and may not rejoin until unbanned -Both GET and PUT work -Can /sync newly created room -Can add account data -Can add account data to room -Can add tag -Can claim one time key using POST -Can claim remote one time key using POST -Can create filter -Can deactivate account -Can delete canonical alias -Can download file 'ascii' -Can download file 'name with spaces' -Can download file 'name;with;semicolons' -Can download filter -Can download specifying a different ASCII file name -Can download specifying a different Unicode file name -Can download with Unicode file name locally -Can download with Unicode file name over federation -Can download without a file name locally -Can download without a file name over federation -Can forget room you've been kicked from -Can get 'm.room.name' state for a departed room (SPEC-216) -Can get account data without syncing -Can get remote public room list -Can get room account data without syncing -Can get rooms/{roomId}/members -Can get rooms/{roomId}/members for a departed room (SPEC-216) -Can get rooms/{roomId}/state for a departed room (SPEC-216) -Can invite users to invite-only rooms -Can list tags for a room -Can logout all devices -Can logout current device -Can paginate public room list -Can pass a JSON filter as a query parameter -Can query device keys using POST -Can query remote device keys using POST -Can query specific device keys using POST -Can re-join room if re-invited -Can read configuration endpoint -Can receive redactions from regular users over federation in room version 1 -Can receive redactions from regular users over federation in room version 2 -Can receive redactions from regular users over federation in room version 3 -Can receive redactions from regular users over federation in room version 4 -Can receive redactions from regular users over federation in room version 5 -Can receive redactions from regular users over federation in room version 
6 -Can recv a device message using /sync -Can recv a device message using /sync -Can recv device messages over federation -Can recv device messages until they are acknowledged -Can recv device messages until they are acknowledged -Can reject invites over federation for rooms with version 1 -Can reject invites over federation for rooms with version 2 -Can reject invites over federation for rooms with version 3 -Can reject invites over federation for rooms with version 4 -Can reject invites over federation for rooms with version 5 -Can reject invites over federation for rooms with version 6 -Can remove tag -Can search public room list -Can send a message directly to a device using PUT /sendToDevice -Can send a message directly to a device using PUT /sendToDevice -Can send a to-device message to two users which both receive it using /sync -Can send image in room message -Can send messages with a wildcard device id -Can send messages with a wildcard device id -Can send messages with a wildcard device id to two devices -Can send messages with a wildcard device id to two devices -Can sync -Can sync a joined room -Can sync a room with a message with a transaction id -Can sync a room with a single message -Can upload device keys -Can upload with ASCII file name -Can upload with Unicode file name -Can upload without a file name -Can't deactivate account with wrong password -Can't forget room you're still in -Changes to state are included in an gapped incremental sync -Changes to state are included in an incremental sync -Changing the actions of an unknown default rule fails with 404 -Changing the actions of an unknown rule fails with 404 -Checking local federation server -Creators can delete alias -Current state appears in timeline in private history -Current state appears in timeline in private history with many messages before -DELETE /device/{deviceId} -DELETE /device/{deviceId} requires UI auth user to match device owner -DELETE /device/{deviceId} with no body gives a 
401 -Deleted tags appear in an incremental v2 /sync -Deleting a non-existent alias should return a 404 -Device list doesn't change if remote server is down -Device messages over federation wake up /sync -Device messages wake up /sync -Device messages wake up /sync -Device messages with the same txn_id are deduplicated -Device messages with the same txn_id are deduplicated -Enabling an unknown default rule fails with 404 -Event size limits -Event with an invalid signature in the send_join response should not cause room join to fail -Events come down the correct room -Events whose auth_events are in the wrong room do not mess up the room state -Existing members see new members' join events -Federation key API allows unsigned requests for keys -Federation key API can act as a notary server via a GET request -Federation key API can act as a notary server via a POST request -Federation rejects inbound events where the prev_events cannot be found -Fetching eventstream a second time doesn't yield the message again -Forgetting room does not show up in v2 /sync -Full state sync includes joined rooms -GET /capabilities is present and well formed for registered user -GET /device/{deviceId} -GET /device/{deviceId} gives a 404 for unknown devices -GET /devices -GET /directory/room/:room_alias yields room ID -GET /events initially -GET /events with negative 'limit' -GET /events with non-numeric 'limit' -GET /events with non-numeric 'timeout' -GET /initialSync initially -GET /joined_rooms lists newly-created room -GET /login yields a set of flows -GET /media/r0/download can fetch the value again -GET /profile/:user_id/avatar_url publicly accessible -GET /profile/:user_id/displayname publicly accessible -GET /publicRooms includes avatar URLs -GET /publicRooms lists newly-created room -GET /publicRooms lists rooms -GET /r0/capabilities is not public -GET /register yields a set of flows -GET /rooms/:room_id/joined_members fetches my membership -GET /rooms/:room_id/messages returns a 
message -GET /rooms/:room_id/state fetches entire room state -GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -GET /rooms/:room_id/state/m.room.name gets name -GET /rooms/:room_id/state/m.room.power_levels can fetch levels -GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -GET /rooms/:room_id/state/m.room.topic gets topic -Get left notifs for other users in sync and /keys/changes when user leaves -Getting messages going forward is limited for a departed room (SPEC-216) -Getting push rules doesn't corrupt the cache SYN-390 -Getting state IDs checks the events requested belong to the room -Getting state checks the events requested belong to the room -Ghost user must register before joining room -Guest non-joined user cannot call /events on default room -Guest non-joined user cannot call /events on invited room -Guest non-joined user cannot call /events on joined room -Guest non-joined user cannot call /events on shared room -Guest non-joined users can get individual state for world_readable rooms -Guest non-joined users can get individual state for world_readable rooms after leaving -Guest non-joined users can get state for world_readable rooms -Guest non-joined users cannot room initalSync for non-world_readable rooms -Guest non-joined users cannot send messages to guest_access rooms if not joined -Guest user can set display names -Guest user cannot call /events globally -Guest user cannot upgrade other users -Guest users can accept invites to private rooms over federation -Guest users can join guest_access rooms -Guest users can send messages to guest_access rooms if joined -If a device list update goes missing, the server resyncs on the next one -If remote user leaves room we no longer receive device updates -If remote user leaves room, changes device and rejoins we see update in /keys/changes -If remote user leaves room, changes 
device and rejoins we see update in sync -Inbound /make_join rejects attempts to join rooms where all users have left -Inbound /v1/make_join rejects remote attempts to join local users to rooms -Inbound /v1/send_join rejects incorrectly-signed joins -Inbound /v1/send_join rejects joins from other servers -Inbound /v1/send_leave rejects leaves from other servers -Inbound federation accepts a second soft-failed event -Inbound federation accepts attempts to join v2 rooms from servers with support -Inbound federation can backfill events -Inbound federation can get public room list -Inbound federation can get state for a room -Inbound federation can get state_ids for a room -Inbound federation can query profile data -Inbound federation can query room alias directory -Inbound federation can receive events -Inbound federation can receive invites via v1 API -Inbound federation can receive invites via v2 API -Inbound federation can receive redacted events -Inbound federation can receive v1 /send_join -Inbound federation can receive v2 /send_join -Inbound federation can return events -Inbound federation can return missing events for invite visibility -Inbound federation can return missing events for world_readable visibility -Inbound federation correctly soft fails events -Inbound federation of state requires event_id as a mandatory paramater -Inbound federation of state_ids requires event_id as a mandatory paramater -Inbound federation rejects attempts to join v1 rooms from servers without v1 support -Inbound federation rejects attempts to join v2 rooms from servers lacking version support -Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -Inbound federation rejects invite rejections which include invalid JSON for room version 6 -Inbound federation rejects invites which include invalid JSON for room version 6 -Inbound federation rejects receipts from wrong remote -Inbound federation rejects remote attempts to join local users to rooms 
-Inbound federation rejects remote attempts to kick local users to rooms -Inbound federation rejects typing notifications from wrong remote -Inbound: send_join rejects invalid JSON for room version 6 -Invalid JSON floats -Invalid JSON integers -Invalid JSON special values -Invited user can reject invite -Invited user can reject invite over federation -Invited user can reject invite over federation for empty room -Invited user can reject invite over federation several times -Invited user can see room metadata -Inviting an AS-hosted user asks the AS server -Lazy loading parameters in the filter are strictly boolean -Left rooms appear in the leave section of full state sync -Local delete device changes appear in v2 /sync -Local device key changes appear in /keys/changes -Local device key changes appear in v2 /sync -Local device key changes get to remote servers -Local new device changes appear in v2 /sync -Local non-members don't see posted message events -Local room members can get room messages -Local room members see posted message events -Local update device changes appear in v2 /sync -Local users can peek by room alias -Local users can peek into world_readable rooms by room ID -Message history can be paginated -Message history can be paginated over federation -Name/topic keys are correct -New account data appears in incremental v2 /sync -New read receipts appear in incremental v2 /sync -New room members see their own join event -New users appear in /keys/changes -Newly banned rooms appear in the leave section of incremental sync -Newly joined room is included in an incremental sync -Newly joined room is included in an incremental sync after invite -Newly left rooms appear in the leave section of gapped sync -Newly left rooms appear in the leave section of incremental sync -Newly updated tags appear in an incremental v2 /sync -Non-numeric ports in server names are rejected -Outbound federation can backfill events -Outbound federation can query profile data 
-Outbound federation can query room alias directory -Outbound federation can query v1 /send_join -Outbound federation can query v2 /send_join -Outbound federation can request missing events -Outbound federation can send events -Outbound federation can send invites via v1 API -Outbound federation can send invites via v2 API -Outbound federation can send room-join requests -Outbound federation correctly handles unsupported room versions -Outbound federation passes make_join failures through to the client -Outbound federation rejects backfill containing invalid JSON for events in room version 6 -Outbound federation rejects m.room.create events with an unknown room version -Outbound federation rejects send_join responses with no m.room.create event -Outbound federation sends receipts -Outbound federation will ignore a missing event with bad JSON for room version 6 -POST /createRoom creates a room with the given version -POST /createRoom ignores attempts to set the room version via creation_content -POST /createRoom makes a private room -POST /createRoom makes a private room with invites -POST /createRoom makes a public room -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -POST /createRoom rejects attempts to create rooms with numeric versions -POST /createRoom rejects attempts to create rooms with unknown versions -POST /createRoom with creation content -POST /join/:room_alias can join a room -POST /join/:room_alias can join a room with custom content -POST /join/:room_id can join a room -POST /join/:room_id can join a room with custom content -POST /login as non-existing user is rejected -POST /login can log in as a user -POST /login can log in as a user with just the local part of the id -POST /login returns the same device_id as that in the request -POST /login wrong password is rejected -POST /media/r0/upload can create an upload -POST /redact disallows redaction of event in different room -POST /register allows registration 
of usernames with '-' -POST /register allows registration of usernames with '.' -POST /register allows registration of usernames with '/' -POST /register allows registration of usernames with '3' -POST /register allows registration of usernames with '=' -POST /register allows registration of usernames with '_' -POST /register allows registration of usernames with 'q' -POST /register can create a user -POST /register downcases capitals in usernames -POST /register rejects registration of usernames with '!' -POST /register rejects registration of usernames with '"' -POST /register rejects registration of usernames with ''' -POST /register rejects registration of usernames with ':' -POST /register rejects registration of usernames with '?' -POST /register rejects registration of usernames with '@' -POST /register rejects registration of usernames with '[' -POST /register rejects registration of usernames with '\' -POST /register rejects registration of usernames with '\n' -POST /register rejects registration of usernames with ']' -POST /register rejects registration of usernames with '{' -POST /register rejects registration of usernames with '|' -POST /register rejects registration of usernames with '}' -POST /register rejects registration of usernames with '£' -POST /register rejects registration of usernames with 'é' -POST /register returns the same device_id as that in the request -POST /rooms/:room_id/ban can ban a user -POST /rooms/:room_id/invite can send an invite -POST /rooms/:room_id/join can join a room -POST /rooms/:room_id/leave can leave a room -POST /rooms/:room_id/read_markers can create read marker -POST /rooms/:room_id/receipt can create receipts -POST /rooms/:room_id/redact/:event_id as original message sender redacts message -POST /rooms/:room_id/redact/:event_id as power user redacts message -POST /rooms/:room_id/redact/:event_id as random user does not redact message -POST /rooms/:room_id/send/:event_type sends a message -POST 
/rooms/:room_id/state/m.room.name sets name -POST /rooms/:room_id/state/m.room.topic sets topic -POST /rooms/:room_id/upgrade can upgrade a room version -POST rejects invalid utf-8 in JSON -POSTed media can be thumbnailed -PUT /device/{deviceId} gives a 404 for unknown devices -PUT /device/{deviceId} updates device fields -PUT /directory/room/:room_alias creates alias -PUT /profile/:user_id/avatar_url sets my avatar -PUT /profile/:user_id/displayname sets my name -PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -PUT /rooms/:room_id/state/m.room.power_levels can set levels -PUT /rooms/:room_id/typing/:user_id sets typing notification -PUT power_levels should not explode if the old power levels were empty -Peeked rooms only turn up in the sync for the device who peeked them -Previously left rooms don't appear in the leave section of sync -Push rules come down in an initial /sync -Read markers appear in incremental v2 /sync -Read markers appear in initial v2 /sync -Read markers can be updated -Read receipts appear in initial v2 /sync -Real non-joined user cannot call /events on default room -Real non-joined user cannot call /events on invited room -Real non-joined user cannot call /events on joined room -Real non-joined user cannot call /events on shared room -Real non-joined users can get individual state for world_readable rooms -Real non-joined users can get individual state for world_readable rooms after leaving -Real non-joined users can get state for world_readable rooms -Real non-joined users cannot room initalSync for non-world_readable rooms -Real non-joined users cannot send messages to guest_access rooms if not joined -Receipts must be m.read -Redaction of a redaction redacts the redaction reason -Regular users can add and delete aliases in the default room configuration -Regular users can add and delete aliases when m.room.aliases is restricted -Regular users cannot 
create room aliases within the AS namespace -Regular users cannot register within the AS namespace -Remote media can be thumbnailed -Remote room alias queries can handle Unicode -Remote room members also see posted message events -Remote room members can get room messages -Remote user can backfill in a room with version 1 -Remote user can backfill in a room with version 2 -Remote user can backfill in a room with version 3 -Remote user can backfill in a room with version 4 -Remote user can backfill in a room with version 5 -Remote user can backfill in a room with version 6 -Remote users can join room by alias -Remote users may not join unfederated rooms -Request to logout with invalid an access token is rejected -Request to logout without an access token is rejected -Room aliases can contain Unicode -Room creation reports m.room.create to myself -Room creation reports m.room.member to myself -Room members can join a room with an overridden displayname -Room members can override their displayname on a room-specific basis -Room state at a rejected message event is the same as its predecessor -Room state at a rejected state event is the same as its predecessor -Rooms a user is invited to appear in an incremental sync -Rooms a user is invited to appear in an initial sync -Rooms can be created with an initial invite list (SYN-205) -Server correctly handles incoming m.device_list_update -Server correctly handles transactions that break edu limits -Server correctly resyncs when client query keys and there is no remote cache -Server correctly resyncs when server leaves and rejoins a room -Server rejects invalid JSON in a version 6 room -Setting room topic reports m.room.topic to myself -Should not be able to take over the room by pretending there is no PL event -Should reject keys claiming to belong to a different user -State from remote users is included in the state in the initial sync -State from remote users is included in the timeline in an incremental sync -State is 
included in the timeline in the initial sync -Sync can be polled for updates -Sync is woken up for leaves -Syncing a new room with a large timeline limit isn't limited -Tags appear in an initial v2 /sync -Trying to get push rules with unknown rule_id fails with 404 -Typing can be explicitly stopped -Typing events appear in gapped sync -Typing events appear in incremental sync -Typing events appear in initial sync -Typing notification sent to local room members -Typing notifications also sent to remote room members -Typing notifications don't leak -Uninvited users cannot join the room -Unprivileged users can set m.room.topic if it only needs level 0 -User appears in user directory -User in private room doesn't appear in user directory -User joining then leaving public room appears and dissappears from directory -User in shared private room does appear in user directory until leave -User can create and send/receive messages in a room with version 1 -User can create and send/receive messages in a room with version 2 -User can create and send/receive messages in a room with version 3 -User can create and send/receive messages in a room with version 4 -User can create and send/receive messages in a room with version 5 -User can create and send/receive messages in a room with version 6 -User can invite local user to room with version 1 -User can invite local user to room with version 2 -User can invite local user to room with version 3 -User can invite local user to room with version 4 -User can invite local user to room with version 5 -User can invite local user to room with version 6 -User can invite remote user to room with version 1 -User can invite remote user to room with version 2 -User can invite remote user to room with version 3 -User can invite remote user to room with version 4 -User can invite remote user to room with version 5 -User can invite remote user to room with version 6 -User directory correctly update on display name change -User in dir while user 
still shares private rooms -User in shared private room does appear in user directory -User is offline if they set_presence=offline in their sync -User signups are forbidden from starting with '_' -Users can't delete other's aliases -Users cannot invite a user that is already in the room -Users cannot invite themselves to a room -Users cannot kick users from a room they are not in -Users cannot kick users who have already left a room -Users cannot set ban powerlevel higher than their own -Users cannot set kick powerlevel higher than their own -Users cannot set notifications powerlevel higher than their own -Users cannot set redact powerlevel higher than their own -Users receive device_list updates for their own devices -Users with sufficient power-level can delete other's aliases -Version responds 200 OK with valid structure -We can't peek into rooms with invited history_visibility -We can't peek into rooms with joined history_visibility -We can't peek into rooms with shared history_visibility -We don't send redundant membership state across incremental syncs by default -We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) -We should see our own leave event, even if history_visibility is restricted (SYN-662) -Wildcard device messages over federation wake up /sync -Wildcard device messages wake up /sync -Wildcard device messages wake up /sync -avatar_url updates affect room member events -displayname updates affect room member events -local user can join room with version 1 -local user can join room with version 2 -local user can join room with version 3 -local user can join room with version 4 -local user can join room with version 5 -local user can join room with version 6 -m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -m.room.history_visibility == "joined" allows/forbids appropriately for Real users -m.room.history_visibility == "world_readable" allows/forbids 
appropriately for Guest users -m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -query for user with no keys returns empty key dict -remote user can join room with version 1 -remote user can join room with version 2 -remote user can join room with version 3 -remote user can join room with version 4 -remote user can join room with version 5 -remote user can join room with version 6 -setting 'm.room.name' respects room powerlevel -setting 'm.room.power_levels' respects room powerlevel -Federation publicRoom Name/topic keys are correct diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 11339049..97c2e1b1 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -1,5 +1,26 @@ {"Action":"pass","Test":"TestACLs"} +{"Action":"pass","Test":"TestAddAccountData"} +{"Action":"pass","Test":"TestAddAccountData/Can_add_global_account_data"} +{"Action":"pass","Test":"TestAddAccountData/Can_add_room_account_data"} +{"Action":"fail","Test":"TestArchivedRoomsHistory"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/incremental_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/initial_sync"} +{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty"} +{"Action":"skip","Test":"TestArchivedRoomsHistory/timeline_is_empty/incremental_sync"} +{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} +{"Action":"fail","Test":"TestAsyncUpload"} +{"Action":"fail","Test":"TestAsyncUpload/Cannot_upload_to_a_media_ID_that_has_already_been_uploaded_to"} +{"Action":"fail","Test":"TestAsyncUpload/Create_media"} +{"Action":"fail","Test":"TestAsyncUpload/Download_media"} +{"Action":"fail","Test":"TestAsyncUpload/Download_media_over__matrix/client/v1/media/download"} 
+{"Action":"fail","Test":"TestAsyncUpload/Not_yet_uploaded"} +{"Action":"fail","Test":"TestAsyncUpload/Upload_media"} +{"Action":"pass","Test":"TestAvatarUrlUpdate"} {"Action":"pass","Test":"TestBannedUserCannotSendJoin"} +{"Action":"skip","Test":"TestCanRegisterAdmin"} +{"Action":"pass","Test":"TestCannotKickLeftUser"} +{"Action":"fail","Test":"TestCannotKickNonPresentUser"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"} @@ -42,16 +63,91 @@ {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/knock_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/regular_event"} +{"Action":"pass","Test":"TestChangePassword"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_a_different_session_no_longer_works_by_default"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can't_log_in_with_old_password"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can_log_in_with_new_password"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_different_sessions_can_optionally_be_kept"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_existing_session_still_works"} +{"Action":"pass","Test":"TestChangePasswordPushers"} +{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} +{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} +{"Action":"fail","Test":"TestClientSpacesSummary"} +{"Action":"pass","Test":"TestClientSpacesSummary/max_depth"} +{"Action":"fail","Test":"TestClientSpacesSummary/pagination"} 
+{"Action":"fail","Test":"TestClientSpacesSummary/query_whole_graph"} +{"Action":"fail","Test":"TestClientSpacesSummary/redact_link"} +{"Action":"fail","Test":"TestClientSpacesSummary/suggested_only"} +{"Action":"fail","Test":"TestClientSpacesSummaryJoinRules"} +{"Action":"pass","Test":"TestContent"} +{"Action":"pass","Test":"TestContentCSAPIMediaV1"} {"Action":"pass","Test":"TestContentMediaV1"} +{"Action":"pass","Test":"TestCumulativeJoinLeaveJoinSync"} +{"Action":"pass","Test":"TestDeactivateAccount"} +{"Action":"pass","Test":"TestDeactivateAccount/After_deactivating_account,_can't_log_in_with_password"} +{"Action":"pass","Test":"TestDeactivateAccount/Can't_deactivate_account_with_wrong_password"} +{"Action":"pass","Test":"TestDeactivateAccount/Can_deactivate_account"} +{"Action":"pass","Test":"TestDeactivateAccount/Password_flow_is_available"} +{"Action":"fail","Test":"TestDelayedEvents"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_with_an_invalid_action"} +{"Action":"pass","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_delay_ID"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_request_body"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_an_action"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_events_are_empty_on_startup"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_message_events_are_sent_on_timeout"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_another_user"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_the_same_user"} +{"Action":"skip","Test":"TestDelayedEvents/delayed_state_events_are_kept_on_server_restart"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_sent_on_timeout"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_cancelled"} 
+{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_restarted"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_sent_on_request"} +{"Action":"pass","Test":"TestDelayedEvents/parallel"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_cancel_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_restart_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_send_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings"} +{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings/Deleting_a_user's_device_should_delete_any_local_notification_settings_entries_from_their_account_data"} +{"Action":"pass","Test":"TestDemotingUsersViaUsersDefault"} +{"Action":"fail","Test":"TestDeviceListUpdates"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_local_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_remote_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_local_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_remote_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_joins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_leaves_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_rejoins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_joins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_leaves_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_rejoins_a_room"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/good_connectivity"} 
{"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/interrupted_connectivity"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/stopped_server"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederationOnRoomJoin"} +{"Action":"fail","Test":"TestDeviceManagement"} +{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}"} +{"Action":"pass","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}_gives_a_404_for_unknown_devices"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/devices"} +{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_gives_a_404_for_unknown_devices"} +{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_updates_device_fields"} +{"Action":"pass","Test":"TestDisplayNameUpdate"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestEvent"} +{"Action":"pass","Test":"TestEvent/Parallel"} +{"Action":"pass","Test":"TestEvent/Parallel/Large_Event"} +{"Action":"pass","Test":"TestEvent/Parallel/Large_State_Event"} {"Action":"pass","Test":"TestEventAuth"} {"Action":"pass","Test":"TestEventAuth/returns_auth_events_for_the_requested_event"} {"Action":"pass","Test":"TestEventAuth/returns_the_auth_chain_for_the_requested_event"} -{"Action":"pass","Test":"TestFederatedClientSpaces"} +{"Action":"fail","Test":"TestEventRelationships"} +{"Action":"fail","Test":"TestFederatedClientSpaces"} 
+{"Action":"fail","Test":"TestFederatedEventRelationships"} {"Action":"fail","Test":"TestFederationKeyUploadQuery"} {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST"} {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"} @@ -63,9 +159,28 @@ {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_join_the_room_when_homeserver_is_already_participating_in_the_room"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_reject_invite_when_homeserver_is_already_participating_in_the_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata"} {"Action":"pass","Test":"TestFederationThumbnail"} +{"Action":"pass","Test":"TestFetchEvent"} +{"Action":"fail","Test":"TestFetchEventNonWorldReadable"} +{"Action":"pass","Test":"TestFetchEventWorldReadable"} +{"Action":"fail","Test":"TestFetchHistoricalInvitedEventFromBeforeInvite"} +{"Action":"pass","Test":"TestFetchHistoricalInvitedEventFromBetweenInvite"} +{"Action":"fail","Test":"TestFetchHistoricalJoinedEventDenied"} +{"Action":"pass","Test":"TestFetchHistoricalSharedEvent"} +{"Action":"pass","Test":"TestFetchMessagesFromNonExistentRoom"} +{"Action":"pass","Test":"TestFilter"} +{"Action":"fail","Test":"TestFilterMessagesByRelType"} +{"Action":"pass","Test":"TestGappedSyncLeaveSection"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/join"} 
+{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/leave"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/not_membership"} {"Action":"fail","Test":"TestGetMissingEventsGapFilling"} +{"Action":"pass","Test":"TestGetRoomMembers"} +{"Action":"fail","Test":"TestGetRoomMembersAtPoint"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility"} @@ -76,15 +191,41 @@ {"Action":"pass","Test":"TestInboundFederationProfile/Inbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"} {"Action":"fail","Test":"TestInboundFederationRejectsEventsWithRejectedAuthEvents"} +{"Action":"pass","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} {"Action":"pass","Test":"TestIsDirectFlagFederation"} {"Action":"pass","Test":"TestIsDirectFlagLocal"} {"Action":"pass","Test":"TestJoinFederatedRoomFailOver"} +{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser"} +{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser/join_remote_federated_room_as_application_service_user"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join"} 
{"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinViaRoomIDAndServerName"} +{"Action":"fail","Test":"TestJson"} +{"Action":"fail","Test":"TestJson/Parallel"} +{"Action":"fail","Test":"TestJson/Parallel/Invalid_JSON_special_values"} +{"Action":"fail","Test":"TestJson/Parallel/Invalid_numerical_values"} +{"Action":"fail","Test":"TestJumpToDateEndpoint"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/can_paginate_after_getting_remote_event_from_timestamp_to_event_endpoint"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_backwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_forwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/when_looking_backwards_before_the_room_was_created,_should_be_able_to_find_event_that_was_imported"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_after_given_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_before_given_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_after_given_timestmap_when_all_message_timestamps_are_the_same"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_before_given_timestamp_when_all_message_timestamps_are_the_same"} +{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_after_the_latest_timestmap"} +{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_before_the_earliest_timestmap"} 
+{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_private_room_you_are_not_a_member_of"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_public_room_you_are_not_a_member_of"} +{"Action":"fail","Test":"TestKeyChangesLocal"} +{"Action":"fail","Test":"TestKeyChangesLocal/New_login_should_create_a_device_lists.changed_entry"} +{"Action":"fail","Test":"TestKeyClaimOrdering"} +{"Action":"pass","Test":"TestKeysQueryWithDeviceIDAsObjectFails"} {"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectory"} {"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room"} {"Action":"fail","Test":"TestKnocking"} @@ -139,9 +280,35 @@ {"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} +{"Action":"pass","Test":"TestLeakyTyping"} +{"Action":"pass","Test":"TestLeaveEventInviteRejection"} +{"Action":"fail","Test":"TestLeaveEventVisibility"} +{"Action":"fail","Test":"TestLeftRoomFixture"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_'m.room.name'_state_for_a_departed_room"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/members_for_a_departed_room"} +{"Action":"pass","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/messages_for_a_departed_room"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/state_for_a_departed_room"} +{"Action":"pass","Test":"TestLeftRoomFixture/Getting_messages_going_forward_is_limited_for_a_departed_room"} {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} 
+{"Action":"pass","Test":"TestLogin"} +{"Action":"pass","Test":"TestLogin/parallel"} +{"Action":"pass","Test":"TestLogin/parallel/GET_/login_yields_a_set_of_flows"} +{"Action":"pass","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_as_non-existing_user_is_rejected"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_log_in_as_a_user_with_just_the_local_part_of_the_id"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_login_as_user"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_returns_the_same_device_id_as_that_in_the_request"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_wrong_password_is_rejected"} +{"Action":"pass","Test":"TestLogout"} +{"Action":"pass","Test":"TestLogout/Can_logout_all_devices"} +{"Action":"pass","Test":"TestLogout/Can_logout_current_device"} +{"Action":"pass","Test":"TestLogout/Request_to_logout_with_invalid_an_access_token_is_rejected"} +{"Action":"pass","Test":"TestLogout/Request_to_logout_without_an_access_token_is_rejected"} +{"Action":"fail","Test":"TestMSC3757OwnedState"} +{"Action":"pass","Test":"TestMSC3967"} +{"Action":"pass","Test":"TestMediaConfig"} {"Action":"pass","Test":"TestMediaFilenames"} {"Action":"pass","Test":"TestMediaFilenames/Parallel"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII"} @@ -178,11 +345,74 @@ {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_locally"} {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_over_federation"} {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_upload_without_a_file_name"} +{"Action":"fail","Test":"TestMembersLocal"} +{"Action":"fail","Test":"TestMembersLocal/Parallel"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_join_events"} 
+{"Action":"fail","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_incremental_sync)"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_initial_sync)"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/New_room_members_see_their_own_join_event"} +{"Action":"fail","Test":"TestMembershipOnEvents"} {"Action":"fail","Test":"TestNetworkPartitionOrdering"} +{"Action":"pass","Test":"TestNotPresentUserCannotBanOthers"} +{"Action":"pass","Test":"TestOlderLeftRoomsNotInLeaveSection"} +{"Action":"fail","Test":"TestOutboundFederationEventSizeGetMissingEvents"} {"Action":"fail","Test":"TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"} {"Action":"pass","Test":"TestOutboundFederationProfile"} {"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} +{"Action":"fail","Test":"TestPollsLocalPushRules"} +{"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} +{"Action":"pass","Test":"TestPowerLevels"} +{"Action":"pass","Test":"TestPowerLevels/GET_/rooms/:room_id/state/m.room.power_levels_can_fetch_levels"} +{"Action":"pass","Test":"TestPowerLevels/PUT_/rooms/:room_id/state/m.room.power_levels_can_set_levels"} +{"Action":"pass","Test":"TestPowerLevels/PUT_power_levels_should_not_explode_if_the_old_power_levels_were_empty"} +{"Action":"fail","Test":"TestPresence"} +{"Action":"fail","Test":"TestPresence/GET_/presence/:user_id/status_fetches_initial_status"} +{"Action":"pass","Test":"TestPresence/PUT_/presence/:user_id/status_updates_my_presence"} +{"Action":"pass","Test":"TestPresence/Presence_can_be_set_from_sync"} +{"Action":"pass","Test":"TestPresence/Presence_changes_are_reported_to_local_room_members"} +{"Action":"pass","Test":"TestPresence/Presence_changes_to_UNAVAILABLE_are_reported_to_local_room_members"} 
+{"Action":"pass","Test":"TestPresenceSyncDifferentRooms"} +{"Action":"pass","Test":"TestProfileAvatarURL"} +{"Action":"pass","Test":"TestProfileAvatarURL/GET_/profile/:user_id/avatar_url_publicly_accessible"} +{"Action":"pass","Test":"TestProfileAvatarURL/PUT_/profile/:user_id/avatar_url_sets_my_avatar"} +{"Action":"pass","Test":"TestProfileDisplayName"} +{"Action":"pass","Test":"TestProfileDisplayName/GET_/profile/:user_id/displayname_publicly_accessible"} +{"Action":"pass","Test":"TestProfileDisplayName/PUT_/profile/:user_id/displayname_sets_my_name"} +{"Action":"pass","Test":"TestPushRuleCacheHealth"} +{"Action":"pass","Test":"TestPushSync"} +{"Action":"pass","Test":"TestPushSync/Adding_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Disabling_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Enabling_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Push_rules_come_down_in_an_initial_/sync"} +{"Action":"pass","Test":"TestPushSync/Setting_actions_for_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestRegistration"} +{"Action":"pass","Test":"TestRegistration/parallel"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_INVALID_USERNAME_for_invalid_user_name"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_USER_IN_USE_for_registered_user_name"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_available_for_unregistered_user_name"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_admin_with_shared_secret"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_disallows_symbols"} 
+{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_downcases_capitals"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/-"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/."} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_//"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/3"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/="} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/_"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/q"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_can_create_a_user"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_downcases_capitals_in_usernames"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_if_user_already_exists"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_usernames_with_special_characters"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_returns_the_same_device_id_as_that_in_the_request"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_{}_returns_a_set_of_flows"} +{"Action":"pass","Test":"TestRegistration/parallel/Registration_accepts_non-ascii_passwords"} +{"Action":"pass","Test":"TestRelations"} +{"Action":"fail","Test":"TestRelationsPagination"} +{"Action":"pass","Test":"TestRelationsPaginationSync"} {"Action":"pass","Test":"TestRemoteAliasRequestsUnderstandUnicode"} {"Action":"pass","Test":"TestRemotePngThumbnail"} 
{"Action":"pass","Test":"TestRemotePngThumbnail/test_/_matrix/client/v1/media_endpoint"} @@ -191,6 +421,13 @@ {"Action":"fail","Test":"TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"} {"Action":"fail","Test":"TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"} {"Action":"pass","Test":"TestRemoteTyping"} +{"Action":"fail","Test":"TestRemovingAccountData"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_DELETE_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_PUT_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_account_data_via_PUT_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_data_via_DELETE_works"} +{"Action":"fail","Test":"TestRequestEncodingFails"} +{"Action":"fail","Test":"TestRequestEncodingFails/POST_rejects_invalid_utf-8_in_JSON"} {"Action":"fail","Test":"TestRestrictedRoomsLocalJoin"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room"} @@ -221,12 +458,166 @@ {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsSpacesSummaryFederation"} {"Action":"fail","Test":"TestRestrictedRoomsSpacesSummaryLocal"} +{"Action":"pass","Test":"TestRoomAlias"} +{"Action":"pass","Test":"TestRoomAlias/Parallel"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/GET_/rooms/:room_id/aliases_lists_aliases"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/Only_room_members_can_list_aliases_of_a_room"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/PUT_/directory/room/:room_alias_creates_alias"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/Room_aliases_can_contain_Unicode"} +{"Action":"fail","Test":"TestRoomCanonicalAlias"} 
+{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_aliases"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_alt_aliases"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases#01"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_setting_rejects_deleted_aliases"} +{"Action":"pass","Test":"TestRoomCreate"} +{"Action":"pass","Test":"TestRoomCreate/Parallel"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/Can_/sync_newly_created_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_creates_a_room_with_the_given_version"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_ignores_attempts_to_set_the_room_version_via_creation_content"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room_with_invites"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_public_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_name"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_topic"} 
+{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_numeric_versions"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_unknown_versions"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/Rooms_can_be_created_with_an_initial_invite_list_(SYN-205)"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Joining_room_twice_is_idempotent"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} +{"Action":"pass","Test":"TestRoomDeleteAlias"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_alias_with_no_ops"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_canonical_alias_with_no_ops"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Deleting_a_non-existent_alias_should_return_a_404"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_in_the_default_room_configuration"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_can't_delete_other's_aliases"} +{"Action":"fail","Test":"TestRoomForget"} +{"Action":"fail","Test":"TestRoomForget/Parallel"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can't_forget_room_you're_still_in"} 
+{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_we_weren't_an_actual_member"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_you've_been_kicked_from"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_re-join_room_if_re-invited"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Forgetting_room_does_not_show_up_in_v2_initial_/sync"} +{"Action":"fail","Test":"TestRoomForget/Parallel/Forgotten_room_messages_cannot_be_paginated"} +{"Action":"fail","Test":"TestRoomForget/Parallel/Leave_for_forgotten_room_shows_up_in_v2_incremental_/sync"} +{"Action":"pass","Test":"TestRoomImageRoundtrip"} +{"Action":"fail","Test":"TestRoomMembers"} +{"Action":"fail","Test":"TestRoomMembers/Parallel"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room"} +{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room_with_custom_content"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room"} +{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room_with_custom_content"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/ban_can_ban_a_user"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/invite_can_send_an_invite"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/join_can_join_a_room"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/leave_can_leave_a_room"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} +{"Action":"pass","Test":"TestRoomMessagesLazyLoading"} +{"Action":"pass","Test":"TestRoomMessagesLazyLoadingLocalUser"} +{"Action":"pass","Test":"TestRoomReadMarkers"} +{"Action":"pass","Test":"TestRoomReceipts"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} 
+{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} +{"Action":"fail","Test":"TestRoomState"} +{"Action":"fail","Test":"TestRoomState/Parallel"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/directory/room/:room_alias_yields_room_ID"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/joined_rooms_lists_newly-created_room"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/publicRooms_lists_newly-created_room"} +{"Action":"fail","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_fetches_my_membership"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_is_forbidden_after_leaving_room"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id?format=event_fetches_my_membership_event"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id_fetches_my_membership"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.name_gets_name"} 
+{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.power_levels_fetches_powerlevels"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.topic_gets_topic"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state_fetches_entire_room_state"} +{"Action":"pass","Test":"TestRoomState/Parallel/POST_/rooms/:room_id/state/m.room.name_sets_name"} +{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/createRoom_with_creation_content"} +{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/rooms/:room_id/state/m.room.topic_sets_topic"} +{"Action":"pass","Test":"TestRoomSummary"} +{"Action":"pass","Test":"TestRoomsInvite"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Can_invite_users_to_invite-only_rooms"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_see_room_metadata"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Uninvited_users_cannot_join_the_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_a_user_that_is_already_in_the_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_themselves_to_a_room"} +{"Action":"fail","Test":"TestSearch"} +{"Action":"fail","Test":"TestSearch/parallel"} +{"Action":"fail","Test":"TestSearch/parallel/Can_back-paginate_search_results"} +{"Action":"fail","Test":"TestSearch/parallel/Can_get_context_around_search_results"} +{"Action":"pass","Test":"TestSearch/parallel/Can_search_for_an_event_by_body"} +{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_rank_ordering_do_not_include_redacted_events"} 
+{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_recent_ordering_do_not_include_redacted_events"} +{"Action":"pass","Test":"TestSearch/parallel/Search_works_across_an_upgraded_room_and_its_predecessor"} +{"Action":"fail","Test":"TestSendAndFetchMessage"} {"Action":"skip","Test":"TestSendJoinPartialStateResponse"} +{"Action":"pass","Test":"TestSendMessageWithTxn"} +{"Action":"pass","Test":"TestServerCapabilities"} +{"Action":"skip","Test":"TestServerNotices"} +{"Action":"fail","Test":"TestSync"} +{"Action":"fail","Test":"TestSync/parallel"} +{"Action":"pass","Test":"TestSync/parallel/Can_sync_a_joined_room"} +{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking"} +{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking/User_is_correctly_listed_when_they_leave,_even_when_lazy_loading_is_enabled"} +{"Action":"pass","Test":"TestSync/parallel/Full_state_sync_includes_joined_rooms"} +{"Action":"fail","Test":"TestSync/parallel/Get_presence_for_newly_joined_members_in_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_has_correct_timeline_in_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_includes_presence_in_incremental_sync"} +{"Action":"pass","Test":"TestSync/parallel/Newly_joined_room_is_included_in_an_incremental_sync"} +{"Action":"pass","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} +{"Action":"pass","Test":"TestSyncFilter"} +{"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} +{"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} +{"Action":"pass","Test":"TestSyncLeaveSection"} +{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} 
{"Action":"pass","Test":"TestSyncOmitsStateChangeOnFilteredEvents"} +{"Action":"pass","Test":"TestSyncTimelineGap"} +{"Action":"pass","Test":"TestSyncTimelineGap/full"} +{"Action":"pass","Test":"TestSyncTimelineGap/incremental"} +{"Action":"pass","Test":"TestTentativeEventualJoiningAfterRejecting"} +{"Action":"fail","Test":"TestThreadReceiptsInSyncMSC4102"} +{"Action":"fail","Test":"TestThreadedReceipts"} +{"Action":"fail","Test":"TestThreadsEndpoint"} +{"Action":"pass","Test":"TestToDeviceMessages"} {"Action":"fail","Test":"TestToDeviceMessagesOverFederation"} {"Action":"pass","Test":"TestToDeviceMessagesOverFederation/good_connectivity"} {"Action":"pass","Test":"TestToDeviceMessagesOverFederation/interrupted_connectivity"} {"Action":"fail","Test":"TestToDeviceMessagesOverFederation/stopped_server"} +{"Action":"fail","Test":"TestTxnIdWithRefreshToken"} +{"Action":"fail","Test":"TestTxnIdempotency"} +{"Action":"pass","Test":"TestTxnIdempotencyScopedToDevice"} +{"Action":"pass","Test":"TestTxnInEvent"} +{"Action":"pass","Test":"TestTxnScopeOnLocalEcho"} +{"Action":"pass","Test":"TestTyping"} +{"Action":"pass","Test":"TestTyping/Typing_can_be_explicitly_stopped"} +{"Action":"pass","Test":"TestTyping/Typing_notification_sent_to_local_room_members"} {"Action":"fail","Test":"TestUnknownEndpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Client-server_endpoints"} {"Action":"fail","Test":"TestUnknownEndpoints/Key_endpoints"} @@ -234,5 +625,27 @@ {"Action":"pass","Test":"TestUnknownEndpoints/Server-server_endpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Unknown_prefix"} {"Action":"fail","Test":"TestUnrejectRejectedEvents"} +{"Action":"fail","Test":"TestUploadKey"} +{"Action":"fail","Test":"TestUploadKey/Parallel"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Can_claim_one_time_key_using_POST"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_device_keys_using_POST"} 
+{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_specific_device_keys_using_POST"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Can_upload_device_keys"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} +{"Action":"pass","Test":"TestUploadKey/Parallel/query_for_user_with_no_keys_returns_empty_key_dict"} +{"Action":"pass","Test":"TestUploadKeyIdempotency"} +{"Action":"pass","Test":"TestUploadKeyIdempotencyOverlap"} +{"Action":"fail","Test":"TestUrlPreview"} {"Action":"pass","Test":"TestUserAppearsInChangedDeviceListOnJoinOverFederation"} +{"Action":"pass","Test":"TestVersionStructure"} +{"Action":"pass","Test":"TestVersionStructure/Version_responds_200_OK_with_valid_structure"} +{"Action":"pass","Test":"TestWithoutOwnedState"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_a_non-member_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_suffixed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_malformed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_their_own_suffixed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/user_can_set_state_with_their_own_user_ID_as_state_key"} {"Action":"pass","Test":"TestWriteMDirectAccountData"}