diff --git a/.dockerignore b/.dockerignore
index 453634df..5054844f 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -11,10 +11,11 @@ docker/
 *.iml
 
 # Git folder
-.git
+# .git
 .gitea
 .gitlab
 .github
+.forgejo
 
 # Dot files
 .env
diff --git a/.editorconfig b/.editorconfig
index 2d7438a4..91f073bd 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -22,3 +22,7 @@ indent_size = 2
 [*.rs]
 indent_style = tab
 max_line_length = 98
+
+[{.forgejo/**/*.yml,.github/**/*.yml}]
+indent_size = 2
+indent_style = space
diff --git a/.forgejo/actions/rust-toolchain/action.yml b/.forgejo/actions/rust-toolchain/action.yml
new file mode 100644
index 00000000..71fb96f5
--- /dev/null
+++ b/.forgejo/actions/rust-toolchain/action.yml
@@ -0,0 +1,53 @@
+name: rust-toolchain
+description: |
+  Install a Rust toolchain using rustup.
+  See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification
+  for more information about toolchains.
+inputs:
+  toolchain:
+    description: |
+      Rust toolchain name.
+      See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification
+    required: false
+  target:
+    description: Target triple to install for this toolchain
+    required: false
+  components:
+    description: Space-separated list of components to be additionally installed for a new toolchain
+    required: false
+outputs:
+  rustc_version:
+    description: The rustc version installed
+    value: ${{ steps.rustc-version.outputs.version }}
+
+runs:
+  using: composite
+  steps:
+    - name: Cache rustup toolchains
+      uses: actions/cache@v3
+      with:
+        path: |
+          ~/.rustup
+          !~/.rustup/tmp
+          !~/.rustup/downloads
+        # Requires repo to be cloned if toolchain is not specified
+        key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
+    - name: Install Rust toolchain
+      shell: bash
+      run: |
+        if ! command -v rustup &> /dev/null ; then
+          curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y
+          echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH
+        fi
+    - shell: bash
+      run: |
+        set -x
+        ${{ inputs.toolchain && format('rustup override set {0}', inputs.toolchain) }}
+        ${{ inputs.target && format('rustup target add {0}', inputs.target) }}
+        ${{ inputs.components && format('rustup component add {0}', inputs.components) }}
+        cargo --version
+        rustc --version
+    - id: rustc-version
+      shell: bash
+      run: |
+        echo "version=$(rustc --version)" >> $GITHUB_OUTPUT
diff --git a/.forgejo/actions/sccache/action.yml b/.forgejo/actions/sccache/action.yml
new file mode 100644
index 00000000..b5e5dcf4
--- /dev/null
+++ b/.forgejo/actions/sccache/action.yml
@@ -0,0 +1,29 @@
+name: sccache
+description: |
+  Install sccache for caching builds in GitHub Actions.
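+  Exports SCCACHE_GHA_ENABLED, RUSTC_WRAPPER and the CMake compiler-launcher
+  variables so that later steps route compilations through sccache.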
+
+inputs:
+  token:
+    description: 'A GitHub PAT'
+    required: false
+
+runs:
+  using: composite
+  steps:
+    - name: Install sccache
+      uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
+      with:
+        token: ${{ inputs.token }}
+    - name: Configure sccache
+      uses: https://github.com/actions/github-script@v7
+      with:
+        script: |
+          core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
+          core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
+    - shell: bash
+      run: |
+        echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
+        echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
+        echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
+        echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
+        echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
diff --git a/.forgejo/actions/timelord/action.yml b/.forgejo/actions/timelord/action.yml
new file mode 100644
index 00000000..bb9766d5
--- /dev/null
+++ b/.forgejo/actions/timelord/action.yml
@@ -0,0 +1,46 @@
+name: timelord
+description: |
+  Use timelord to set file timestamps
+inputs:
+  key:
+    description: |
+      The key to use for caching the timelord data.
+      This should be unique to the repository and the runner.
+    required: true
+    default: timelord-v0
+  path:
+    description: |
+      The path to the directory to be timestamped.
+      This should be the root of the repository.
+    required: true
+    default: .
+
+runs:
+  using: composite
+  steps:
+    - name: Cache timelord-cli installation
+      id: cache-timelord-bin
+      uses: actions/cache@v3
+      with:
+        path: ~/.cargo/bin/timelord
+        key: timelord-cli-v3.0.1
+    - name: Install timelord-cli
+      uses: https://github.com/cargo-bins/cargo-binstall@main
+      if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
+    - run: cargo binstall timelord-cli@3.0.1
+      shell: bash
+      if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
+
+    - name: Load timelord files
+      uses: actions/cache/restore@v3
+      with:
+        path: /timelord/
+        key: ${{ inputs.key }}
+    - name: Run timelord to set timestamps
+      shell: bash
+      run: timelord sync --source-dir ${{ inputs.path }} --cache-dir /timelord/
+    - name: Save timelord
+      uses: actions/cache/save@v3
+      with:
+        path: /timelord/
+        key: ${{ inputs.key }}
diff --git a/.forgejo/workflows/build-alpine.yml b/.forgejo/workflows/build-alpine.yml
new file mode 100644
index 00000000..b1757a60
--- /dev/null
+++ b/.forgejo/workflows/build-alpine.yml
@@ -0,0 +1,49 @@
+on:
+  - workflow_dispatch
+  - push
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    container:
+      image: alpine:edge
+
+    steps:
+      - name: set up dependencies
+        run: |
+          apk update
+          apk upgrade
+          apk add nodejs git alpine-sdk
+      - uses: actions/checkout@v4
+        name: checkout the alpine dir
+        with:
+          sparse-checkout: "alpine/"
+
+      # - uses: actions/checkout@v4
+      #   name: checkout the rest in the alpine dir
+      #   with:
+      #     path: 'alpine/continuwuity'
+      - name: set up user
+        run: adduser -DG abuild ci
+
+      - name: set up keys
+        run: |
+          pwd
+          mkdir ~/.abuild
+          echo "${{ secrets.abuild_privkey }}" > ~/.abuild/ci@continuwuity.rsa
+          echo "${{ secrets.abuild_pubkey }}" > ~/.abuild/ci@continuwuity.rsa.pub
+          echo $HOME
+          echo 'PACKAGER_PRIVKEY="/root/.abuild/ci@continuwuity.rsa"' > ~/.abuild/abuild.conf
+          ls ~/.abuild
+
+      - name: go go gadget abuild
+        run: |
+          cd alpine
+          # modify the APKBUILD to use the current branch instead of the release
+          # note that it seems to require the repo to be public (as you'll get
+          # a 404 even if the token is provided)
+          export ARCHIVE_URL="${{ github.server_url }}/${{ github.repository
}}/archive/${{ github.ref_name }}.tar.gz"
+          echo $ARCHIVE_URL
+          sed -i '/^source=/c\source="'"$ARCHIVE_URL" APKBUILD
+          abuild -F checksum
+          abuild -Fr
diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml
index c08c1abb..7d95a317 100644
--- a/.forgejo/workflows/documentation.yml
+++ b/.forgejo/workflows/documentation.yml
@@ -16,7 +16,7 @@ concurrency:
 jobs:
   docs:
     name: Build and Deploy Documentation
-    runs-on: not-nexy
+    runs-on: ubuntu-latest
 
     steps:
       - name: Sync repository
@@ -36,9 +36,14 @@ jobs:
       - name: Prepare static files for deployment
        run: |
          mkdir -p ./public/.well-known/matrix
+          mkdir -p ./public/.well-known/continuwuity
+          mkdir -p ./public/schema
          # Copy the Matrix .well-known files
          cp ./docs/static/server ./public/.well-known/matrix/server
          cp ./docs/static/client ./public/.well-known/matrix/client
+          cp ./docs/static/client ./public/.well-known/matrix/support
+          cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements
+          cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json
          # Copy the custom headers file
          cp ./docs/static/_headers ./public/_headers
          echo "Copied .well-known files and _headers to ./public"
@@ -52,17 +57,17 @@ jobs:
        run: npm install --save-dev wrangler@latest
 
       - name: Deploy to Cloudflare Pages (Production)
-        if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
+        if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
        uses: https://github.com/cloudflare/wrangler-action@v3
        with:
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
-          command: pages deploy ./public --branch=main --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}"
+          command: pages deploy ./public --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
 
       - name: Deploy to Cloudflare Pages (Preview)
-        if: ${{ github.event_name != 'push' || github.ref != 'refs/heads/main' }}
+        if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
        uses: https://github.com/cloudflare/wrangler-action@v3
        with:
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
-          command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}"
+          command: pages deploy ./public --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
diff --git a/.forgejo/workflows/element.yml b/.forgejo/workflows/element.yml
new file mode 100644
index 00000000..db771197
--- /dev/null
+++ b/.forgejo/workflows/element.yml
@@ -0,0 +1,127 @@
+name: Deploy Element Web
+
+on:
+  schedule:
+    - cron: "0 0 * * *"
+  workflow_dispatch:
+
+concurrency:
+  group: "element-${{ github.ref }}"
+  cancel-in-progress: true
+
+jobs:
+  build-and-deploy:
+    name: Build and Deploy Element Web
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Setup Node.js
+        uses: https://code.forgejo.org/actions/setup-node@v4
+        with:
+          node-version: "20"
+
+      - name: Clone, setup, and build Element Web
+        run: |
+          echo "Cloning Element Web..."
+          git clone https://github.com/maunium/element-web
+          cd element-web
+          git checkout develop
+          git pull
+
+          echo "Cloning matrix-js-sdk..."
+          git clone https://github.com/matrix-org/matrix-js-sdk.git
+
+          echo "Installing Yarn..."
+          npm install -g yarn
+
+          echo "Installing dependencies..."
+          yarn install
+
+          echo "Preparing build environment..."
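+          # scratch .home directory created below; presumably used as a HOME
+          # for the build tooling so nothing is written outside the workspace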
+
+          mkdir -p .home
+
+          echo "Cleaning up specific node_modules paths..."
+          rm -rf node_modules/@types/eslint-scope/ matrix-*-sdk/node_modules/@types/eslint-scope || echo "Cleanup paths not found, continuing."
+
+          echo "Getting matrix-js-sdk commit hash..."
+          cd matrix-js-sdk
+          jsver=$(git rev-parse HEAD)
+          jsver=${jsver:0:12}
+          cd ..
+          echo "matrix-js-sdk version hash: $jsver"
+
+          echo "Getting element-web commit hash..."
+          ver=$(git rev-parse HEAD)
+          ver=${ver:0:12}
+          echo "element-web version hash: $ver"
+
+          chmod +x ./build-sh
+
+          export VERSION="$ver-js-$jsver"
+          echo "Building Element Web version: $VERSION"
+          ./build-sh
+
+          echo "Checking for build output..."
+          ls -la webapp/
+
+      - name: Create config.json
+        run: |
+          cat <<EOF > ./element-web/webapp/config.json
+          {
+            "default_server_name": "continuwuity.org",
+            "default_server_config": {
+              "m.homeserver": {
+                "base_url": "https://matrix.continuwuity.org"
+              }
+            },
+            "default_country_code": "GB",
+            "default_theme": "dark",
+            "mobile_guide_toast": false,
+            "show_labs_settings": true,
+            "room_directory": [
+              "continuwuity.org",
+              "matrixrooms.info"
+            ],
+            "settings_defaults": {
+              "UIFeature.urlPreviews": true,
+              "UIFeature.feedback": false,
+              "UIFeature.voip": false,
+              "UIFeature.shareQrCode": false,
+              "UIFeature.shareSocial": false,
+              "UIFeature.locationSharing": false,
+              "enableSyntaxHighlightLanguageDetection": true
+            },
+            "features": {
+              "feature_pinning": true,
+              "feature_custom_themes": true
+            }
+          }
+          EOF
+          echo "Created ./element-web/webapp/config.json"
+          cat ./element-web/webapp/config.json
+
+      - name: Upload Artifact
+        uses: https://code.forgejo.org/actions/upload-artifact@v3
+        with:
+          name: element-web
+          path: ./element-web/webapp/
+          retention-days: 14
+
+      - name: Install Wrangler
+        run: npm install --save-dev wrangler@latest
+
+      - name: Deploy to Cloudflare Pages (Production)
+        if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
+        uses: https://github.com/cloudflare/wrangler-action@v3
+        with:
+          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+          command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
+
+      - name: Deploy to Cloudflare Pages (Preview)
+        if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
+        uses: https://github.com/cloudflare/wrangler-action@v3
+        with:
+          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
+          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
+          command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml
index 142529ae..ec466c58 100644
--- a/.forgejo/workflows/release-image.yml
+++ b/.forgejo/workflows/release-image.yml
@@ -1,22 +1,24 @@
 name: Release Docker Image
-concurrency: 
+concurrency:
   group: "release-image-${{ github.ref }}"
 
 on:
-  pull_request:
   push:
     paths-ignore:
-      - '.gitlab-ci.yml'
-      - '.gitignore'
-      - 'renovate.json'
-      - 'debian/**'
-      - 'docker/**'
+      - "*.md"
+      - "**/*.md"
+      - ".gitlab-ci.yml"
+      - ".gitignore"
+      - "renovate.json"
+      - "debian/**"
+      - "docker/**"
+      - "docs/**"
   # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:
 
 env:
-  BUILTIN_REGISTRY: forgejo.ellis.link
-  BUILTIN_REGISTRY_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' ||
'false' }}" + BUILTIN_REGISTRY: forgejo.ellis.link + BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}" jobs: define-variables: @@ -35,7 +37,7 @@ jobs: script: | const githubRepo = '${{ github.repository }}'.toLowerCase() const repoId = githubRepo.split('/')[1] - + core.setOutput('github_repository', githubRepo) const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo let images = [] @@ -46,7 +48,7 @@ jobs: core.setOutput('images_list', images.join(",")) const platforms = ['linux/amd64', 'linux/arm64'] core.setOutput('build_matrix', JSON.stringify({ - platform: platforms, + platform: platforms, include: platforms.map(platform => { return { platform, slug: platform.replace('/', '-') @@ -63,32 +65,29 @@ jobs: attestations: write id-token: write strategy: - matrix: { - "include": [ - { - "platform": "linux/amd64", - "slug": "linux-amd64" - }, - { - "platform": "linux/arm64", - "slug": "linux-arm64" - } - ], - "platform": [ - "linux/amd64", - "linux/arm64" - ] - } + matrix: + { + "include": + [ + { "platform": "linux/amd64", "slug": "linux-amd64" }, + { "platform": "linux/arm64", "slug": "linux-arm64" }, + ], + "platform": ["linux/amd64", "linux/arm64"], + } steps: - name: Echo strategy run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' - name: Echo matrix run: echo '${{ toJSON(matrix) }}' + - name: Checkout repository uses: actions/checkout@v4 with: persist-credentials: false - + - name: Install rust + id: rust-toolchain + uses: ./.forgejo/actions/rust-toolchain + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Set up QEMU @@ -97,9 +96,9 @@ jobs: - name: Login to builtin registry uses: docker/login-action@v3 with: - registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} - password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. - name: Extract metadata (labels, annotations) for Docker @@ -122,6 +121,58 @@ jobs: echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV + + - uses: ./.forgejo/actions/timelord + with: + key: timelord-v0 + path: . 
+
+      - name: Cache Rust registry
+        uses: actions/cache@v3
+        with:
+          path: |
+            .cargo/git
+            .cargo/git/checkouts
+            .cargo/registry
+            .cargo/registry/src
+          key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
+      - name: Cache cargo target
+        id: cache-cargo-target
+        uses: actions/cache@v3
+        with:
+          path: |
+            cargo-target-${{ matrix.slug }}
+          key: cargo-target-${{ matrix.slug }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
+      - name: Cache apt cache
+        id: cache-apt
+        uses: actions/cache@v3
+        with:
+          path: |
+            var-cache-apt-${{ matrix.slug }}
+          key: var-cache-apt-${{ matrix.slug }}
+      - name: Cache apt lib
+        id: cache-apt-lib
+        uses: actions/cache@v3
+        with:
+          path: |
+            var-lib-apt-${{ matrix.slug }}
+          key: var-lib-apt-${{ matrix.slug }}
+      - name: inject cache into docker
+        uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0
+        with:
+          cache-map: |
+            {
+              ".cargo/registry": "/usr/local/cargo/registry",
+              ".cargo/git/db": "/usr/local/cargo/git/db",
+              "cargo-target-${{ matrix.slug }}": {
+                "target": "/app/target",
+                "id": "cargo-target-${{ matrix.platform }}"
+              },
+              "var-cache-apt-${{ matrix.slug }}": "/var/cache/apt",
+              "var-lib-apt-${{ matrix.slug }}": "/var/lib/apt"
+            }
+          skip-extraction: ${{ steps.cache.outputs.cache-hit }}
+
       - name: Build and push Docker image by digest
         id: build
         uses: docker/build-push-action@v6
@@ -129,12 +180,15 @@ jobs:
           context: .
           file: "docker/Dockerfile"
           build-args: |
-            CONDUWUIT_VERSION_EXTRA=${{ env.COMMIT_SHORT_SHA }}
+            GIT_COMMIT_HASH=${{ github.sha }}
+            GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
+            GIT_REMOTE_URL=${{github.event.repository.html_url }}
+            GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
           platforms: ${{ matrix.platform }}
           labels: ${{ steps.meta.outputs.labels }}
           annotations: ${{ steps.meta.outputs.annotations }}
-          # cache-from: type=gha
-          # cache-to: type=gha,mode=max
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
           sbom: true
           outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
         env:
@@ -145,7 +199,7 @@ jobs:
         run: |
           mkdir -p /tmp/digests
           digest="${{ steps.build.outputs.digest }}"
-          touch "/tmp/digests/${digest#sha256:}"
+          touch "/tmp/digests/${digest#sha256:}"
 
       - name: Upload digest
         uses: forgejo/upload-artifact@v4
@@ -154,7 +208,7 @@ jobs:
           path: /tmp/digests/*
           if-no-files-found: error
           retention-days: 1
-
+
   merge:
     runs-on: dind
     container: ghcr.io/catthehacker/ubuntu:act-latest
@@ -170,9 +224,9 @@ jobs:
       - name: Login to builtin registry
         uses: docker/login-action@v3
         with:
-          registry: ${{ env.BUILTIN_REGISTRY }}
-          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
-          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
+          registry: ${{ env.BUILTIN_REGISTRY }}
+          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
+          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
 
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -185,13 +239,13 @@ jobs:
             type=semver,pattern=v{{version}}
             type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }}
             type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
-            type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) == github.ref && '' || 'branch-' }}
+            type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
             type=ref,event=pr
             type=sha,format=long
           images: ${{needs.define-variables.outputs.images}}
         # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
         env:
-          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
+          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
 
       - name: Create manifest list and push
         working-directory: /tmp/digests
diff --git a/.forgejo/workflows/rust-checks.yml b/.forgejo/workflows/rust-checks.yml
new file mode 100644
index 00000000..35ca1ad7
--- /dev/null
+++ b/.forgejo/workflows/rust-checks.yml
@@ -0,0 +1,142 @@
+name: Rust Checks
+
+on:
+  push:
+
+jobs:
+  format:
+    name: Format
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install rust
+        uses: ./.forgejo/actions/rust-toolchain
+        with:
+          toolchain: "nightly"
+          components: "rustfmt"
+
+      - name: Check formatting
+        run: |
+          cargo +nightly fmt --all -- --check
+
+  clippy:
+    name: Clippy
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install rust
+        uses: ./.forgejo/actions/rust-toolchain
+
+      - uses: https://github.com/actions/create-github-app-token@v2
+        id: app-token
+        with:
+          app-id: ${{ vars.GH_APP_ID }}
+          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
+          github-api-url: https://api.github.com
+          owner: ${{ vars.GH_APP_OWNER }}
+          repositories: ""
+      - name: Install sccache
+        uses: ./.forgejo/actions/sccache
+        with:
+          token: ${{ steps.app-token.outputs.token }}
+      - run: sudo apt-get update
+      - name: Install system dependencies
+        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
+        with:
+          packages: clang liburing-dev
+          version: 1
+      - name: Cache Rust registry
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/git
+            !~/.cargo/git/checkouts
+            ~/.cargo/registry
+            !~/.cargo/registry/src
+          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
+      - name: Timelord
+        uses: ./.forgejo/actions/timelord
+        with:
+          key: sccache-v0
+          path: .
+      - name: Clippy
+        run: |
+          cargo clippy \
+            --workspace \
+            --locked \
+            --no-deps \
+            --profile test \
+            -- \
+            -D warnings
+
+      - name: Show sccache stats
+        if: always()
+        run: sccache --show-stats
+
+  cargo-test:
+    name: Cargo Test
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install rust
+        uses: ./.forgejo/actions/rust-toolchain
+
+      - uses: https://github.com/actions/create-github-app-token@v2
+        id: app-token
+        with:
+          app-id: ${{ vars.GH_APP_ID }}
+          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
+          github-api-url: https://api.github.com
+          owner: ${{ vars.GH_APP_OWNER }}
+          repositories: ""
+      - name: Install sccache
+        uses: ./.forgejo/actions/sccache
+        with:
+          token: ${{ steps.app-token.outputs.token }}
+      - run: sudo apt-get update
+      - name: Install system dependencies
+        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
+        with:
+          packages: clang liburing-dev
+          version: 1
+      - name: Cache Rust registry
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/git
+            !~/.cargo/git/checkouts
+            ~/.cargo/registry
+            !~/.cargo/registry/src
+          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
+      - name: Timelord
+        uses: ./.forgejo/actions/timelord
+        with:
+          key: sccache-v0
+          path: .
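+      # With source mtimes pinned by timelord and sccache acting as RUSTC_WRAPPER,
+      # repeat runs should largely reuse cached compilation instead of rebuilding.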
+
+      - name: Cargo Test
+        run: |
+          cargo test \
+            --workspace \
+            --locked \
+            --profile test \
+            --all-targets \
+            --no-fail-fast
+
+      - name: Show sccache stats
+        if: always()
+        run: sccache --show-stats
diff --git a/.typos.toml b/.typos.toml
new file mode 100644
index 00000000..41c81085
--- /dev/null
+++ b/.typos.toml
@@ -0,0 +1,9 @@
+[files]
+extend-exclude = ["*.csr"]
+
+[default.extend-words]
+"allocatedp" = "allocatedp"
+"conduwuit" = "conduwuit"
+"continuwuity" = "continuwuity"
+"continuwity" = "continuwuity"
+"execuse" = "execuse"
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index e77154e7..476e68fb 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,4 +1,3 @@
-
 # Contributor Covenant Code of Conduct
 
 ## Our Pledge
@@ -60,8 +59,7 @@ representative at an online or offline event.
 ## Enforcement
 
 Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported to the community leaders responsible for enforcement over email at
- or over Matrix at @strawberry:puppygock.gay.
+reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or email at , and respectively.
 All complaints will be reviewed and investigated promptly and fairly.
 
 All community leaders are obligated to respect the privacy and security of the
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index fb540011..da426801 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,10 +1,10 @@
 # Contributing guide
 
-This page is for about contributing to conduwuit. The
+This page is about contributing to Continuwuity. The
 [development](./development.md) page may be of interest for you as well.
 
 If you would like to work on an [issue][issues] that is not assigned, preferably
-ask in the Matrix room first at [#conduwuit:puppygock.gay][conduwuit-matrix],
+ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix],
 and comment on it.
 
 ### Linting and Formatting
@@ -23,9 +23,9 @@ suggestion, allow the lint and mention that in a comment.
 
 ### Running CI tests locally
 
-conduwuit's CI for tests, linting, formatting, audit, etc use
+Continuwuity's CI for tests, linting, formatting, audit, etc. uses
 [`engage`][engage]. engage can be installed from nixpkgs or `cargo install
-engage`. conduwuit's Nix flake devshell has the nixpkgs engage with `direnv`.
+engage`. Continuwuity's Nix flake devshell provides the nixpkgs engage via `direnv`.
 Use `engage --help` for more usage details.
 
 To test, format, lint, etc that CI would do, install engage, allow the `.envrc`
@@ -73,7 +73,7 @@ If you'd like to run Complement locally using Nix, see the
 
 ### Writing documentation
 
-conduwuit's website uses [`mdbook`][mdbook] and deployed via CI using GitHub
+Continuwuity's website uses [`mdbook`][mdbook] and is deployed via CI using GitHub
 Pages in the [`documentation.yml`][documentation.yml] workflow file with Nix's
 mdbook in the devshell. All documentation is in the `docs/` directory at the
 top level. The compiled mdbook website is also uploaded as an artifact.
@@ -111,33 +111,28 @@ applies here.
 
 ### Creating pull requests
 
-Please try to keep contributions to the GitHub. While the mirrors of conduwuit
-allow for pull/merge requests, there is no guarantee I will see them in a timely
+Please try to keep contributions to the Forgejo instance. While the mirrors of Continuwuity
+allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely
 manner.
 Additionally, please mark WIP or unfinished or incomplete PRs as drafts.
-This prevents me from having to ping once in a while to double check the status
+This prevents us from having to ping once in a while to double check the status
 of it, especially when the CI completed successfully and everything so it
 *looks* done.
 
-If you open a pull request on one of the mirrors, it is your responsibility to
-inform me about its existence. In the future I may try to solve this with more
-repo bots in the conduwuit Matrix room. There is no mailing list or email-patch
-support on the sr.ht mirror, but if you'd like to email me a git patch you can
-do so at `strawberry@puppygock.gay`.
-
 Direct all PRs/MRs to the `main` branch.
 
 By sending a pull request or patch, you are agreeing that your changes are
 allowed to be licenced under the Apache-2.0 licence and all of your conduct is
-in line with the Contributor's Covenant, and conduwuit's Code of Conduct.
+in line with the Contributor Covenant and Continuwuity's Code of Conduct.
 Contribution by users who violate either of these code of conducts will not
 have their contributions accepted. This includes users who have been banned from
-conduwuit Matrix rooms for Code of Conduct violations.
+Continuwuity Matrix rooms for Code of Conduct violations.
 
-[issues]: https://github.com/girlbossceo/conduwuit/issues
-[conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay
+[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues
+[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org
 [complement]: https://github.com/matrix-org/complement/
-[engage.toml]: https://github.com/girlbossceo/conduwuit/blob/main/engage.toml
+[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml
 [engage]: https://charles.page.computer.surgery/engage/
 [sytest]: https://github.com/matrix-org/sytest/
 [cargo-deb]: https://github.com/kornelski/cargo-deb
[cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit [direnv]: https://direnv.net/ [mdbook]: https://rust-lang.github.io/mdBook/ -[documentation.yml]: https://github.com/girlbossceo/conduwuit/blob/main/.github/workflows/documentation.yml +[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml diff --git a/Cargo.lock b/Cargo.lock index def41f68..160be0c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,9 +55,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] name = "arbitrary" @@ -109,6 +109,48 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dbc3a507a82b17ba0d98f6ce8fd6954ea0c8152e98009d36a40d8dcc8ce078a" +[[package]] +name = "askama" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f75363874b771be265f4ffe307ca705ef6f3baa19011c149da8674a87f1b75c4" +dependencies = [ + "askama_derive", + "itoa", + "percent-encoding", + "serde", + "serde_json", +] + +[[package]] +name = "askama_derive" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "129397200fe83088e8a68407a8e2b1f826cf0086b21ccdb866a722c8bcd3a94f" +dependencies = [ + "askama_parser", + "basic-toml", + "memchr", + "proc-macro2", + "quote", + "rustc-hash 2.1.1", + "serde", + "serde_derive", + "syn", +] + +[[package]] +name = "askama_parser" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ab5630b3d5eaf232620167977f95eb51f3432fc76852328774afbd242d4358" +dependencies = [ + "memchr", + "serde", + "serde_derive", + "winnow", +] + [[package]] name = "assign" version = "1.1.1" @@ -128,9 +170,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" +checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" dependencies = [ "brotli", "flate2", @@ -142,17 +184,6 @@ dependencies = [ "zstd-safe", ] -[[package]] -name = "async-recursion" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "async-stream" version = "0.3.6" @@ -242,9 +273,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.28.0" +version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f7720b74ed28ca77f90769a71fd8c637a0137f6fae4ae947e1050229cff57f" +checksum = "bfa9b6986f250236c27e5a204062434a773a13243d2ffc2955f37bdba4c5c6a1" dependencies = [ "bindgen 0.69.5", "cc", @@ -384,9 +415,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", 
"cfg-if", @@ -415,6 +446,15 @@ version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +[[package]] +name = "basic-toml" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba62675e8242a4c4e806d12f11d136e626e6c8361d6b829310732241652a178a" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.69.5" @@ -509,9 +549,9 @@ dependencies = [ [[package]] name = "brotli" -version = "7.0.0" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" +checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -520,9 +560,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.2" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" +checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -534,6 +574,12 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b" +[[package]] +name = "built" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4ad8f11f288f48ca24471bbd51ac257aaeaaa07adae295591266b792902ae64" + [[package]] name = "bumpalo" version = "3.17.0" @@ -542,9 +588,9 @@ checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytemuck" -version = "1.22.0" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" +checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" [[package]] name = "byteorder" @@ -592,9 +638,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.17" +version = "1.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" +checksum = "32db95edf998450acc7881c932f94cd9b05c87b4b2599e8bab064753da4acfd1" dependencies = [ "jobserver", "libc", @@ -643,9 +689,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "num-traits", ] @@ -663,9 +709,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.35" +version = "4.5.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" +checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" dependencies = [ "clap_builder", "clap_derive", @@ -673,9 +719,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.35" +version = "4.5.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" +checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" dependencies = [ "anstyle", 
"clap_lex", @@ -725,7 +771,7 @@ dependencies = [ [[package]] name = "conduwuit" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "clap", "conduwuit_admin", @@ -754,7 +800,7 @@ dependencies = [ [[package]] name = "conduwuit_admin" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "clap", "conduwuit_api", @@ -775,7 +821,7 @@ dependencies = [ [[package]] name = "conduwuit_api" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "async-trait", "axum", @@ -784,7 +830,6 @@ dependencies = [ "base64 0.22.1", "bytes", "conduwuit_core", - "conduwuit_database", "conduwuit_service", "const-str", "futures", @@ -806,9 +851,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "conduwuit_build_metadata" +version = "0.5.0-rc.5" +dependencies = [ + "built 0.8.0", +] + [[package]] name = "conduwuit_core" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "argon2", "arrayvec", @@ -820,6 +872,7 @@ dependencies = [ "checked_ops", "chrono", "clap", + "conduwuit_build_metadata", "conduwuit_macros", "const-str", "core_affinity", @@ -866,7 +919,7 @@ dependencies = [ [[package]] name = "conduwuit_database" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "async-channel", "conduwuit_core", @@ -884,7 +937,7 @@ dependencies = [ [[package]] name = "conduwuit_macros" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "itertools 0.14.0", "proc-macro2", @@ -894,7 +947,7 @@ dependencies = [ [[package]] name = "conduwuit_router" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "axum", "axum-client-ip", @@ -905,6 +958,7 @@ dependencies = [ "conduwuit_api", "conduwuit_core", "conduwuit_service", + "conduwuit_web", "const-str", "futures", "http", @@ -927,7 +981,7 @@ dependencies = [ [[package]] name = "conduwuit_service" -version = "0.5.0" +version = "0.5.0-rc.5" dependencies = [ "async-trait", "base64 0.22.1", @@ -938,7 +992,7 @@ dependencies = [ "const-str", "either", "futures", - "hickory-resolver 0.25.1", + "hickory-resolver 0.25.2", "http", "image", "ipaddress", @@ -962,6 +1016,20 @@ dependencies = [ "webpage", ] +[[package]] +name = "conduwuit_web" +version = "0.5.0-rc.5" +dependencies = [ + "askama", + "axum", + "conduwuit_build_metadata", + "conduwuit_service", + "futures", + "rand 0.8.5", + "thiserror 2.0.12", + "tracing", +] + [[package]] name = "console-api" version = "0.8.1" @@ -1247,9 +1315,9 @@ checksum = "817fa642fb0ee7fe42e95783e00e0969927b96091bdd4b9b1af082acd943913b" [[package]] name = "data-encoding" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" [[package]] name = "date_header" @@ -1368,9 +1436,9 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" dependencies = [ "libc", "windows-sys 0.59.0", @@ -1611,7 +1679,7 @@ dependencies = [ "libc", "log", "rustversion", - "windows 0.58.0", + "windows", ] [[package]] @@ -1626,9 +1694,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", @@ -1639,9 +1707,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "js-sys", @@ -1675,9 +1743,9 @@ checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "h2" -version = "0.4.8" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" dependencies = [ "atomic-waker", "bytes", @@ -1685,7 +1753,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.8.0", + "indexmap 2.9.0", "slab", "tokio", "tokio-util", @@ -1694,9 +1762,9 @@ dependencies = [ [[package]] name = "half" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" +checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" dependencies = [ "cfg-if", "crunchy", @@ -1716,9 +1784,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" [[package]] name = "hdrhistogram" @@ -1801,14 +1869,12 @@ dependencies = [ [[package]] name = "hickory-proto" -version = "0.25.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d844af74f7b799e41c78221be863bade11c430d46042c3b49ca8ae0c6d27287" +checksum = "f8a6fe56c0038198998a6f217ca4e7ef3a5e51f46163bd6dd60b5c71ca6c6502" dependencies = [ - "async-recursion", "async-trait", "cfg-if", - "critical-section", "data-encoding", "enum-as-inner", "futures-channel", @@ -1817,7 +1883,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.0", + "rand 0.9.1", "ring", "serde", "thiserror 2.0.12", @@ -1850,18 +1916,18 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.25.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a128410b38d6f931fcc6ca5c107a3b02cabd6c05967841269a4ad65d23c44331" +checksum = "dc62a9a99b0bfb44d2ab95a7208ac952d31060efc16241c87eaf36406fecf87a" dependencies = [ "cfg-if", "futures-util", - "hickory-proto 0.25.1", + "hickory-proto 0.25.2", "ipconfig", "moka", "once_cell", "parking_lot", - "rand 0.9.0", + "rand 0.9.1", "resolv-conf", "serde", "smallvec", @@ -1890,13 +1956,13 @@ dependencies = [ [[package]] name = "hostname" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" +checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" dependencies = [ "cfg-if", "libc", - "windows 0.52.0", + "windows-link", ] [[package]] @@ -2048,21 +2114,22 @@ dependencies = [ 
[[package]] name = "icu_collections" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -2071,31 +2138,11 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", @@ -2103,67 +2150,54 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "2549ca8c7241c82f59c80ba2a6f415d931c5b58d24fb8412caa1a1f02c49139a" dependencies = [ "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "potential_utf", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" +checksum = "8197e866e47b68f8f7d95249e172903bec06004b18b2937f1095d40a0c57de04" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", + "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "idna" version = "1.0.3" @@ -2177,9 +2211,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -2236,12 +2270,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.15.3", "serde", ] @@ -2339,7 +2373,7 @@ version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "libc", ] @@ -2440,9 +2474,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.171" +version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libfuzzer-sys" @@ -2456,12 +2490,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.53.0", ] [[package]] @@ -2489,9 +2523,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "lock_api" @@ -2511,9 +2545,9 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "loole" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2998397c725c822c6b2ba605fd9eb4c6a7a0810f1629ba3cc232ef4f0308d96" +checksum = "1a3932a13b27d6b2d37efec3e4047017d59a8c9f2283a29bde29151d22f00fe9" dependencies = [ "futures-core", "futures-sink", @@ -2637,9 +2671,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minicbor" -version = "0.26.3" +version = "0.26.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1936e27fffe7d8557c060eb82cb71668608cd1a5fb56b63e66d22ae8d7564321" +checksum = "8a309f581ade7597820083bc275075c4c6986e57e53f8d26f88507cfefc8c987" dependencies = [ "minicbor-derive", ] @@ -2682,9 +2716,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.8.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" dependencies = [ "adler2", "simd-adler32", @@ -2904,7 +2938,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.8.0", + "indexmap 2.9.0", "js-sys", "once_cell", "pin-project-lite", @@ -3174,6 +3208,15 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -3216,9 +3259,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -3347,8 +3390,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" dependencies = [ "bytes", - "getrandom 0.3.2", - "rand 0.9.0", + "getrandom 0.3.3", + "rand 0.9.1", "ring", "rustc-hash 2.1.1", "rustls", @@ -3402,13 +3445,12 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", - "zerocopy", ] [[package]] @@ -3437,7 +3479,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -3446,7 +3488,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", ] [[package]] @@ -3460,7 +3502,7 @@ dependencies = [ "arrayvec", "av1-grain", "bitstream-io", - "built", + "built 0.7.7", "cfg-if", "interpolate_name", "itertools 0.12.1", @@ -3486,9 +3528,9 @@ dependencies = [ [[package]] name = "ravif" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2413fd96bd0ea5cdeeb37eaf446a22e6ed7b981d792828721e74ded1980a45c6" +checksum = "d6a5f31fcf7500f9401fea858ea4ab5525c99f2322cfcee732c0e6c74208c0c6" dependencies = [ "avif-serialize", "imgref", @@ -3521,9 +3563,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.10" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" dependencies = [ "bitflags 2.9.0", ] @@ -3644,7 +3686,7 @@ checksum = 
"a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "libc", "untrusted", "windows-sys 0.52.0", @@ -3653,7 +3695,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "assign", "js_int", @@ -3673,7 +3715,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3685,7 +3727,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "assign", @@ -3708,15 +3750,15 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", - "getrandom 0.2.15", + "getrandom 0.2.16", "http", - "indexmap 2.8.0", + "indexmap 2.9.0", "js_int", "konst", "percent-encoding", @@ -3740,10 +3782,10 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", - "indexmap 2.8.0", + "indexmap 2.9.0", "js_int", "js_option", "percent-encoding", @@ -3765,7 +3807,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "bytes", "headers", @@ -3787,7 +3829,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3796,7 +3838,7 @@ dependencies = [ [[package]] name = 
"ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3806,7 +3848,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3821,7 +3863,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3833,7 +3875,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3914,9 +3956,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.25" +version = "0.23.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" +checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" dependencies = [ "aws-lc-rs", "log", @@ -3951,18 +3993,19 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" dependencies = [ "web-time 1.1.0", + "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.103.1" +version = "0.103.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" dependencies = [ "aws-lc-rs", "ring", @@ -4227,7 +4270,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.8.0", + "indexmap 2.9.0", "itoa", "ryu", "serde", @@ -4292,7 +4335,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.9.0", "itoa", "ryu", "serde", @@ -4312,9 +4355,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -4359,9 +4402,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" dependencies = [ "libc", ] @@ -4417,9 +4460,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" dependencies = [ "serde", ] @@ -4498,9 +4541,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.100" +version = "2.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" dependencies = [ "proc-macro2", "quote", @@ -4518,9 +4561,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", @@ -4565,9 +4608,9 @@ dependencies = [ [[package]] name = "termimad" -version = "0.31.2" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8e19c6dbf107bec01d0e216bb8219485795b7d75328e4fa5ef2756c1be4f8dc" +checksum = "7301d9c2c4939c97f25376b70d3c13311f8fefdee44092fc361d2a98adc2cbb6" dependencies = [ "coolor", "crokey", @@ -4575,7 +4618,7 @@ dependencies = [ "lazy-regex", "minimad", "serde", - "thiserror 1.0.69", + "thiserror 2.0.12", "unicode-width 0.1.14", ] @@ -4733,9 +4776,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -4758,9 +4801,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.2" +version = "1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" dependencies = [ "backtrace", "bytes", @@ -4787,9 +4830,9 @@ dependencies = [ [[package]] name = "tokio-metrics" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb2bb07a8451c4c6fa8b3497ad198510d8b8dffa5df5cfb97a64102a58b113c8" +checksum = "7817b32d36c9b94744d7aa3f8fc13526aa0f5112009d7045f3c659413a6e44ac" dependencies = [ "futures-util", "pin-project-lite", @@ -4832,9 +4875,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.14" +version = "0.7.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" dependencies = [ "bytes", "futures-core", @@ -4845,9 +4888,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.20" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" +checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" dependencies = [ "serde", "serde_spanned", @@ -4857,26 +4900,33 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.24" +version = "0.22.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", + "toml_write", "winnow", ] +[[package]] +name = "toml_write" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" + [[package]] name = "tonic" version = "0.12.3" @@ -4944,9 +4994,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.2" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" +checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" dependencies = [ "async-compression", "bitflags 2.9.0", @@ -5188,12 +5238,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -5206,7 +5250,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "serde", ] @@ -5443,32 +5487,13 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" -dependencies = [ - "windows-core 0.52.0", - "windows-targets 0.52.6", -] - [[package]] name = "windows" version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" dependencies = [ - "windows-core 0.58.0", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-core" -version = "0.52.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" -dependencies = [ + "windows-core", "windows-targets 0.52.6", ] @@ -5775,9 +5800,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.4" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" +checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" dependencies = [ "memchr", ] @@ -5801,17 +5826,11 @@ dependencies = [ "bitflags 2.9.0", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "xml5ever" @@ -5832,9 +5851,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", @@ -5844,9 +5863,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", @@ -5856,18 +5875,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.24" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" +checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.24" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" +checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", @@ -5902,10 +5921,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] -name = "zerovec" -version = "0.10.4" +name = "zerotrie" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ "yoke", "zerofrom", @@ -5914,9 +5944,9 @@ dependencies = [ [[package]] name = 
"zerovec-derive" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index e9ae0007..1abff107 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ license = "Apache-2.0" readme = "README.md" repository = "https://forgejo.ellis.link/continuwuation/continuwuity" rust-version = "1.86.0" -version = "0.5.0" +version = "0.5.0-rc.5" [workspace.metadata.crane] name = "conduwuit" @@ -298,7 +298,7 @@ version = "1.15.0" default-features = false features = ["serde"] -# Used for reading the configuration from conduwuit.toml & environment variables +# Used for reading the configuration from continuwuity.toml & environment variables [workspace.dependencies.figment] version = "0.10.19" default-features = false @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4" +rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" features = [ "compat", "rand", @@ -626,6 +626,17 @@ package = "conduwuit_macros" path = "src/macros" default-features = false +[workspace.dependencies.conduwuit-web] +package = "conduwuit_web" +path = "src/web" +default-features = false + + +[workspace.dependencies.conduwuit-build-metadata] +package = "conduwuit_build_metadata" +path = "src/build_metadata" +default-features = false + ############################################################################### # # Release profiles @@ -734,7 +745,6 @@ incremental = true [profile.dev.package.conduwuit_core] inherits = "dev" -incremental = false #rustflags = [ # '--cfg', 'conduwuit_mods', # '-Ztime-passes', @@ -774,7 +784,6 @@ inherits = "dev" [profile.dev.package.'*'] inherits = "dev" debug = 'limited' -incremental = false codegen-units = 1 opt-level = 'z' #rustflags = [ @@ -796,7 +805,6 @@ inherits = "dev" strip = false opt-level = 0 codegen-units = 16 -incremental = false [profile.test.package.'*'] inherits = "dev" @@ -804,7 +812,6 @@ debug = 0 strip = false opt-level = 0 codegen-units = 16 -incremental = false ############################################################################### # @@ -981,3 +988,6 @@ let_underscore_future = { level = "allow", priority = 1 } # rust doesnt understand conduwuit's custom log macros literal_string_with_formatting_args = { level = "allow", priority = 1 } + + +needless_raw_string_hashes = "allow" diff --git a/README.md b/README.md index deaed364..e3eb807f 100644 --- a/README.md +++ b/README.md @@ -7,16 +7,21 @@ [continuwuity] is a Matrix homeserver written in Rust. -It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver. +It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver. 
+[![forgejo.ellis.link](https://img.shields.io/badge/Ellis%20Git-main+packages-green?style=flat&logo=forgejo&labelColor=fff)](https://forgejo.ellis.link/continuwuation/continuwuity) ![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/stars.svg?style=flat) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/issues/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/pulls/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open) + +[![GitHub](https://img.shields.io/badge/GitHub-mirror-blue?style=flat&logo=github&labelColor=fff&logoColor=24292f)](https://github.com/continuwuity/continuwuity) ![](https://img.shields.io/github/stars/continuwuity/continuwuity?style=flat) + +[![Codeberg](https://img.shields.io/badge/Codeberg-mirror-2185D0?style=flat&logo=codeberg&labelColor=fff)](https://codeberg.org/nexy7574/continuwuity) ![](https://codeberg.org/nexy7574/continuwuity/badges/stars.svg?style=flat) ### Why does this exist? The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features. -We aim to provide a stable, well-maintained alternative for current Conduit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver. +We aim to provide a stable, well-maintained alternative for current conduwuit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver. ### Who are we? @@ -46,8 +51,9 @@ Continuwuity aims to: ### Can I try it out? -Not right now. We've still got work to do! +Check out the [documentation](introduction) for installation instructions. +There are currently no open registration Continuwuity instances available. ### What are we working on? @@ -105,7 +111,7 @@ When incorporating code from other forks: #### Contact - +Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [space](https://matrix.to/#/#space:continuwuity.org) to chat with us about the project! diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..a9aa183e --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,63 @@ +# Security Policy for Continuwuity + +This document outlines the security policy for Continuwuity. Our goal is to maintain a secure platform for all users, and we take security matters seriously. + +## Supported Versions + +We provide security updates for the following versions of Continuwuity: + +| Version | Supported | +| -------------- |:----------------:| +| Latest release | ✅ | +| Main branch | ✅ | +| Older releases | ❌ | + +We may backport fixes to the previous release at our discretion, but we don't guarantee this. + +## Reporting a Vulnerability + +### Responsible Disclosure + +We appreciate the efforts of security researchers and the community in identifying and reporting vulnerabilities. To ensure that potential vulnerabilities are addressed properly, please follow these guidelines: + +1. Contact members of the team over E2EE private message. + - [@jade:ellis.link](https://matrix.to/#/@jade:ellis.link) + - [@nex:nexy7574.co.uk](https://matrix.to/#/@nex:nexy7574.co.uk) +2. **Email the security team** directly at [security@continuwuity.org](mailto:security@continuwuity.org). This is not E2EE, so don't include sensitive details. +3. 
**Do not disclose the vulnerability publicly** until it has been addressed. +4. **Provide detailed information** about the vulnerability, including: + - A clear description of the issue + - Steps to reproduce + - Potential impact + - Any possible mitigations + - Version(s) affected, including specific commits if possible + +If you have any doubts about a potential security vulnerability, contact us via private channels first! We'd prefer that you bother us instead of having a vulnerability disclosed without a fix. + +### What to Expect + +When you report a security vulnerability: + +1. **Acknowledgment**: We will acknowledge receipt of your report. +2. **Assessment**: We will assess the vulnerability and determine its impact on our users. +3. **Updates**: We will provide updates on our progress in addressing the vulnerability, and may ask you to help test mitigations. +4. **Resolution**: Once resolved, we will notify you and discuss coordinated disclosure. +5. **Credit**: We will recognize your contribution (unless you prefer to remain anonymous). + +## Security Update Process + +When security vulnerabilities are identified: + +1. We will develop and test fixes in a private branch. +2. Security updates will be released as soon as possible. +3. Release notes will include information about the vulnerabilities, avoiding details that could facilitate exploitation where possible. +4. Critical security updates may be backported to the previous stable release. + +## Additional Resources + +- [Matrix Security Disclosure Policy](https://matrix.org/security-disclosure-policy/) +- [Continuwuity Documentation](https://continuwuity.org/introduction) + +--- + +This security policy was last updated on May 25, 2025. diff --git a/alpine/APKBUILD b/alpine/APKBUILD new file mode 100644 index 00000000..3b9653b3 --- /dev/null +++ b/alpine/APKBUILD @@ -0,0 +1,70 @@ +# Contributor: magmaus3 +# Maintainer: magmaus3 +pkgname=continuwuity + +# abuild doesn't like the format of v0.5.0-rc.5, so I had to change it +# see https://wiki.alpinelinux.org/wiki/Package_policies +pkgver=0.5.0_rc5 +pkgrel=0 +pkgdesc="a continuwuation of a very cool, featureful fork of conduit" +url="https://continuwuity.org/" +arch="all" +license="Apache-2.0" +depends="liburing" + +# cargo version on alpine v3.21 is too old to use the 2024 edition +# I recommend either building everything on edge, or adding +# the edge repo as a tag +makedepends="cargo liburing-dev clang-dev linux-headers" +checkdepends="" +install="$pkgname.pre-install" +subpackages="$pkgname-openrc" +source="https://forgejo.ellis.link/continuwuation/continuwuity/archive/v0.5.0-rc.5.tar.gz +continuwuity.initd +continuwuity.confd +" +_giturl="https://forgejo.ellis.link/continuwuation/continuwuity" +_gitbranch="main" +builddir="$srcdir/continuwuity" +options="net !check" + +#snapshot() { +# # used for building from git +# git clone --depth=1 $_giturl -b $_gitbranch +#} + +prepare() { + default_prepare + cd $srcdir/continuwuity + + # add the default database path to the config (commented out) + cat conduwuit-example.toml \ + | sed '/#database_path/ s:$: "/var/lib/continuwuity":' \ + > "$srcdir"/continuwuity.toml + + cargo fetch --target="$CTARGET" --locked +} + +build() { + cargo build --frozen --release --all-features +} + +check() { + # TODO: make sure the tests work + #cargo test --frozen + return +} + +package() { + cd $srcdir + install -Dm755 continuwuity/target/release/conduwuit "$pkgdir"/usr/bin/continuwuity + install -Dm644 "$srcdir"/continuwuity.toml -t 
"$pkgdir"/etc/continuwuity + install -Dm755 "$srcdir"/continuwuity.initd "$pkgdir"/etc/init.d/continuwuity + install -Dm644 "$srcdir"/continuwuity.confd "$pkgdir"/etc/conf.d/continuwuity +} + +sha512sums=" +66f6da5e98b6f7bb8c1082500101d5c87b1b79955c139b44c6ef5123919fb05feb0dffc669a3af1bc8d571ddb9f3576660f08dc10a6b19eab6db9e391175436a v0.5.0-rc.5.tar.gz +0482674be24740496d70da256d4121c5a5e3b749f2445d2bbe0e8991f1449de052724f8427da21a6f55574bc53eac9ca1e47e5012b4c13049b2b39044734d80d continuwuity.initd +38e2576278b450d16ba804dd8f4a128f18cd793e6c3ce55aedee1e186905755b31ee23baaa6586b1ab0e25a1f29bf1ea86bfaae4185b0cb1a29203726a199426 continuwuity.confd +" diff --git a/alpine/README.md b/alpine/README.md new file mode 100644 index 00000000..5f26d772 --- /dev/null +++ b/alpine/README.md @@ -0,0 +1,7 @@ +# building + +1. [set up your build + environment](https://wiki.alpinelinux.org/wiki/Include:Setup_your_system_and_account_for_building_packages) + +2. run `abuild` (or `abuild -K` if you want to keep the source directory to make + rebuilding faster) diff --git a/alpine/continuwuity.confd b/alpine/continuwuity.confd new file mode 100644 index 00000000..03d7b0a0 --- /dev/null +++ b/alpine/continuwuity.confd @@ -0,0 +1,3 @@ +supervisor=supervise-daemon +export CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml + diff --git a/alpine/continuwuity.initd b/alpine/continuwuity.initd new file mode 100644 index 00000000..1354f4bd --- /dev/null +++ b/alpine/continuwuity.initd @@ -0,0 +1,19 @@ +#!/sbin/openrc-run + +command="/usr/bin/continuwuity" +command_user="continuwuity:continuwuity" +command_args="--config ${CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml}" +command_background=true +pidfile="/run/$RC_SVCNAME.pid" + +output_log="/var/log/continuwuity.log" +error_log="/var/log/continuwuity.log" + +depend() { + need net +} + +start_pre() { + checkpath -d -m 0755 -o "$command_user" /var/lib/continuwuity + checkpath -f -m 0644 -o "$command_user" "$output_log" +} diff --git a/alpine/continuwuity.pre-install b/alpine/continuwuity.pre-install new file mode 100644 index 00000000..edac789f --- /dev/null +++ b/alpine/continuwuity.pre-install @@ -0,0 +1,4 @@ +#!/bin/sh +addgroup -S continuwuity 2>/dev/null +adduser -S -D -H -h /var/lib/continuwuity -s /sbin/nologin -G continuwuity -g continuwuity continuwuity 2>/dev/null +exit 0 diff --git a/arch/conduwuit.service b/arch/conduwuit.service index 4f45ddc0..c86e37bd 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -1,11 +1,11 @@ [Unit] -Description=conduwuit Matrix homeserver + +Description=Continuwuity - Matrix homeserver Wants=network-online.target After=network-online.target -Documentation=https://conduwuit.puppyirl.gay/ +Documentation=https://continuwuity.org/ RequiresMountsFor=/var/lib/private/conduwuit Alias=matrix-conduwuit.service - [Service] DynamicUser=yes Type=notify-reload @@ -59,7 +59,7 @@ StateDirectory=conduwuit RuntimeDirectory=conduwuit RuntimeDirectoryMode=0750 -Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" +Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml" BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit diff --git a/conduwuit-example.toml b/conduwuit-example.toml index af8da6bb..1a8be2aa 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1,4 +1,4 @@ -### conduwuit Configuration +### continuwuity Configuration ### ### THIS FILE IS GENERATED. 
CHANGES/CONTRIBUTIONS IN THE REPO WILL BE ### OVERWRITTEN! @@ -13,7 +13,7 @@ ### that say "YOU NEED TO EDIT THIS". ### ### For more information, see: -### https://conduwuit.puppyirl.gay/configuration.html +### https://continuwuity.org/configuration.html [global] @@ -21,7 +21,7 @@ # suffix for user and room IDs/aliases. # # See the docs for reverse proxying and delegation: -# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy +# https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy # # Also see the `[global.well_known]` config section at the very bottom. # @@ -32,11 +32,11 @@ # YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE # WIPE. # -# example: "conduwuit.woof" +# example: "continuwuity.org" # #server_name = -# The default address (IPv4 or IPv6) conduwuit will listen on. +# The default address (IPv4 or IPv6) continuwuity will listen on. # # If you are using Docker or a container NAT networking setup, this must # be "0.0.0.0". @@ -46,10 +46,10 @@ # #address = ["127.0.0.1", "::1"] -# The port(s) conduwuit will listen on. +# The port(s) continuwuity will listen on. # # For reverse proxying, see: -# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy +# https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy # # If you are using Docker, don't change this, you'll need to map an # external port to this. @@ -58,16 +58,17 @@ # #port = 8008 -# The UNIX socket conduwuit will listen on. +# The UNIX socket continuwuity will listen on. # -# conduwuit cannot listen on both an IP address and a UNIX socket. If +# continuwuity cannot listen on both an IP address and a UNIX socket. If # listening on a UNIX socket, you MUST remove/comment the `address` key. # # Remember to make sure that your reverse proxy has access to this socket -# file, either by adding your reverse proxy to the 'conduwuit' group or -# granting world R/W permissions with `unix_socket_perms` (666 minimum). +# file, either by adding your reverse proxy to the appropriate user group +# or granting world R/W permissions with `unix_socket_perms` (666 +# minimum). # -# example: "/run/conduwuit/conduwuit.sock" +# example: "/run/continuwuity/continuwuity.sock" # #unix_socket_path = @@ -75,23 +76,23 @@ # #unix_socket_perms = 660 -# This is the only directory where conduwuit will save its data, including -# media. Note: this was previously "/var/lib/matrix-conduit". +# This is the only directory where continuwuity will save its data, +# including media. Note: this was previously "/var/lib/matrix-conduit". # # YOU NEED TO EDIT THIS. # -# example: "/var/lib/conduwuit" +# example: "/var/lib/continuwuity" # #database_path = -# conduwuit supports online database backups using RocksDB's Backup engine -# API. To use this, set a database backup path that conduwuit can write -# to. +# continuwuity supports online database backups using RocksDB's Backup +# engine API. To use this, set a database backup path that continuwuity +# can write to. # # For more information, see: -# https://conduwuit.puppyirl.gay/maintenance.html#backups +# https://continuwuity.org/maintenance.html#backups # -# example: "/opt/conduwuit-db-backups" +# example: "/opt/continuwuity-db-backups" # #database_backup_path = @@ -112,18 +113,14 @@ # #new_user_displayname_suffix = "🏳️‍⚧️" -# If enabled, conduwuit will send a simple GET request periodically to -# `https://pupbrain.dev/check-for-updates/stable` for any new -# announcements made. 
Despite the name, this is not an update check -# endpoint, it is simply an announcement check endpoint. +# If enabled, continuwuity will send a simple GET request periodically to +# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new +# announcements or major updates. This is not an update check endpoint. # -# This is disabled by default as this is rarely used except for security -# updates or major updates. -# -#allow_check_for_updates = false +#allow_announcements_check = true -# Set this to any float value to multiply conduwuit's in-memory LRU caches -# with such as "auth_chain_cache_capacity". +# Set this to any float value to multiply continuwuity's in-memory LRU +# caches with such as "auth_chain_cache_capacity". # # May be useful if you have significant memory to spare to increase # performance. @@ -135,7 +132,7 @@ # #cache_capacity_modifier = 1.0 -# Set this to any float value in megabytes for conduwuit to tell the +# Set this to any float value in megabytes for continuwuity to tell the # database engine that this much memory is available for database read # caches. # @@ -149,7 +146,7 @@ # #db_cache_capacity_mb = varies by system -# Set this to any float value in megabytes for conduwuit to tell the +# Set this to any float value in megabytes for continuwuity to tell the # database engine that this much memory is available for database write # caches. # @@ -254,9 +251,9 @@ # Enable using *only* TCP for querying your specified nameservers instead # of UDP. # -# If you are running conduwuit in a container environment, this config +# If you are running continuwuity in a container environment, this config # option may need to be enabled. For more details, see: -# https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker +# https://continuwuity.org/troubleshooting.html#potential-dns-issues-when-using-docker # #query_over_tcp_only = false @@ -422,9 +419,9 @@ # tokens. Multiple tokens can be added if you separate them with # whitespace # -# conduwuit must be able to access the file, and it must not be empty +# continuwuity must be able to access the file, and it must not be empty # -# example: "/etc/conduwuit/.reg_token" +# example: "/etc/continuwuity/.reg_token" # #registration_token_file = @@ -516,16 +513,16 @@ #allow_room_creation = true # Set to false to disable users from joining or creating room versions -# that aren't officially supported by conduwuit. +# that aren't officially supported by continuwuity. # -# conduwuit officially supports room versions 6 - 11. +# continuwuity officially supports room versions 6 - 11. # -# conduwuit has slightly experimental (though works fine in practice) +# continuwuity has slightly experimental (though works fine in practice) # support for versions 3 - 5. # #allow_unstable_room_versions = true -# Default room version conduwuit will create rooms with. +# Default room version continuwuity will create rooms with. # # Per spec, room version 11 is the default. # @@ -591,7 +588,7 @@ # Servers listed here will be used to gather public keys of other servers # (notary trusted key servers). # -# Currently, conduwuit doesn't support inbound batched key requests, so +# Currently, continuwuity doesn't support inbound batched key requests, so # this list should only contain other Synapse servers. # # example: ["matrix.org", "tchncs.de"] @@ -632,7 +629,7 @@ # #trusted_server_batch_size = 1024 -# Max log level for conduwuit. Allows debug, info, warn, or error. +# Max log level for continuwuity. 
Allows debug, info, warn, or error. # # See also: # https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives @@ -653,8 +650,9 @@ # #log_span_events = "none" -# Configures whether CONDUWUIT_LOG EnvFilter matches values using regular -# expressions. See the tracing_subscriber documentation on Directives. +# Configures whether CONTINUWUITY_LOG EnvFilter matches values using +# regular expressions. See the tracing_subscriber documentation on +# Directives. # #log_filter_regex = true @@ -722,7 +720,7 @@ # This takes priority over "turn_secret" first, and falls back to # "turn_secret" if invalid or failed to open. # -# example: "/etc/conduwuit/.turn_secret" +# example: "/etc/continuwuity/.turn_secret" # #turn_secret_file = @@ -730,12 +728,12 @@ # #turn_ttl = 86400 -# List/vector of room IDs or room aliases that conduwuit will make newly -# registered users join. The rooms specified must be rooms that you have -# joined at least once on the server, and must be public. +# List/vector of room IDs or room aliases that continuwuity will make +# newly registered users join. The rooms specified must be rooms that you +# have joined at least once on the server, and must be public. # -# example: ["#conduwuit:puppygock.gay", -# "!eoIzvAvVwY23LPDay8:puppygock.gay"] +# example: ["#continuwuity:continuwuity.org", +# "!main-1:continuwuity.org"] # #auto_join_rooms = [] @@ -758,10 +756,10 @@ # #auto_deactivate_banned_room_attempts = false -# RocksDB log level. This is not the same as conduwuit's log level. This -# is the log level for the RocksDB engine/library which show up in your -# database folder/path as `LOG` files. conduwuit will log RocksDB errors -# as normal through tracing or panics if severe for safety. +# RocksDB log level. This is not the same as continuwuity's log level. +# This is the log level for the RocksDB engine/library which show up in +# your database folder/path as `LOG` files. continuwuity will log RocksDB +# errors as normal through tracing or panics if severe for safety. # #rocksdb_log_level = "error" @@ -781,7 +779,7 @@ # Set this to true to use RocksDB config options that are tailored to HDDs # (slower device storage). # -# It is worth noting that by default, conduwuit will use RocksDB with +# It is worth noting that by default, continuwuity will use RocksDB with # Direct IO enabled. *Generally* speaking this improves performance as it # bypasses buffered I/O (system page cache). However there is a potential # chance that Direct IO may cause issues with database operations if your @@ -789,7 +787,7 @@ # possibly ZFS filesystem. RocksDB generally deals/corrects these issues # but it cannot account for all setups. If you experience any weird # RocksDB issues, try enabling this option as it turns off Direct IO and -# feel free to report in the conduwuit Matrix room if this option fixes +# feel free to report in the continuwuity Matrix room if this option fixes # your DB issues. # # For more information, see: @@ -844,7 +842,7 @@ # as they all differ. See their `kDefaultCompressionLevel`. # # Note when using the default value we may override it with a setting -# tailored specifically conduwuit. +# tailored specifically for continuwuity. # #rocksdb_compression_level = 32767 @@ -860,7 +858,7 @@ # algorithm. # # Note when using the default value we may override it with a setting -# tailored specifically conduwuit. +# tailored specifically for continuwuity. 
# #rocksdb_bottommost_compression_level = 32767 @@ -900,13 +898,13 @@ # 0 = AbsoluteConsistency # 1 = TolerateCorruptedTailRecords (default) # 2 = PointInTime (use me if trying to recover) -# 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) +# 3 = SkipAnyCorruptedRecord (you now voided your Continuwuity warranty) # # For more information on these modes, see: # https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes # # For more details on recovering a corrupt database, see: -# https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption +# https://continuwuity.org/troubleshooting.html#database-corruption # #rocksdb_recovery_mode = 1 @@ -946,7 +944,7 @@ # - Disabling repair mode and restarting the server is recommended after # running the repair. # -# See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. +# See https://continuwuity.org/troubleshooting.html#database-corruption for more details on recovering a corrupt database. # #rocksdb_repair = false @@ -970,10 +968,10 @@ # #rocksdb_compaction_ioprio_idle = true -# Disables RocksDB compaction. You should never ever have to set this -# option to true. If you for some reason find yourself needing to use this -# option as part of troubleshooting or a bug, please reach out to us in -# the conduwuit Matrix room with information and details. +# Enables RocksDB compaction. You should never ever have to set this +# option to false. If you for some reason find yourself needing to use +# this option as part of troubleshooting or a bug, please reach out to us +# in the continuwuity Matrix room with information and details. # # Disabling compaction will lead to a significantly bloated and # explosively large database, gradually poor performance, unnecessarily @@ -999,7 +997,7 @@ # purposes such as recovering/recreating your admin room, or inviting # yourself back. # -# See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. +# See https://continuwuity.org/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. # # Once this password is unset, all sessions will be logged out for # security purposes. @@ -1014,8 +1012,8 @@ # Allow local (your server only) presence updates/requests. # -# Note that presence on conduwuit is very fast unlike Synapse's. If using -# outgoing presence, this MUST be enabled. +# Note that presence on continuwuity is very fast unlike Synapse's. If +# using outgoing presence, this MUST be enabled. # #allow_local_presence = true @@ -1023,7 +1021,7 @@ # # This option receives presence updates from other servers, but does not # send any unless `allow_outgoing_presence` is true. Note that presence on -# conduwuit is very fast unlike Synapse's. +# continuwuity is very fast unlike Synapse's. # #allow_incoming_presence = true @@ -1031,8 +1029,8 @@ # # This option sends presence updates to other servers, but does not # receive any unless `allow_incoming_presence` is true. Note that presence -# on conduwuit is very fast unlike Synapse's. If using outgoing presence, -# you MUST enable `allow_local_presence` as well. +# on continuwuity is very fast unlike Synapse's. If using outgoing +# presence, you MUST enable `allow_local_presence` as well. # #allow_outgoing_presence = true @@ -1085,8 +1083,8 @@ # #typing_client_timeout_max_s = 45 -# Set this to true for conduwuit to compress HTTP response bodies using -# zstd. 
This option does nothing if conduwuit was not built with +# Set this to true for continuwuity to compress HTTP response bodies using +# zstd. This option does nothing if continuwuity was not built with # `zstd_compression` feature. Please be aware that enabling HTTP # compression may weaken TLS. Most users should not need to enable this. # See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH @@ -1094,8 +1092,8 @@ # #zstd_compression = false -# Set this to true for conduwuit to compress HTTP response bodies using -# gzip. This option does nothing if conduwuit was not built with +# Set this to true for continuwuity to compress HTTP response bodies using +# gzip. This option does nothing if continuwuity was not built with # `gzip_compression` feature. Please be aware that enabling HTTP # compression may weaken TLS. Most users should not need to enable this. # See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before @@ -1106,8 +1104,8 @@ # #gzip_compression = false -# Set this to true for conduwuit to compress HTTP response bodies using -# brotli. This option does nothing if conduwuit was not built with +# Set this to true for continuwuity to compress HTTP response bodies using +# brotli. This option does nothing if continuwuity was not built with # `brotli_compression` feature. Please be aware that enabling HTTP # compression may weaken TLS. Most users should not need to enable this. # See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH @@ -1169,7 +1167,7 @@ # Otherwise setting this to false reduces filesystem clutter and overhead # for managing these symlinks in the directory. This is now disabled by # default. You may still return to upstream Conduit but you have to run -# conduwuit at least once with this set to true and allow the +# continuwuity at least once with this set to true and allow the # media_startup_check to take place before shutting down to return to # Conduit. # @@ -1186,26 +1184,40 @@ # #prune_missing_media = false -# Vector list of regex patterns of server names that conduwuit will refuse -# to download remote media from. -# -# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] -# -#prevent_media_downloads_from = [] - # List of forbidden server names via regex patterns that we will block # incoming AND outgoing federation with, and block client room joins / # remote user invites. # +# Note that your messages can still make it to forbidden servers through +# backfilling. Events we receive from forbidden servers via backfill +# from servers we *do* federate with will be stored in the database. +# # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and # outbound federation handler. # -# Basically "global" ACLs. +# You can set this to ["*"] to block all servers by default, and then +# use `allowed_remote_server_names` to allow only specific servers. +# +# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] +# +#forbidden_remote_server_names = [] + +# List of allowed server names via regex patterns that we will allow, +# regardless of if they match `forbidden_remote_server_names`. +# +# This option has no effect if `forbidden_remote_server_names` is empty. +# +# example: ["goodserver\\.tld$", "goodphrase"] +# +#allowed_remote_server_names = [] + +# Vector list of regex patterns of server names that continuwuity will +# refuse to download remote media from. 
# # example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # -#forbidden_remote_server_names = [] +#prevent_media_downloads_from = [] # List of forbidden server names via regex patterns that we will block all # outgoing federated room directory requests for. Useful for preventing @@ -1215,8 +1227,31 @@ # #forbidden_remote_room_directory_server_names = [] +# Vector list of regex patterns of server names that continuwuity will not +# send messages to the client from. +# +# Note that there is no way for clients to receive messages once a server +# has become unignored without doing a full sync. This is a protocol +# limitation with the current sync protocols. This means this is somewhat +# of a nuclear option. +# +# example: ["reallybadserver\.tld$", "reallybadphrase", +# "69dollarfortnitecards"] +# +#ignore_messages_from_server_names = [] + +# Send messages from users that the user has ignored to the client. +# +# There is no way for clients to receive messages sent while a user was +# ignored without doing a full sync. This is a protocol limitation with +# the current sync protocols. Disabling this option will move +# responsibility of ignoring messages to the client, which can avoid this +# limitation. +# +#send_messages_from_ignored_users_to_client = false + # Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you -# do not want conduwuit to send outbound requests to. Defaults to +# do not want continuwuity to send outbound requests to. Defaults to # RFC1918, unroutable, loopback, multicast, and testnet addresses for # security. # @@ -1366,26 +1401,26 @@ # Allow admins to enter commands in rooms other than "#admins" (admin # room) by prefixing your message with "\!admin" or "\\!admin" followed up -# a normal conduwuit admin command. The reply will be publicly visible to -# the room, originating from the sender. +# a normal continuwuity admin command. The reply will be publicly visible +# to the room, originating from the sender. # # example: \\!admin debug ping puppygock.gay # #admin_escape_commands = true -# Automatically activate the conduwuit admin room console / CLI on -# startup. This option can also be enabled with `--console` conduwuit +# Automatically activate the continuwuity admin room console / CLI on +# startup. This option can also be enabled with `--console` continuwuity # argument. # #admin_console_automatic = false # List of admin commands to execute on startup. # -# This option can also be configured with the `--execute` conduwuit +# This option can also be configured with the `--execute` continuwuity # argument and can take standard shell commands and environment variables # -# For example: `./conduwuit --execute "server admin-notice conduwuit has -# started up at $(date)"` +# For example: `./continuwuity --execute "server admin-notice continuwuity +# has started up at $(date)"` # # example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` # @@ -1393,7 +1428,7 @@ # Ignore errors in startup commands. # -# If false, conduwuit will error and fail to start if an admin execute +# If false, continuwuity will error and fail to start if an admin execute # command (`--execute` / `admin_execute`) fails. # #admin_execute_errors_ignore = false @@ -1414,15 +1449,14 @@ # The default room tag to apply on the admin room. # # On some clients like Element, the room tag "m.server_notice" is a -# special pinned room at the very bottom of your room list. 
The conduwuit -# admin room can be pinned here so you always have an easy-to-access -# shortcut dedicated to your admin room. +# special pinned room at the very bottom of your room list. The +# continuwuity admin room can be pinned here so you always have an +# easy-to-access shortcut dedicated to your admin room. # #admin_room_tag = "m.server_notice" # Sentry.io crash/panic reporting, performance monitoring/metrics, etc. -# This is NOT enabled by default. conduwuit's default Sentry reporting -# endpoint domain is `o4506996327251968.ingest.us.sentry.io`. +# This is NOT enabled by default. # #sentry = false @@ -1430,7 +1464,7 @@ # #sentry_endpoint = "" -# Report your conduwuit server_name in Sentry.io crash reports and +# Report your continuwuity server_name in Sentry.io crash reports and # metrics. # #sentry_send_server_name = false @@ -1467,7 +1501,7 @@ # Enable the tokio-console. This option is only relevant to developers. # # For more information, see: -# https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console +# https://continuwuity.org/development.html#debugging-with-tokio-console # #tokio_console = false @@ -1607,19 +1641,29 @@ # #server = -# This item is undocumented. Please contribute documentation for it. +# URL to a support page for the server, which will be served as part of +# the MSC1929 server support endpoint at /.well-known/matrix/support. +# Will be included alongside any contact information # #support_page = -# This item is undocumented. Please contribute documentation for it. +# Role string for server support contacts, to be served as part of the +# MSC1929 server support endpoint at /.well-known/matrix/support. # -#support_role = +#support_role = "m.role.admin" -# This item is undocumented. Please contribute documentation for it. +# Email address for server support contacts, to be served as part of the +# MSC1929 server support endpoint. +# This will be used along with support_mxid if specified. # #support_email = -# This item is undocumented. Please contribute documentation for it. +# Matrix ID for server support contacts, to be served as part of the +# MSC1929 server support endpoint. +# This will be used along with support_email if specified. +# +# If no email or mxid is specified, all of the server's admins will be +# listed. # #support_mxid = diff --git a/debian/README.md b/debian/README.md index 800a2e09..4a8e58d2 100644 --- a/debian/README.md +++ b/debian/README.md @@ -1,4 +1,4 @@ -# conduwuit for Debian +# Continuwuity for Debian Information about downloading and deploying the Debian package. This may also be referenced for other `apt`-based distros such as Ubuntu. @@ -22,7 +22,7 @@ options in `/etc/conduwuit/conduwuit.toml`. ### Running -The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop conduwuit. The binary is installed at `/usr/sbin/conduwuit`. +The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary is installed at `/usr/sbin/conduwuit`. This package assumes by default that conduwuit will be placed behind a reverse proxy. The default config options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS, so you will need to set up TLS certificates and renewal for it to work properly if you intend to federate. 
diff --git a/debian/conduwuit.service b/debian/conduwuit.service index a079499e..be2f3dae 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -1,9 +1,10 @@ [Unit] -Description=conduwuit Matrix homeserver + +Description=Continuwuity - Matrix homeserver Wants=network-online.target After=network-online.target +Documentation=https://continuwuity.org/ Alias=matrix-conduwuit.service -Documentation=https://conduwuit.puppyirl.gay/ [Service] DynamicUser=yes @@ -11,7 +12,7 @@ User=conduwuit Group=conduwuit Type=notify -Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" +Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml" ExecStart=/usr/sbin/conduwuit diff --git a/docker/Dockerfile b/docker/Dockerfile index 10f54d94..e734fb81 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -18,13 +18,14 @@ ARG LLVM_VERSION=19 # Line three: for xx-verify RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ -apt-get update && apt-get install -y \ + apt-get update && apt-get install -y \ clang-${LLVM_VERSION} lld-${LLVM_VERSION} pkg-config make jq \ curl git \ file # Create symlinks for LLVM tools RUN <> /etc/environment # Configure pkg-config RUN <> /etc/environment echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment @@ -86,12 +85,14 @@ EOF # Configure cc to use clang version RUN <> /etc/environment echo "CXX=clang++" >> /etc/environment EOF # Cross-language LTO RUN <> /etc/environment echo "CXXFLAGS=-flto" >> /etc/environment # Linker is set to target-compatible clang by xx @@ -102,6 +103,7 @@ EOF ARG TARGET_CPU= RUN <> /etc/environment @@ -115,35 +117,37 @@ RUN mkdir /out FROM toolchain AS builder -# Conduwuit version info -ARG COMMIT_SHA= -ARG CONDUWUIT_VERSION_EXTRA= -ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA -RUN <> /etc/environment -fi -EOF - -ARG TARGETPLATFORM - -# Verify environment configuration -RUN cat /etc/environment -RUN xx-cargo --print-target-triple # Get source COPY . . -# Timelord sync -RUN --mount=type=cache,target=/timelord/ \ - timelord sync --source-dir . --cache-dir /timelord/ +ARG TARGETPLATFORM + +# Verify environment configuration +RUN xx-cargo --print-target-triple + +# Conduwuit version info +ARG GIT_COMMIT_HASH= +ARG GIT_COMMIT_HASH_SHORT= +ARG GIT_REMOTE_URL= +ARG GIT_REMOTE_COMMIT_URL= +ARG CONDUWUIT_VERSION_EXTRA= +ARG CONTINUWUITY_VERSION_EXTRA= +ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH +ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT +ENV GIT_REMOTE_URL=$GIT_REMOTE_URL +ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL +ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA +ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA + # Build the binary RUN --mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/usr/local/cargo/git/db \ - --mount=type=cache,target=/app/target \ + --mount=type=cache,target=/app/target,id=cargo-target-${TARGETPLATFORM} \ bash <<'EOF' set -o allexport + set -o xtrace . 
/etc/environment TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \ jq -r ".target_directory")) @@ -164,6 +168,7 @@ EOF RUN --mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/usr/local/cargo/git/db \ bash <<'EOF' + set -o xtrace mkdir /out/sbom typeset -A PACKAGES for BINARY in /out/sbin/*; do @@ -182,6 +187,7 @@ EOF # Extract dynamically linked dependencies RUN <.`)" # Change to the address on which conduwuit is hosted - - "traefik.http.routers.to-conduwuit.tls=true" - - "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt" - - "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker" - - "traefik.http.services.to_conduwuit.loadbalancer.server.port=6167" + - "traefik.http.routers.to-continuwuity.rule=Host(`.`)" # Change to the address on which Continuwuity is hosted + - "traefik.http.routers.to-continuwuity.tls=true" + - "traefik.http.routers.to-continuwuity.tls.certresolver=letsencrypt" + - "traefik.http.routers.to-continuwuity.middlewares=cors-headers@docker" + - "traefik.http.services.to_continuwuity.loadbalancer.server.port=6167" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" - # If you want to have your account on , but host conduwuit on a subdomain, + # If you want to have your account on , but host Continuwuity on a subdomain, # you can let it only handle the well known file on that domain instead #- "traefik.http.routers.to-matrix-wellknown.rule=Host(``) && PathPrefix(`/.well-known/matrix`)" #- "traefik.http.routers.to-matrix-wellknown.tls=true" diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 431cf2d4..3dfc9d85 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -1,6 +1,6 @@ services: caddy: - # This compose file uses caddy-docker-proxy as the reverse proxy for conduwuit! + # This compose file uses caddy-docker-proxy as the reverse proxy for Continuwuity! # For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy image: lucaslorentz/caddy-docker-proxy:ci-alpine ports: @@ -20,27 +20,28 @@ services: caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}} homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use a registry image, + ### If you already built the Continuwuity image with 'docker build' or want to use a registry image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - - db:/var/lib/conduwuit - #- ./conduwuit.toml:/etc/conduwuit.toml + - db:/var/lib/continuwuity + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. 
+ #- ./continuwuity.toml:/etc/continuwuity.toml environment: - CONDUWUIT_SERVER_NAME: example.com # EDIT THIS - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_PORT: 6167 - CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - CONDUWUIT_ALLOW_REGISTRATION: 'true' - CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. - #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' - CONDUWUIT_ALLOW_FEDERATION: 'true' - CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUWUIT_LOG: warn,state_res=warn - CONDUWUIT_ADDRESS: 0.0.0.0 - #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above + CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS + CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity + CONTINUWUITY_PORT: 6167 + CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB + CONTINUWUITY_ALLOW_REGISTRATION: 'true' + CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. + #CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' + CONTINUWUITY_ALLOW_FEDERATION: 'true' + CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true' + CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]' + #CONTINUWUITY_LOG: warn,state_res=warn + CONTINUWUITY_ADDRESS: 0.0.0.0 + #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above networks: - caddy labels: diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 89118c74..9acc4221 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -1,56 +1,57 @@ -# conduwuit - Behind Traefik Reverse Proxy +# Continuwuity - Behind Traefik Reverse Proxy services: homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image, + ### If you already built the Continuwuity image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - - db:/var/lib/conduwuit - #- ./conduwuit.toml:/etc/conduwuit.toml + - db:/var/lib/continuwuity + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. 
+ #- ./continuwuity.toml:/etc/continuwuity.toml networks: - proxy environment: - CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - CONDUWUIT_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this - CONDUWUIT_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server - #CONDUWUIT_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read - CONDUWUIT_ADDRESS: 0.0.0.0 - CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - #CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above - ### Uncomment and change values as desired, note that conduwuit has plenty of config options, so you should check out the example example config too + CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS + CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]' + CONTINUWUITY_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this + CONTINUWUITY_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server + #CONTINUWUITY_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read + CONTINUWUITY_ADDRESS: 0.0.0.0 + CONTINUWUITY_PORT: 6167 # you need to match this with the traefik load balancer label if you want to change it + CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity + #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above + ### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example config too # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUWUIT_LOG: info # default is: "warn,state_res=warn" - # CONDUWUIT_ALLOW_ENCRYPTION: 'true' - # CONDUWUIT_ALLOW_FEDERATION: 'true' - # CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - # CONDUWUIT_ALLOW_INCOMING_PRESENCE: true - # CONDUWUIT_ALLOW_OUTGOING_PRESENCE: true - # CONDUWUIT_ALLOW_LOCAL_PRESENCE: true - # CONDUWUIT_WORKERS: 10 - # CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - # CONDUWUIT_NEW_USER_DISPLAYNAME_SUFFIX = "🏳‍⚧" + # CONTINUWUITY_LOG: info # default is: "warn,state_res=warn" + # CONTINUWUITY_ALLOW_ENCRYPTION: 'true' + # CONTINUWUITY_ALLOW_FEDERATION: 'true' + # CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true' + # CONTINUWUITY_ALLOW_INCOMING_PRESENCE: true + # CONTINUWUITY_ALLOW_OUTGOING_PRESENCE: true + # CONTINUWUITY_ALLOW_LOCAL_PRESENCE: true + # CONTINUWUITY_WORKERS: 10 + # CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB + # CONTINUWUITY_NEW_USER_DISPLAYNAME_SUFFIX = "🏳‍⚧" - # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN - # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a seperate + # We need some way to serve the client and server .well-known json. The simplest way is via the CONTINUWUITY_WELL_KNOWN + # variable / config option, there are multiple ways to do this, e.g.
in the continuwuity.toml file, and in a separate # reverse proxy, but since you do not have a reverse proxy and following this guide, this example is included - CONDUWUIT_WELL_KNOWN: | + CONTINUWUITY_WELL_KNOWN: | { client=https://your.server.name.example, server=your.server.name.example:443 } #cpuset: "0-4" # Uncomment to limit to specific CPU cores - ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it + ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it nofile: soft: 1048567 hard: 1048567 ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index ca33b5f5..fbb50e35 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -1,34 +1,34 @@ -# conduwuit +# Continuwuity services: homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use a registry image, + ### If you already built the Continuwuity image with 'docker build' or want to use a registry image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped ports: - 8448:6167 volumes: - - db:/var/lib/conduwuit - #- ./conduwuit.toml:/etc/conduwuit.toml + - db:/var/lib/continuwuity + #- ./continuwuity.toml:/etc/continuwuity.toml environment: - CONDUWUIT_SERVER_NAME: your.server.name # EDIT THIS - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_PORT: 6167 - CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - CONDUWUIT_ALLOW_REGISTRATION: 'true' - CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. - #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' - CONDUWUIT_ALLOW_FEDERATION: 'true' - CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUWUIT_LOG: warn,state_res=warn - CONDUWUIT_ADDRESS: 0.0.0.0 - #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above + CONTINUWUITY_SERVER_NAME: your.server.name # EDIT THIS + CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity + CONTINUWUITY_PORT: 6167 + CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB + CONTINUWUITY_ALLOW_REGISTRATION: 'true' + CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. + #CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' + CONTINUWUITY_ALLOW_FEDERATION: 'true' + CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true' + CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]' + #CONTINUWUITY_LOG: warn,state_res=warn + CONTINUWUITY_ADDRESS: 0.0.0.0 + #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above # ### Uncomment if you want to use your own Element-Web App. 
### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index bdbfb59c..051ed89b 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -1,31 +1,20 @@ -# conduwuit for Docker +# Continuwuity for Docker ## Docker -To run conduwuit with Docker you can either build the image yourself or pull it +To run Continuwuity with Docker you can either build the image yourself or pull it from a registry. ### Use a registry -OCI images for conduwuit are available in the registries listed below. +OCI images for Continuwuity are available in the registries listed below. -| Registry | Image | Size | Notes | -| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- | -| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable latest tagged image. | -| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable latest tagged image. | -| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable latest tagged image. | -| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh] | ![Image Size][shield-main] | Stable main branch. | -| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:main][gl] | ![Image Size][shield-main] | Stable main branch. | -| Docker Hub | [docker.io/girlbossceo/conduwuit:main][dh] | ![Image Size][shield-main] | Stable main branch. | +| Registry | Image | Notes | +| --------------- | --------------------------------------------------------------- | -----------------------| +| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest][fj] | Latest tagged image. | +| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main][fj] | Main branch image. | -[dh]: https://hub.docker.com/r/girlbossceo/conduwuit -[gh]: https://github.com/girlbossceo/conduwuit/pkgs/container/conduwuit -[gl]: https://gitlab.com/conduwuit/conduwuit/container_registry/6369729 -[shield-latest]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest -[shield-main]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main - -OCI image `.tar.gz` files are also hosted directly at when uploaded by CI with a -commit hash/revision or a tagged release: +[fj]: https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity Use @@ -41,22 +30,22 @@ When you have the image you can simply run it with ```bash docker run -d -p 8448:6167 \ - -v db:/var/lib/conduwuit/ \ - -e CONDUWUIT_SERVER_NAME="your.server.name" \ - -e CONDUWUIT_ALLOW_REGISTRATION=false \ - --name conduwuit $LINK + -v db:/var/lib/continuwuity/ \ + -e CONTINUWUITY_SERVER_NAME="your.server.name" \ + -e CONTINUWUITY_ALLOW_REGISTRATION=false \ + --name continuwuity $LINK ``` or you can use [docker compose](#docker-compose). The `-d` flag lets the container run in detached mode. You may supply an -optional `conduwuit.toml` config file, the example config can be found +optional `continuwuity.toml` config file, the example config can be found [here](../configuration/examples.md). 
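For example, to supply a mounted config file and use the host's DNS resolver in one go, a sketch only: the host-side config path is an assumption, and `$LINK` is the image reference used above, mirroring the compose files elsewhere in these docs:

```bash
# Sketch: run with a mounted config file (path is illustrative) and the
# host's /etc/resolv.conf instead of Docker's built-in DNS resolver.
docker run -d -p 8448:6167 \
    -v db:/var/lib/continuwuity/ \
    -v ./continuwuity.toml:/etc/continuwuity.toml:ro \
    -v /etc/resolv.conf:/etc/resolv.conf:ro \
    -e CONTINUWUITY_CONFIG="/etc/continuwuity.toml" \
    --name continuwuity $LINK
```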
You can pass in different env vars to -change config values on the fly. You can even configure conduwuit completely by +change config values on the fly. You can even configure Continuwuity completely by using env vars. For an overview of possible values, please take a look at the [`docker-compose.yml`](docker-compose.yml) file. -If you just want to test conduwuit for a short time, you can use the `--rm` +If you just want to test Continuwuity for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. @@ -91,32 +80,32 @@ docker network create caddy After that, you can rename it so it matches `docker-compose.yml` and spin up the containers! -Additional info about deploying conduwuit can be found [here](generic.md). +Additional info about deploying Continuwuity can be found [here](generic.md). ### Build -Official conduwuit images are built using Nix's -[`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are -repeatable and reproducible by anyone, keeps the images lightweight, and can be -built offline. +Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables multi-platform builds efficiently. -This also ensures portability of our images because `buildLayeredImage` builds -OCI images, not Docker images, and works with other container software. +The resulting images are broadly compatible with Docker and other container runtimes like Podman or containerd. -The OCI images are OS-less with only a very minimal environment of the `tini` -init system, CA certificates, and the conduwuit binary. This does mean there is -not a shell, but in theory you can get a shell by adding the necessary layers -to the layered image. However it's very unlikely you will need a shell for any -real troubleshooting. +The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition. -The flake file for the OCI image definition is at [`nix/pkgs/oci-image/default.nix`][oci-image-def]. +To build an image locally using Docker Buildx, you can typically run a command like: -To build an OCI image using Nix, the following outputs can be built: -- `nix build -L .#oci-image` (default features, x86_64 glibc) -- `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl) -- `nix build -L .#oci-image-aarch64-linux-musl` (default features, aarch64 musl) -- `nix build -L .#oci-image-x86_64-linux-musl-all-features` (all features, x86_64 musl) -- `nix build -L .#oci-image-aarch64-linux-musl-all-features` (all features, aarch64 musl) +```bash +# Build for the current platform and load into the local Docker daemon +docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile . + +# Example: Build for specific platforms and push to a registry. +# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push + +# Example: Build binary optimized for the current CPU +# docker buildx build --load --tag continuwuity:latest --build-arg TARGET_CPU=native -f docker/Dockerfile . +``` + +Refer to the Docker Buildx documentation for more advanced build options. + +[dockerfile-path]: ../../docker/Dockerfile ### Run @@ -138,10 +127,10 @@ web. 
With the two provided files, [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy -to deploy and use conduwuit, with a little caveat. If you already took a look at +to deploy and use Continuwuity, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and loadbalancer and is not able to -serve any kind of content, but for conduwuit to federate, we need to either +serve any kind of content, but for Continuwuity to federate, we need to either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`. @@ -153,4 +142,3 @@ those two files. See the [TURN](../turn.md) page. [nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage -[oci-image-def]: https://github.com/girlbossceo/conduwuit/blob/main/nix/pkgs/oci-image/default.nix diff --git a/docs/deploying/freebsd.md b/docs/deploying/freebsd.md index 65b40204..3764ffa8 100644 --- a/docs/deploying/freebsd.md +++ b/docs/deploying/freebsd.md @@ -1,5 +1,5 @@ -# conduwuit for FreeBSD +# Continuwuity for FreeBSD -conduwuit at the moment does not provide FreeBSD builds or have FreeBSD packaging, however conduwuit does build and work on FreeBSD using the system-provided RocksDB. +Continuwuity at the moment does not provide FreeBSD builds or have FreeBSD packaging, however Continuwuity does build and work on FreeBSD using the system-provided RocksDB. -Contributions for getting conduwuit packaged are welcome. +Contributions for getting Continuwuity packaged are welcome. diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index a07da560..9128f346 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -2,11 +2,11 @@ > ### Getting help > -> If you run into any problems while setting up conduwuit, ask us in -> `#conduwuit:puppygock.gay` or [open an issue on -> GitHub](https://github.com/girlbossceo/conduwuit/issues/new). +> If you run into any problems while setting up Continuwuity, ask us in +> `#continuwuity:continuwuity.org` or [open an issue on +> Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). -## Installing conduwuit +## Installing Continuwuity ### Static prebuilt binary @@ -14,12 +14,10 @@ You may simply download the binary that fits your machine architecture (x86_64 or aarch64). Run `uname -m` to see what you need. Prebuilt fully static musl binaries can be downloaded from the latest tagged -release [here](https://github.com/girlbossceo/conduwuit/releases/latest) or +release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or `main` CI branch workflow artifact output. These also include Debian/Ubuntu packages. -Binaries are also available on my website directly at: - These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit hash/revision, and `releases` are tagged releases. Sort by descending last modified for the latest. @@ -37,7 +35,7 @@ for performance. ### Compiling Alternatively, you may compile the binary yourself. 
We recommend using -Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most +Nix (or [Lix](https://lix.systems)) to build Continuwuity as this has the most guaranteed reproducibility and is the easiest way to get a build environment and output going. This also allows easy cross-compilation. @@ -51,35 +49,35 @@ If wanting to build using standard Rust toolchains, make sure you install: - `liburing-dev` on the compiling machine, and `liburing` on the target host - LLVM and libclang for RocksDB -You can build conduwuit using `cargo build --release --all-features` +You can build Continuwuity using `cargo build --release --all-features` -## Adding a conduwuit user +## Adding a Continuwuity user -While conduwuit can run as any user it is better to use dedicated users for +While Continuwuity can run as any user, it is better to use dedicated users for different services. This also allows you to make sure that the file permissions are correctly set up. -In Debian, you can use this command to create a conduwuit user: +In Debian, you can use this command to create a Continuwuity user: ```bash -sudo adduser --system conduwuit --group --disabled-login --no-create-home +sudo adduser --system continuwuity --group --disabled-login --no-create-home ``` For distros without `adduser` (or where it's a symlink to `useradd`): ```bash -sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit +sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity ``` ## Forwarding ports in the firewall or the router Matrix's default federation port is port 8448, and clients must be using port 443. If you would like to use only port 443, or a different port, you will need to set up -delegation. conduwuit has config options for doing delegation, or you can configure +delegation. Continuwuity has config options for doing delegation, or you can configure your reverse proxy to manually serve the necessary JSON files to do delegation (see the `[global.well_known]` config section). -If conduwuit runs behind a router or in a container and has a different public +If Continuwuity runs behind a router or in a container and has a different public IP address than the host system, these public ports need to be forwarded directly or indirectly to the port mentioned in the config. @@ -94,9 +92,9 @@ on the network level, consider something like NextDNS or Pi-Hole. ## Setting up a systemd service -Two example systemd units for conduwuit can be found +Two example systemd units for Continuwuity can be found [on the configuration page](../configuration/examples.md#debian-systemd-unit-file). -You may need to change the `ExecStart=` path to where you placed the conduwuit +You may need to change the `ExecStart=` path to where you placed the Continuwuity binary if it is not `/usr/bin/conduwuit`. On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros @@ -114,10 +112,10 @@ and entering the following: ReadWritePaths=/path/to/custom/database/path ``` -## Creating the conduwuit configuration file +## Creating the Continuwuity configuration file -Now we need to create the conduwuit's config file in -`/etc/conduwuit/conduwuit.toml`. The example config can be found at +Now we need to create the Continuwuity config file in +`/etc/continuwuity/continuwuity.toml`. The example config can be found at [conduwuit-example.toml](../configuration/examples.md). **Please take a moment to read the config. You need to change at least the @@ -127,7 +125,7 @@ RocksDB is the only supported database backend.
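A bare-minimum config might look like the following sketch; the option names follow the example config, and the database path matches the default used later on this page. Read the full example config before relying on this:

```bash
# Sketch only: write a minimal config. The server name is the one value you
# must edit; everything else shown is a common default, not an official one.
sudo mkdir -p /etc/continuwuity
sudo tee /etc/continuwuity/continuwuity.toml >/dev/null <<'EOF'
[global]
server_name = "your.server.name"      # EDIT THIS
address = "127.0.0.1"                 # keep it behind your reverse proxy
port = 6167
database_path = "/var/lib/conduwuit/"
EOF
```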
## Setting the correct file permissions -If you are using a dedicated user for conduwuit, you will need to allow it to +If you are using a dedicated user for Continuwuity, you will need to allow it to read the config. To do that you can run this: ```bash @@ -139,7 +137,7 @@ If you use the default database path you also need to run this: ```bash sudo mkdir -p /var/lib/conduwuit/ -sudo chown -R conduwuit:conduwuit /var/lib/conduwuit/ +sudo chown -R continuwuity:continuwuity /var/lib/conduwuit/ sudo chmod 700 /var/lib/conduwuit/ ``` @@ -174,13 +172,13 @@ As we would prefer our users to use Caddy, we will not provide configuration fil You will need to reverse proxy everything under following routes: - `/_matrix/` - core Matrix C-S and S-S APIs -- `/_conduwuit/` - ad-hoc conduwuit routes such as `/local_user_count` and +- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and `/server_version` You can optionally reverse proxy the following individual routes: - `/.well-known/matrix/client` and `/.well-known/matrix/server` if using -conduwuit to perform delegation (see the `[global.well_known]` config section) -- `/.well-known/matrix/support` if using conduwuit to send the homeserver admin +Continuwuity to perform delegation (see the `[global.well_known]` config section) +- `/.well-known/matrix/support` if using Continuwuity to send the homeserver admin contact and support page (formerly known as MSC1929) - `/` if you would like to see `hewwo from conduwuit woof!` at the root @@ -200,7 +198,7 @@ header, making federation non-functional. If a workaround is found, feel free to If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). -If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: +If using Nginx, you need to give Continuwuity the request URI using `$request_uri`, or like so: - `proxy_pass http://127.0.0.1:6167$request_uri;` - `proxy_pass http://127.0.0.1:6167;` @@ -209,7 +207,7 @@ Nginx users need to increase `client_max_body_size` (default is 1M) to match ## You're done -Now you can start conduwuit with: +Now you can start Continuwuity with: ```bash sudo systemctl start conduwuit diff --git a/docs/deploying/kubernetes.md b/docs/deploying/kubernetes.md index d7721722..0cbfbbc0 100644 --- a/docs/deploying/kubernetes.md +++ b/docs/deploying/kubernetes.md @@ -1,8 +1,9 @@ -# conduwuit for Kubernetes +# Continuwuity for Kubernetes -conduwuit doesn't support horizontal scalability or distributed loading +Continuwuity doesn't support horizontal scalability or distributed loading natively, however a community maintained Helm Chart is available here to run conduwuit on Kubernetes: -Should changes need to be made, please reach out to the maintainer in our -Matrix room as this is not maintained/controlled by the conduwuit maintainers. +This should be compatible with continuwuity, but you will need to change the image reference. + +Should changes need to be made, please reach out to the maintainer as this is not maintained/controlled by the Continuwuity maintainers. 
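As an illustration only (the chart is community-maintained, so the value keys below are assumptions; check the chart's `values.yaml` before using them), overriding the image reference might look like:

```bash
# Hypothetical values keys; <chart-reference> stands in for the chart
# location, which is linked from the original page.
helm upgrade --install conduwuit <chart-reference> \
    --set image.repository=forgejo.ellis.link/continuwuation/continuwuity \
    --set image.tag=latest
```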
diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md index 3c5b0e69..cf2c09e4 100644 --- a/docs/deploying/nixos.md +++ b/docs/deploying/nixos.md @@ -1,66 +1,33 @@ -# conduwuit for NixOS +# Continuwuity for NixOS -conduwuit can be acquired by Nix (or [Lix][lix]) from various places: +Continuwuity can be acquired by Nix (or [Lix][lix]) from various places: * The `flake.nix` at the root of the repo * The `default.nix` at the root of the repo -* From conduwuit's binary cache - -A community maintained NixOS package is available at [`conduwuit`](https://search.nixos.org/packages?channel=unstable&show=conduwuit&from=0&size=50&sort=relevance&type=packages&query=conduwuit) - -### Binary cache - -A binary cache for conduwuit that the CI/CD publishes to is available at the -following places (both are the same just different names): - -``` -https://attic.kennel.juneis.dog/conduit -conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= - -https://attic.kennel.juneis.dog/conduwuit -conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= -``` - -The binary caches were recreated some months ago due to attic issues. The old public -keys were: - -``` -conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg= -conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw= -``` - -If needed, we have a binary cache on Cachix but it is only limited to 5GB: - -``` -https://conduwuit.cachix.org -conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= -``` - -If specifying a Git remote URL in your flake, you can use any remotes that -are specified on the README (the mirrors), such as the GitHub: `github:girlbossceo/conduwuit` +* From Continuwuity's binary cache ### NixOS module The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure -conduwuit. +Continuwuity. ### Conduit NixOS Config Module and SQLite Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend. -Conduwuit dropped SQLite support in favor of exclusively supporting the much faster RocksDB. +Continuwuity dropped SQLite support in favor of exclusively supporting the much faster RocksDB. Make sure that you are using the RocksDB backend before migrating! There is a [tool to migrate a Conduit SQLite database to RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/). -If you want to run the latest code, you should get conduwuit from the `flake.nix` +If you want to run the latest code, you should get Continuwuity from the `flake.nix` or `default.nix` and set [`services.matrix-conduit.package`][package] -appropriately to use conduwuit instead of Conduit. +appropriately to use Continuwuity instead of Conduit. ### UNIX sockets -Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module +Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX socket option does not exist in Conduit, and the module forcibly sets the `address` and `port` config options. 
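Once a socket is configured, it can be sanity-checked from the host with curl. A sketch, where the socket path is a placeholder for whatever path your workaround sets:

```bash
# The socket path is an assumption; /_conduwuit/server_version is one of the
# ad-hoc routes documented on the generic deployment page.
curl --unix-socket /run/conduwuit/conduwuit.sock \
    http://localhost/_conduwuit/server_version
```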
@@ -84,13 +51,13 @@ disallows the namespace from accessing or creating UNIX sockets and has to be en systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ]; ``` -Even though those workarounds are feasible a conduwuit NixOS configuration module, developed and +Even though those workarounds are feasible, a Continuwuity NixOS configuration module, developed and published by the community, would be appreciated. ### jemalloc and hardened profile -conduwuit uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] -due to them using `scudo` by default. You must either disable/hide `scudo` from conduwuit, or +Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] +due to them using `scudo` by default. You must either disable/hide `scudo` from Continuwuity, or disable jemalloc like so: ```nix diff --git a/docs/development.md b/docs/development.md index fa7519c0..1e344f41 100644 --- a/docs/development.md +++ b/docs/development.md @@ -4,9 +4,9 @@ Information about developing the project. If you are only interested in using it, you can safely ignore this page. If you plan on contributing, see the [contributor's guide](./contributing.md). -## conduwuit project layout +## Continuwuity project layout -conduwuit uses a collection of sub-crates, packages, or workspace members +Continuwuity uses a collection of sub-crates, packages, or workspace members that indicate what each general area of code is for. All of the workspace members are under `src/`. The workspace definition is at the top level / root `Cargo.toml`. @@ -14,11 +14,11 @@ The crate names are generally self-explanatory: - `admin` is the admin room - `api` is the HTTP API, Matrix C-S and S-S endpoints, etc -- `core` is core conduwuit functionality like config loading, error definitions, +- `core` is core Continuwuity functionality like config loading, error definitions, global utilities, logging infrastructure, etc - `database` is RocksDB methods, helpers, RocksDB config, and general database definitions, utilities, or functions -- `macros` are conduwuit Rust [macros][macros] like general helper macros, logging +- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging and error handling macros, and [syn][syn] and [procedural macros][proc-macro] used for admin room commands and others - `main` is the "primary" sub-crate. This is where the `main()` function lives, @@ -35,7 +35,7 @@ if you truly find yourself needing to, we recommend reaching out to us in the Matrix room for discussions about it beforehand. The primary inspiration for this design was a part of hot reloadable development, -to support "conduwuit as a library" where specific parts can simply be swapped out. +to support "Continuwuity as a library" where specific parts can simply be swapped out. There is evidence Conduit wanted to go this route too as `axum` is technically an optional feature in Conduit, and can be compiled without the binary or axum library for handling inbound web requests; but it was never completed or worked. @@ -68,10 +68,10 @@ do this if Rust supported workspace-level features to begin with. ## List of forked dependencies -During conduwuit development, we have had to fork +During Continuwuity development, we have had to fork some dependencies to support our use-cases in some areas.
This ranges from things said upstream project won't accept for any reason, faster-paced -development (unresponsive or slow upstream), conduwuit-specific usecases, or +development (unresponsive or slow upstream), Continuwuity-specific usecases, or lack of time to upstream some things. - [ruma/ruma][1]: - various performance @@ -84,7 +84,7 @@ builds seem to be broken on upstream, fixes some broken/suspicious code in places, additional safety measures, and support redzones for Valgrind - [zyansheep/rustyline-async][4]: - tab completion callback and -`CTRL+\` signal quit event for conduwuit console CLI +`CTRL+\` signal quit event for Continuwuity console CLI - [rust-rocksdb/rust-rocksdb][5]: - [`@zaidoon1`][8]'s fork has quicker updates, more up to date dependencies, etc. Our fork fixes musl build @@ -97,7 +97,7 @@ alongside other logging/metrics things ## Debugging with `tokio-console` [`tokio-console`][7] can be a useful tool for debugging and profiling. To make a -`tokio-console`-enabled build of conduwuit, enable the `tokio_console` feature, +`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature, disable the default `release_max_log_level` feature, and set the `--cfg tokio_unstable` flag to enable experimental tokio APIs. A build might look like this: @@ -109,7 +109,7 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \ --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console ``` -You will also need to enable the `tokio_console` config option in conduwuit when +You will also need to enable the `tokio_console` config option in Continuwuity when starting it. This was due to tokio-console causing gradual memory leak/usage if left enabled. diff --git a/docs/development/hot_reload.md b/docs/development/hot_reload.md index 65fd4adf..194ea3bc 100644 --- a/docs/development/hot_reload.md +++ b/docs/development/hot_reload.md @@ -5,7 +5,7 @@ guaranteed to work at this time. ### Summary -When developing in debug-builds with the nightly toolchain, conduwuit is modular +When developing in debug-builds with the nightly toolchain, Continuwuity is modular using dynamic libraries and various parts of the application are hot-reloadable while the server is running: http api handlers, admin commands, services, database, etc. These are all split up into individual workspace crates as seen @@ -42,7 +42,7 @@ library, macOS, and likely other host architectures are not supported (if other architectures work, feel free to let us know and/or make a PR updating this). This should work on GNU ld and lld (rust-lld) and gcc/clang, however if you happen to have linker issues it's recommended to try using `mold` or `gold` -linkers, and please let us know in the [conduwuit Matrix room][7] the linker +linkers, and please let us know in the [Continuwuity Matrix room][7] the linker error and what linker solved this issue so we can figure out a solution. Ideally there should be minimal friction to using this, and in the future a build script (`build.rs`) may be suitable to making this easier to use if the capabilities @@ -52,13 +52,13 @@ allow us. As of 19 May 2024, the instructions for using this are: -0. Have patience. Don't hesitate to join the [conduwuit Matrix room][7] to +0. Have patience. Don't hesitate to join the [Continuwuity Matrix room][7] to receive help using this. 
As indicated by the various rustflags used and some of the interesting issues linked at the bottom, this is definitely not something the Rust ecosystem or toolchain is used to doing. 1. Install the nightly toolchain using rustup. You may need to use `rustup - override set nightly` in your local conduwuit directory, or use `cargo + override set nightly` in your local Continuwuity directory, or use `cargo +nightly` for all actions. 2. Uncomment `cargo-features` at the top level / root Cargo.toml @@ -85,14 +85,14 @@ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.rustup/toolchains/nightly-x86_64-unknown Cargo should only rebuild what was changed / what's necessary, so it should not be rebuilding all the crates. -9. In your conduwuit server terminal, hit/send `CTRL+C` signal. This will tell - conduwuit to find which libraries need to be reloaded, and reloads them as +9. In your Continuwuity server terminal, hit/send `CTRL+C` signal. This will tell + Continuwuity to find which libraries need to be reloaded, and reloads them as necessary. 10. If there were no errors, it will tell you it successfully reloaded `#` modules, and your changes should now be visible. Repeat 7 - 9 as needed. -To shutdown conduwuit in this setup, hit/send `CTRL+\`. Normal builds still +To shutdown Continuwuity in this setup, hit/send `CTRL+\`. Normal builds still shutdown with `CTRL+C` as usual. Steps 1 - 5 are the initial first-time steps for using this. To remove the hot @@ -101,7 +101,7 @@ reload setup, revert/comment all the Cargo.toml changes. As mentioned in the requirements section, if you happen to have some linker issues, try using the `-fuse-ld=` rustflag and specify mold or gold in all the `rustflags` definitions in the top level Cargo.toml, and please let us know in -the [conduwuit Matrix room][7] the problem. mold can be installed typically +the [Continuwuity Matrix room][7] the problem. mold can be installed typically through your distro, and gold is provided by the binutils package. It's possible a helper script can be made to do all of this, or most preferably @@ -136,7 +136,7 @@ acyclic graph. The primary rule is simple and illustrated in the figure below: **no crate is allowed to call a function or use a variable from a crate below it.** -![conduwuit's dynamic library setup diagram - created by Jason +![Continuwuity's dynamic library setup diagram - created by Jason Volk](assets/libraries.png) When a symbol is referenced between crates they become bound: **crates cannot be @@ -147,7 +147,7 @@ by using an `RTLD_LOCAL` binding for just one link between the main executable and the first crate, freeing the executable from all modules as no global binding ever occurs between them. -![conduwuit's reload and load order diagram - created by Jason +![Continuwuity's reload and load order diagram - created by Jason Volk](assets/reload_order.png) Proper resource management is essential for reliable reloading to occur. This is @@ -190,11 +190,11 @@ The initial implementation PR is available [here][1]. 
- [Workspace-level metadata (cargo-deb)](https://github.com/kornelski/cargo-deb/issues/68) -[1]: https://github.com/girlbossceo/conduwuit/pull/387 +[1]: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/387 [2]: https://wiki.musl-libc.org/functional-differences-from-glibc.html#Unloading-libraries [3]: https://github.com/rust-lang/rust/issues/28794 [4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049 [5]: https://github.com/rust-lang/cargo/issues/12746 [6]: https://crates.io/crates/hot-lib-reloader/ -[7]: https://matrix.to/#/#conduwuit:puppygock.gay +[7]: https://matrix.to/#/#continuwuity:continuwuity.org [8]: https://crates.io/crates/libloading diff --git a/docs/development/testing.md b/docs/development/testing.md index a577698a..d28bb874 100644 --- a/docs/development/testing.md +++ b/docs/development/testing.md @@ -24,8 +24,9 @@ and run the script. If you're on macOS and need to build an image, run `nix build .#linux-complement`. We have a Complement fork as some tests have needed to be fixed. This can be found -at: +at: -[ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo +[ci-workflows]: +https://forgejo.ellis.link/continuwuation/continuwuity/actions/?workflow=ci.yml&actor=0&status=1 [complement]: https://github.com/matrix-org/complement [direnv]: https://direnv.net/docs/hook.html diff --git a/docs/introduction.md b/docs/introduction.md index 9d3a294a..d193f7c7 100644 --- a/docs/introduction.md +++ b/docs/introduction.md @@ -1,4 +1,4 @@ -# conduwuit +# Continuwuity {{#include ../README.md:catchphrase}} @@ -8,7 +8,7 @@ - [Deployment options](deploying.md) -If you want to connect an appservice to conduwuit, take a look at the +If you want to connect an appservice to Continuwuity, take a look at the [appservices documentation](appservices.md). #### How can I contribute? diff --git a/docs/maintenance.md b/docs/maintenance.md index 5c8c853a..16ec5a4e 100644 --- a/docs/maintenance.md +++ b/docs/maintenance.md @@ -1,14 +1,14 @@ -# Maintaining your conduwuit setup +# Maintaining your Continuwuity setup ## Moderation -conduwuit has moderation through admin room commands. "binary commands" (medium +Continuwuity has moderation through admin room commands. "binary commands" (medium priority) and an admin API (low priority) are planned. Some moderation-related config options are available in the example config such as "global ACLs" and blocking media requests to certain servers. See the example config for the moderation config options under the "Moderation / Privacy / Security" section. -conduwuit has moderation admin commands for: +Continuwuity has moderation admin commands for: - managing room aliases (`!admin rooms alias`) - managing room directory (`!admin rooms directory`) @@ -36,7 +36,7 @@ each object being newline delimited. An example of doing this is: ## Database (RocksDB) Generally there is very little you need to do. [Compaction][rocksdb-compaction] -is ran automatically based on various defined thresholds tuned for conduwuit to +is run automatically based on various defined thresholds tuned for Continuwuity to be high performance with the least I/O amplification or overhead. Manually running compaction is not recommended, or compaction via a timer, due to creating unnecessary I/O amplification. RocksDB is built with io_uring support @@ -50,7 +50,7 @@ Some RocksDB settings can be adjusted such as the compression method chosen.
See the RocksDB section in the [example config](configuration/examples.md). btrfs users have reported that database compression does not need to be disabled -on conduwuit as the filesystem already does not attempt to compress. This can be +on Continuwuity as the filesystem already does not attempt to compress. This can be validated by using `filefrag -v` on a `.SST` file in your database, and ensure the `physical_offset` matches (no filesystem compression). It is very important to ensure no additional filesystem compression takes place as this can render @@ -70,8 +70,8 @@ they're server logs or database logs, however they are critical RocksDB files related to WAL tracking. The only safe files that can be deleted are the `LOG` files (all caps). These -are the real RocksDB telemetry/log files, however conduwuit has already -configured to only store up to 3 RocksDB `LOG` files due to generall being +are the real RocksDB telemetry/log files, however Continuwuity is already +configured to only store up to 3 RocksDB `LOG` files due to generally being useless for average users unless troubleshooting something low-level. If you would like to store nearly none at all, see the `rocksdb_max_log_files` config option. @@ -88,7 +88,7 @@ still be joined together. To restore a backup from an online RocksDB backup: -- shutdown conduwuit +- shut down Continuwuity - create a new directory for merging together the data - in the online backup created, copy all `.sst` files in `$DATABASE_BACKUP_PATH/shared_checksum` to your new directory @@ -99,9 +99,9 @@ To restore a backup from an online RocksDB backup: if you have multiple) to your new directory - set your `database_path` config option to your new directory, or replace your old one with the new one you crafted -- start up conduwuit again and it should open as normal +- start up Continuwuity again and it should open as normal -If you'd like to do an offline backup, shutdown conduwuit and copy your +If you'd like to do an offline backup, shut down Continuwuity and copy your `database_path` directory elsewhere. This can be restored with no modifications needed. @@ -110,7 +110,7 @@ directory. ## Media -Media still needs various work, however conduwuit implements media deletion via: +Media still needs various work, however Continuwuity implements media deletion via: - MXC URI or Event ID (unencrypted and attempts to find the MXC URI in the event) @@ -118,17 +118,17 @@ event) - Delete remote media in the past `N` seconds/minutes via filesystem metadata on the file created time (`btime`) or file modified time (`mtime`) -See the `!admin media` command for further information. All media in conduwuit +See the `!admin media` command for further information. All media in Continuwuity is stored at `$DATABASE_DIR/media`. This will be configurable soon. If you are finding yourself needing extensive granular control over media, we recommend looking into [Matrix Media -Repo](https://github.com/t2bot/matrix-media-repo). conduwuit intends to +Repo](https://github.com/t2bot/matrix-media-repo). Continuwuity intends to implement various utilities for media, but MMR is dedicated to extensive media management. Built-in S3 support is also planned, but for now using an "S3 filesystem" on -`media/` works. conduwuit also sends a `Cache-Control` header of 1 year and +`media/` works.
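One unofficial way to do that, shown purely as a sketch (any S3-backed FUSE filesystem should work; the bucket, endpoint, and credentials file are placeholders, and the target is the default `$DATABASE_DIR/media`):

```bash
# Illustrative only: overlay the media directory with an S3 bucket via s3fs.
# Bucket name, endpoint URL, and credentials file are all assumptions.
s3fs your-media-bucket /var/lib/conduwuit/media \
    -o passwd_file="$HOME/.passwd-s3fs" \
    -o url="https://s3.example.com"
```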
Continuwuity also sends a `Cache-Control` header of 1 year and immutable for all media requests (download and thumbnail) to reduce unnecessary media requests from browsers, reduce bandwidth usage, and reduce load. diff --git a/docs/security.md b/docs/security.md new file mode 100644 index 00000000..b4474cf5 --- /dev/null +++ b/docs/security.md @@ -0,0 +1 @@ +{{#include ../SECURITY.md}} diff --git a/docs/static/_headers b/docs/static/_headers index 5e960241..6e52de9f 100644 --- a/docs/static/_headers +++ b/docs/static/_headers @@ -1,3 +1,6 @@ /.well-known/matrix/* Access-Control-Allow-Origin: * Content-Type: application/json +/.well-known/continuwuity/* + Access-Control-Allow-Origin: * + Content-Type: application/json \ No newline at end of file diff --git a/docs/static/announcements.json b/docs/static/announcements.json new file mode 100644 index 00000000..9b97d091 --- /dev/null +++ b/docs/static/announcements.json @@ -0,0 +1,9 @@ +{ + "$schema": "https://continuwuity.org/schema/announcements.schema.json", + "announcements": [ + { + "id": 1, + "message": "Welcome to Continuwuity! Important announcements about the project will appear here." + } + ] +} \ No newline at end of file diff --git a/docs/static/announcements.schema.json b/docs/static/announcements.schema.json new file mode 100644 index 00000000..95b1d153 --- /dev/null +++ b/docs/static/announcements.schema.json @@ -0,0 +1,31 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "$id": "https://continuwuity.org/schema/announcements.schema.json", + "type": "object", + "properties": { + "announcements": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "integer" + }, + "message": { + "type": "string" + }, + "date": { + "type": "string" + } + }, + "required": [ + "id", + "message" + ] + } + } + }, + "required": [ + "announcements" + ] + } \ No newline at end of file diff --git a/docs/static/support b/docs/static/support new file mode 100644 index 00000000..6b7a9860 --- /dev/null +++ b/docs/static/support @@ -0,0 +1,24 @@ +{ + "contacts": [ + { + "email_address": "security@continuwuity.org", + "role": "m.role.security" + }, + { + "matrix_id": "@tom:continuwuity.org", + "email_address": "tom@tcpip.uk", + "role": "m.role.admin" + }, + { + "matrix_id": "@jade:continuwuity.org", + "email_address": "jade@continuwuity.org", + "role": "m.role.admin" + }, + { + "matrix_id": "@nex:continuwuity.org", + "email_address": "nex@continuwuity.org", + "role": "m.role.admin" + } + ], + "support_page": "https://continuwuity.org/introduction#contact" +} \ No newline at end of file diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index d25c9762..d84dbc7a 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,47 +1,48 @@ -# Troubleshooting conduwuit +# Troubleshooting Continuwuity -> ## Docker users ⚠️ +> **Docker users ⚠️** > -> Docker is extremely UX unfriendly. Because of this, a ton of issues or support -> is actually Docker support, not conduwuit support. We also cannot document the -> ever-growing list of Docker issues here. -> -> If you intend on asking for support and you are using Docker, **PLEASE** -> triple validate your issues are **NOT** because you have a misconfiguration in -> your Docker setup. -> -> If there are things like Compose file issues or Dockerhub image issues, those -> can still be mentioned as long as they're something we can fix. +> Docker can be difficult to use and debug.
It's common for Docker > misconfigurations to cause issues, particularly with networking and permissions. > Please check that your issues are not due to problems with your Docker setup. -## conduwuit and Matrix issues +## Continuwuity and Matrix issues -#### Lost access to admin room +### Lost access to admin room You can reinvite yourself to the admin room through the following methods: + +- Use the `--execute "users make_user_admin <username>"` Continuwuity binary argument once to invite yourself to the admin room on startup -- Use the Continuwuity console/CLI to run the `users make_user_admin` command +- Use the Continuwuity console/CLI to run the `users make_user_admin` command - Or specify the `emergency_password` config option to allow you to temporarily log into the server account (`@conduit`) from a web client ## General potential issues -#### Potential DNS issues when using Docker +### Potential DNS issues when using Docker -Docker has issues with its default DNS setup that may cause DNS to not be -properly functional when running conduwuit, resulting in federation issues. The -symptoms of this have shown in excessively long room joins (30+ minutes) from -very long DNS timeouts, log entries of "mismatching responding nameservers", +Docker's DNS setup for containers in a non-default network intercepts queries to +enable resolving of container hostnames to IP addresses. However, due to +performance issues with Docker's built-in resolver, this can cause DNS queries +to take a long time to resolve, resulting in federation issues. + +This is particularly common with Docker Compose, as custom networks are easily +created and configured. + +Symptoms of this include excessively long room joins (30+ minutes) from very +long DNS timeouts, log entries of "mismatching responding nameservers", and/or partial or non-functional inbound/outbound federation. -This is **not** a conduwuit issue, and is purely a Docker issue. It is not -sustainable for heavy DNS activity which is normal for Matrix federation. The -workarounds for this are: -- Use DNS over TCP via the config option `query_over_tcp_only = true` -- Don't use Docker's default DNS setup and instead allow the container to use -and communicate with your host's DNS servers (host's `/etc/resolv.conf`) +This is not a bug in Continuwuity. Docker's default DNS resolver is not suitable +for heavy DNS activity, which is normal for federated protocols like Matrix. -#### DNS No connections available error message +Workarounds: + +- Use DNS over TCP via the config option `query_over_tcp_only = true` +- Bypass Docker's default DNS setup and instead allow the container to use and communicate with your host's DNS servers. Typically, this can be done by mounting the host's `/etc/resolv.conf`. + +### DNS No connections available error message If you receive spurious amounts of error logs saying "DNS No connections available", this is due to your DNS server (servers from `/etc/resolv.conf`) @@ -64,7 +65,7 @@ very computationally expensive, and is extremely susceptible to denial of service, especially on Matrix. Many servers also strangely have broken DNSSEC setups and will result in non-functional federation. -conduwuit cannot provide a "works-for-everyone" Unbound DNS setup guide, but +Continuwuity cannot provide a "works-for-everyone" Unbound DNS setup guide, but the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch] may be of interest.
Disabling DNSSEC on Unbound means commenting out the trust-anchors config options and removing the `validator` module. @@ -75,9 +76,9 @@ high load, and we have identified its DNS caching to not be very effective. dnsmasq can possibly work, but it does **not** support TCP fallback which can be problematic when receiving large DNS responses such as from large SRV records. If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback` -in conduwuit config. +in the Continuwuity config. -Raising `dns_cache_entries` in conduwuit config from the default can also assist +Raising `dns_cache_entries` in the Continuwuity config from the default can also assist in DNS caching, but a full-fledged external caching resolver is better and more reliable. @@ -91,13 +92,13 @@ reliability at a slight performance cost due to TCP overhead. ## RocksDB / database issues -#### Database corruption +### Database corruption If your database is corrupted *and* is failing to start (e.g. checksum mismatch), it may be recoverable but careful steps must be taken, and there is no guarantee it can be recovered. -The first thing that can be done is launching conduwuit with the +The first thing that can be done is launching Continuwuity with the `rocksdb_repair` config option set to true. This will tell RocksDB to attempt to repair itself at launch. If this does not work, disable the option and continue reading. @@ -109,7 +110,7 @@ RocksDB has the following recovery modes: - `PointInTime` - `SkipAnyCorruptedRecord` -By default, conduwuit uses `TolerateCorruptedTailRecords` as generally these may +By default, Continuwuity uses `TolerateCorruptedTailRecords` as generally these may be due to bad federation and we can re-fetch the correct data over federation. The RocksDB default is `PointInTime` which will attempt to restore a "snapshot" of the data when it was last known to be good. This data can be either a few @@ -126,12 +127,12 @@ if `PointInTime` does not work as a last ditch effort. With this in mind: -- First start conduwuit with the `PointInTime` recovery method. See the [example +- First start Continuwuity with the `PointInTime` recovery method. See the [example config](configuration/examples.md) for how to do this using `rocksdb_recovery_mode` - If your database successfully opens, clients are recommended to clear their client cache to account for the rollback -- Leave your conduwuit running in `PointInTime` for at least 30-60 minutes so as +- Leave your Continuwuity running in `PointInTime` for at least 30-60 minutes so that as much of the corruption as possible is restored - If all goes well, you should be able to restore back to using `TolerateCorruptedTailRecords` and you have successfully recovered your database @@ -142,16 +143,16 @@ Note that users should not really be debugging things. If you find yourself debugging and find the issue, please let us know and/or how we can fix it. Various debug commands can be found in `!admin debug`. -#### Debug/Trace log level +### Debug/Trace log level -conduwuit builds without debug or trace log levels at compile time by default +Continuwuity builds without debug or trace log levels at compile time by default for substantial performance gains in CPU usage and improved compile times. If you need to access debug/trace log levels, you will need to build without the `release_max_log_level` feature or use our provided static debug binaries.
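Such a build might look like this sketch; the feature list mirrors the tokio-console example in the development docs, so trim it to the optional features you actually use:

```bash
# Assumption: dropping default features (and not re-enabling
# release_max_log_level) is how debug/trace levels become available.
cargo build --release --no-default-features \
    --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression
```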
-#### Changing log level dynamically +### Changing log level dynamically -conduwuit supports changing the tracing log environment filter on-the-fly using +Continuwuity supports changing the tracing log environment filter on-the-fly using the admin command `!admin debug change-log-level `. This accepts a string **without quotes** the same format as the `log` config option. @@ -166,9 +167,9 @@ load, simply pass the `--reset` flag. `!admin debug change-log-level --reset` -#### Pinging servers +### Pinging servers -conduwuit can ping other servers using `!admin debug ping `. This takes +Continuwuity can ping other servers using `!admin debug ping `. This takes a server name and goes through the server discovery process and queries `/_matrix/federation/v1/version`. Errors are outputted. @@ -177,15 +178,15 @@ server performance on either side as that endpoint is completely unauthenticated and simply fetches a string on a static JSON endpoint. It is very low cost both bandwidth and computationally. -#### Allocator memory stats +### Allocator memory stats When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you -can see conduwuit's high-level allocator stats by using +can see Continuwuity's high-level allocator stats by using `!admin server memory-usage` at the bottom. If you are a developer, you can also view the raw jemalloc statistics with `!admin debug memory-stats`. Please note that this output is extremely large -which may only be visible in the conduwuit console CLI due to PDU size limits, +which may only be visible in the Continuwuity console CLI due to PDU size limits, and is not easy for non-developers to understand. [unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html diff --git a/docs/turn.md b/docs/turn.md index 287f2545..5dba823c 100644 --- a/docs/turn.md +++ b/docs/turn.md @@ -1,6 +1,6 @@ # Setting up TURN/STURN -In order to make or receive calls, a TURN server is required. conduwuit suggests +In order to make or receive calls, a TURN server is required. Continuwuity suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. @@ -17,9 +17,9 @@ realm= A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`. -These same values need to be set in conduwuit. See the [example +These same values need to be set in Continuwuity. See the [example config](configuration/examples.md) in the TURN section for configuring these and -restart conduwuit after. +restart Continuwuity after. `turn_secret` or a path to `turn_secret_file` must have a value of your coturn `static-auth-secret`, or use `turn_username` and `turn_password` @@ -34,7 +34,7 @@ If you are using TURN over TLS, you can replace `turn:` with `turns:` in the TURN over TLS. This is highly recommended. 
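For instance, the secret can be generated once and used on both sides. A sketch: the TURN hostname is a placeholder, `turn_secret` is the option discussed above, and `turn_uris` holds the URIs this section refers to:

```bash
# Generate a 64-character alphanumeric secret (via pwgen, as suggested above)
# and print the two matching config lines: one for coturn's turnserver.conf,
# one for the Continuwuity config.
SECRET="$(pwgen -s 64 1)"
echo "static-auth-secret=${SECRET}   # -> coturn turnserver.conf"
echo "turn_secret = \"${SECRET}\"    # -> continuwuity config"
echo 'turn_uris = ["turn:turn.example.com?transport=udp", "turn:turn.example.com?transport=tcp"]'
```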
If you need unauthenticated access to the TURN URIs, or some clients may be -having trouble, you can enable `turn_guest_access` in conduwuit which disables +having trouble, you can enable `turn_guest_access` in Continuwuity which disables authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer` ### Run diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index 9b010e14..1295cb03 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -75,9 +75,9 @@ dockerTools.buildImage { else []; Env = [ - "CONDUWUIT_TLS__KEY=${./private_key.key}" - "CONDUWUIT_TLS__CERTS=${./certificate.crt}" - "CONDUWUIT_CONFIG=${./config.toml}" + "CONTINUWUITY_TLS__KEY=${./private_key.key}" + "CONTINUWUITY_TLS__CERTS=${./certificate.crt}" + "CONTINUWUITY_CONFIG=${./config.toml}" "RUST_BACKTRACE=full" ]; diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 9c8038a7..f2fffec0 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -130,7 +130,8 @@ buildDepsOnlyEnv = }); buildPackageEnv = { - CONDUWUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev or ""; + GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or ""; + GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or ""; } // buildDepsOnlyEnv // { # Only needed in static stdenv because these are transitive dependencies of rocksdb CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 1650053d..953407ef 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -33,13 +33,13 @@ dockerTools.buildLayeredImage { "; "org.opencontainers.image.created" ="@${toString inputs.self.lastModified}"; "org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust"; - "org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/"; + "org.opencontainers.image.documentation" = "https://continuwuity.org/"; "org.opencontainers.image.licenses" = "Apache-2.0"; "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; - "org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit"; + "org.opencontainers.image.source" = "https://forgejo.ellis.link/continuwuation/continuwuity"; "org.opencontainers.image.title" = main.pname; - "org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/"; - "org.opencontainers.image.vendor" = "girlbossceo"; + "org.opencontainers.image.url" = "https://continuwuity.org/"; + "org.opencontainers.image.vendor" = "continuwuation"; "org.opencontainers.image.version" = main.version; }; }; diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml index ca865969..7896ef97 100644 --- a/src/admin/Cargo.toml +++ b/src/admin/Cargo.toml @@ -17,12 +17,61 @@ crate-type = [ ] [features] +brotli_compression = [ + "conduwuit-api/brotli_compression", + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", +] +gzip_compression = [ + "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", +] +io_uring = [ + "conduwuit-api/io_uring", + "conduwuit-database/io_uring", + "conduwuit-service/io_uring", +] +jemalloc = [ + "conduwuit-api/jemalloc", + "conduwuit-core/jemalloc", + "conduwuit-database/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-api/jemalloc_conf", + "conduwuit-core/jemalloc_conf", + 
"conduwuit-database/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-api/jemalloc_prof", + "conduwuit-core/jemalloc_prof", + "conduwuit-database/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-api/jemalloc_stats", + "conduwuit-core/jemalloc_stats", + "conduwuit-database/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] release_max_log_level = [ + "conduwuit-api/release_max_log_level", + "conduwuit-core/release_max_log_level", + "conduwuit-database/release_max_log_level", + "conduwuit-service/release_max_log_level", "tracing/max_level_trace", "tracing/release_max_level_info", "log/max_level_trace", "log/release_max_level_info", ] +zstd_compression = [ + "conduwuit-api/zstd_compression", + "conduwuit-core/zstd_compression", + "conduwuit-database/zstd_compression", + "conduwuit-service/zstd_compression", +] [dependencies] clap.workspace = true diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 9e010a59..0d636c72 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -2,7 +2,7 @@ use clap::Parser; use conduwuit::Result; use crate::{ - appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command, + appservice, appservice::AppserviceCommand, check, check::CheckCommand, context::Context, debug, debug::DebugCommand, federation, federation::FederationCommand, media, media::MediaCommand, query, query::QueryCommand, room, room::RoomCommand, server, server::ServerCommand, user, user::UserCommand, @@ -49,20 +49,18 @@ pub(super) enum AdminCommand { } #[tracing::instrument(skip_all, name = "command")] -pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Result { use AdminCommand::*; match command { - | Appservices(command) => appservice::process(command, context).await?, - | Media(command) => media::process(command, context).await?, - | Users(command) => user::process(command, context).await?, - | Rooms(command) => room::process(command, context).await?, - | Federation(command) => federation::process(command, context).await?, - | Server(command) => server::process(command, context).await?, - | Debug(command) => debug::process(command, context).await?, - | Query(command) => query::process(command, context).await?, - | Check(command) => check::process(command, context).await?, + | Appservices(command) => appservice::process(command, context).await, + | Media(command) => media::process(command, context).await, + | Users(command) => user::process(command, context).await, + | Rooms(command) => room::process(command, context).await, + | Federation(command) => federation::process(command, context).await, + | Server(command) => server::process(command, context).await, + | Debug(command) => debug::process(command, context).await, + | Query(command) => query::process(command, context).await, + | Check(command) => check::process(command, context).await, } - - Ok(()) } diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs index 88f28431..3575e067 100644 --- a/src/admin/appservice/commands.rs +++ b/src/admin/appservice/commands.rs @@ -1,84 +1,80 @@ -use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; +use conduwuit::{Err, Result, checked}; +use futures::{FutureExt, StreamExt, TryFutureExt}; -use crate::{Result, admin_command}; +use crate::admin_command; #[admin_command] -pub(super) async fn register(&self) -> Result 
{ - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" +pub(super) async fn register(&self) -> Result { + let body = &self.body; + let body_len = self.body.len(); + if body_len < 2 + || !body[0].trim().starts_with("```") + || body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details."); } - let appservice_config_body = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config_body); + let range = 1..checked!(body_len - 1)?; + let appservice_config_body = body[range].join("\n"); + let parsed_config = serde_yaml::from_str(&appservice_config_body); match parsed_config { + | Err(e) => return Err!("Could not parse appservice config as YAML: {e}"), | Ok(registration) => match self .services .appservice .register_appservice(®istration, &appservice_config_body) .await + .map(|()| registration.id) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {}", - registration.id - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {e}" - ))), + | Err(e) => return Err!("Failed to register appservice: {e}"), + | Ok(id) => write!(self, "Appservice registered with ID: {id}"), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config as YAML: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn unregister( - &self, - appservice_identifier: String, -) -> Result { +pub(super) async fn unregister(&self, appservice_identifier: String) -> Result { match self .services .appservice .unregister_appservice(&appservice_identifier) .await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to unregister appservice: {e}" - ))), + | Err(e) => return Err!("Failed to unregister appservice: {e}"), + | Ok(()) => write!(self, "Appservice unregistered."), } + .await } #[admin_command] -pub(super) async fn show_appservice_config( - &self, - appservice_identifier: String, -) -> Result { +pub(super) async fn show_appservice_config(&self, appservice_identifier: String) -> Result { match self .services .appservice .get_registration(&appservice_identifier) .await { + | None => return Err!("Appservice does not exist."), | Some(config) => { - let config_str = serde_yaml::to_string(&config) - .expect("config should've been validated on register"); - let output = - format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",); - Ok(RoomMessageEventContent::notice_markdown(output)) + let config_str = serde_yaml::to_string(&config)?; + write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```") }, - | None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")), } + .await } #[admin_command] -pub(super) async fn list_registered(&self) -> Result { - let appservices = self.services.appservice.iter_ids().await; - let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", ")); - Ok(RoomMessageEventContent::text_plain(output)) +pub(super) async fn list_registered(&self) -> Result { + self.services + .appservice + .iter_ids() + .collect() + .map(Ok) + .and_then(|appservices: Vec<_>| { + let len = 
appservices.len(); + let list = appservices.join(", "); + write!(self, "Appservices ({len}): {list}") + }) + .await } diff --git a/src/admin/check/commands.rs b/src/admin/check/commands.rs index 7e27362f..1ffc3ae5 100644 --- a/src/admin/check/commands.rs +++ b/src/admin/check/commands.rs @@ -1,15 +1,14 @@ use conduwuit::Result; use conduwuit_macros::implement; use futures::StreamExt; -use ruma::events::room::message::RoomMessageEventContent; -use crate::Command; +use crate::Context; /// Uses the iterator in `src/database/key_value/users.rs` to iterator over /// every user in our database (remote and local). Reports total count, any /// errors if there were any, etc -#[implement(Command, params = "<'_>")] -pub(super) async fn check_all_users(&self) -> Result { +#[implement(Context, params = "<'_>")] +pub(super) async fn check_all_users(&self) -> Result { let timer = tokio::time::Instant::now(); let users = self.services.users.iter().collect::>().await; let query_time = timer.elapsed(); @@ -18,11 +17,10 @@ pub(super) async fn check_all_users(&self) -> Result { let err_count = users.iter().filter(|_user| false).count(); let ok_count = users.iter().filter(|_user| true).count(); - let message = format!( + self.write_str(&format!( "Database query completed in {query_time:?}:\n\n```\nTotal entries: \ {total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \ {ok_count:?}\n```" - ); - - Ok(RoomMessageEventContent::notice_markdown(message)) + )) + .await } diff --git a/src/admin/command.rs b/src/admin/context.rs similarity index 67% rename from src/admin/command.rs rename to src/admin/context.rs index 5df980d6..270537be 100644 --- a/src/admin/command.rs +++ b/src/admin/context.rs @@ -3,13 +3,13 @@ use std::{fmt, time::SystemTime}; use conduwuit::Result; use conduwuit_service::Services; use futures::{ - Future, FutureExt, + Future, FutureExt, TryFutureExt, io::{AsyncWriteExt, BufWriter}, lock::Mutex, }; use ruma::EventId; -pub(crate) struct Command<'a> { +pub(crate) struct Context<'a> { pub(crate) services: &'a Services, pub(crate) body: &'a [&'a str], pub(crate) timer: SystemTime, @@ -17,14 +17,14 @@ pub(crate) struct Command<'a> { pub(crate) output: Mutex>>, } -impl Command<'_> { +impl Context<'_> { pub(crate) fn write_fmt( &self, arguments: fmt::Arguments<'_>, ) -> impl Future + Send + '_ + use<'_> { let buf = format!("{arguments}"); - self.output.lock().then(|mut output| async move { - output.write_all(buf.as_bytes()).await.map_err(Into::into) + self.output.lock().then(async move |mut output| { + output.write_all(buf.as_bytes()).map_err(Into::into).await }) } @@ -32,8 +32,8 @@ impl Command<'_> { &'a self, s: &'a str, ) -> impl Future + Send + 'a { - self.output.lock().then(move |mut output| async move { - output.write_all(s.as_bytes()).await.map_err(Into::into) + self.output.lock().then(async move |mut output| { + output.write_all(s.as_bytes()).map_err(Into::into).await }) } } diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 87ca03a0..d0debc2a 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,7 @@ use std::{ }; use conduwuit::{ - Error, Result, debug_error, err, info, + Err, Result, debug_error, err, info, matrix::pdu::{PduEvent, PduId, RawPduId}, trace, utils, utils::{ @@ -17,10 +17,9 @@ use conduwuit::{ }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, - ServerName, - api::{client::error::ErrorKind, 
federation::event::get_room_state}, - events::room::message::RoomMessageEventContent, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, + api::federation::event::get_room_state, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -31,28 +30,24 @@ use tracing_subscriber::EnvFilter; use crate::admin_command; #[admin_command] -pub(super) async fn echo(&self, message: Vec) -> Result { +pub(super) async fn echo(&self, message: Vec) -> Result { let message = message.join(" "); - - Ok(RoomMessageEventContent::notice_plain(message)) + self.write_str(&message).await } #[admin_command] -pub(super) async fn get_auth_chain( - &self, - event_id: Box, -) -> Result { +pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result { let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else { - return Ok(RoomMessageEventContent::notice_plain("Event not found.")); + return Err!("Event not found."); }; let room_id_str = event .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + .and_then(CanonicalJsonValue::as_str) + .ok_or_else(|| err!(Database("Invalid event in database")))?; let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + .map_err(|_| err!(Database("Invalid room id field in event in database")))?; let start = Instant::now(); let count = self @@ -65,51 +60,39 @@ pub(super) async fn get_auth_chain( .await; let elapsed = start.elapsed(); - Ok(RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {count} in {elapsed:?}" - ))) + let out = format!("Loaded auth chain with length {count} in {elapsed:?}"); + + self.write_str(&out).await } #[admin_command] -pub(super) async fn parse_pdu(&self) -> Result { +pub(super) async fn parse_pdu(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&EMPTY).trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details."); } let string = self.body[1..self.body.len().saturating_sub(1)].join("\n"); match serde_json::from_str(&string) { + | Err(e) => return Err!("Invalid json in command body: {e}"), | Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + | Err(e) => return Err!("Could not parse PDU JSON: {e:?}"), | Ok(hash) => { let event_id = OwnedEventId::parse(format!("${hash}")); - - match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), - ) { - | Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\n{pdu:#?}" - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\nCould not parse event: {e}" - ))), + match serde_json::from_value::(serde_json::to_value(value)?) 
{ + | Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"), + | Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"), } }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse PDU JSON: {e:?}" - ))), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn get_pdu(&self, event_id: Box) -> Result { +pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result { let mut outlier = false; let mut pdu_json = self .services @@ -124,21 +107,18 @@ pub(super) async fn get_pdu(&self, event_id: Box) -> Result return Err!("PDU not found locally."), | Ok(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::notice_markdown(format!( - "{}\n```json\n{}\n```", - if outlier { - "Outlier (Rejected / Soft Failed) PDU found in our database" - } else { - "PDU found in our database" - }, - json_text - ))) + let text = serde_json::to_string_pretty(&json)?; + let msg = if outlier { + "Outlier (Rejected / Soft Failed) PDU found in our database" + } else { + "PDU found in our database" + }; + write!(self, "{msg}\n```json\n{text}\n```",) }, - | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } + .await } #[admin_command] @@ -146,7 +126,7 @@ pub(super) async fn get_short_pdu( &self, shortroomid: ShortRoomId, shorteventid: ShortEventId, -) -> Result { +) -> Result { let pdu_id: RawPduId = PduId { shortroomid, shorteventid: shorteventid.into(), @@ -161,41 +141,33 @@ pub(super) async fn get_short_pdu( .await; match pdu_json { + | Err(_) => return Err!("PDU not found locally."), | Ok(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",))) + let json_text = serde_json::to_string_pretty(&json)?; + write!(self, "```json\n{json_text}\n```") }, - | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } + .await } #[admin_command] -pub(super) async fn get_remote_pdu_list( - &self, - server: Box, - force: bool, -) -> Result { +pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: bool) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver.",); } if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ fetching local PDUs from the database.", - )); + ); } if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&EMPTY).trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } let list = self @@ -209,18 +181,19 @@ pub(super) async fn get_remote_pdu_list( let mut failed_count: usize = 0; let mut success_count: usize = 0; - for pdu in list { + for event_id in list { if force { - match self.get_remote_pdu(Box::from(pdu), server.clone()).await { + match self + .get_remote_pdu(event_id.to_owned(), server.clone()) + .await + { | Err(e) => { failed_count = failed_count.saturating_add(1); self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to get remote PDU, ignoring error: {e}" - ))) - .await - .ok(); + .send_text(&format!("Failed to get remote PDU, ignoring error: {e}")) + .await; + warn!("Failed to get remote PDU, ignoring error: {e}"); }, | _ => { @@ -228,44 +201,48 @@ pub(super) async fn get_remote_pdu_list( }, } } else { - self.get_remote_pdu(Box::from(pdu), server.clone()).await?; + self.get_remote_pdu(event_id.to_owned(), server.clone()) + .await?; success_count = success_count.saturating_add(1); } } - Ok(RoomMessageEventContent::text_plain(format!( - "Fetched {success_count} remote PDUs successfully with {failed_count} failures" - ))) + let out = + format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures"); + + self.write_str(&out).await } #[admin_command] pub(super) async fn get_remote_pdu( &self, - event_id: Box, - server: Box, -) -> Result { + event_id: OwnedEventId, + server: OwnedServerName, +) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver."); } if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for \ fetching local PDUs.", - )); + ); } match self .services .sending .send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request { - event_id: event_id.clone().into(), + event_id: event_id.clone(), include_unredacted_content: None, }) .await { + | Err(e) => + return Err!( + "Remote server did not have PDU or failed sending request to remote server: {e}" + ), | Ok(response) => { let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { @@ -273,10 +250,9 @@ pub(super) async fn get_remote_pdu( "Requested event ID {event_id} from server but failed to convert from \ RawValue to CanonicalJsonObject (malformed event/response?): {e}" ); - Error::BadRequest( - ErrorKind::Unknown, - "Received response from server but failed to parse PDU", - ) + err!(Request(Unknown( + "Received response from server but failed to parse PDU" + ))) })?; trace!("Attempting to parse PDU: {:?}", &response.pdu); @@ -286,6 +262,7 @@ pub(super) async fn get_remote_pdu( .rooms .event_handler .parse_incoming_pdu(&response.pdu) + .boxed() .await; let (event_id, value, room_id) = match parsed_result { @@ -293,9 +270,7 @@ pub(super) async fn get_remote_pdu( | Err(e) => { warn!("Failed to parse PDU: {e}"); info!("Full PDU: {:?}", &response.pdu); - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse PDU remote server {server} sent us: {e}" - ))); + return Err!("Failed to parse PDU remote server {server} sent us: {e}"); }, }; @@ -307,30 +282,18 @@ pub(super) async fn get_remote_pdu( .rooms .timeline .backfill_pdu(&server, response.pdu) - .boxed() .await?; - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "{}\n```json\n{}\n```", - "Got PDU from specified server and handled as backfilled PDU successfully. \ - Event body:", - json_text - ))) + let text = serde_json::to_string_pretty(&json)?; + let msg = "Got PDU from specified server and handled as backfilled"; + write!(self, "{msg}. 
Event body:\n```json\n{text}\n```") }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Remote server did not have PDU or failed sending request to remote server: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn get_room_state( - &self, - room: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room).await?; let room_state: Vec<_> = self .services @@ -342,28 +305,24 @@ pub(super) async fn get_room_state( .await?; if room_state.is_empty() { - return Ok(RoomMessageEventContent::text_plain( - "Unable to find room state in our database (vector is empty)", - )); + return Err!("Unable to find room state in our database (vector is empty)",); } let json = serde_json::to_string_pretty(&room_state).map_err(|e| { - warn!("Failed converting room state vector in our database to pretty JSON: {e}"); - Error::bad_database( + err!(Database( "Failed to convert room state events to pretty JSON, possible invalid room state \ - events in our database", - ) + events in our database {e}", + )) })?; - Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json}\n```"))) + let out = format!("```json\n{json}\n```"); + self.write_str(&out).await } #[admin_command] -pub(super) async fn ping(&self, server: Box) -> Result { +pub(super) async fn ping(&self, server: OwnedServerName) -> Result { if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to send federation requests to ourselves.", - )); + return Err!("Not allowed to send federation requests to ourselves."); } let timer = tokio::time::Instant::now(); @@ -377,35 +336,27 @@ pub(super) async fn ping(&self, server: Box) -> Result { + return Err!("Failed sending federation request to specified server:\n\n{e}"); + }, | Ok(response) => { let ping_time = timer.elapsed(); - let json_text_res = serde_json::to_string_pretty(&response.server); - if let Ok(json) = json_text_res { - return Ok(RoomMessageEventContent::notice_markdown(format!( - "Got response which took {ping_time:?} time:\n```json\n{json}\n```" - ))); - } + let out = if let Ok(json) = json_text_res { + format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```") + } else { + format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}") + }; - Ok(RoomMessageEventContent::text_plain(format!( - "Got non-JSON response which took {ping_time:?} time:\n{response:?}" - ))) - }, - | Err(e) => { - warn!( - "Failed sending federation request to specified server from ping debug command: \ - {e}" - ); - Ok(RoomMessageEventContent::text_plain(format!( - "Failed sending federation request to specified server:\n\n{e}", - ))) + write!(self, "{out}") }, } + .await } #[admin_command] -pub(super) async fn force_device_list_updates(&self) -> Result { +pub(super) async fn force_device_list_updates(&self) -> Result { // Force E2EE device list updates for all users self.services .users @@ -413,27 +364,17 @@ pub(super) async fn force_device_list_updates(&self) -> Result, - reset: bool, -) -> Result { +pub(super) async fn change_log_level(&self, filter: Option, reset: bool) -> Result { let handles = &["console"]; if reset { let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) { | Ok(s) => s, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Log level from config appears to be invalid now: {e}" - ))); - }, + | Err(e) => return Err!("Log level 
from config appears to be invalid now: {e}"), }; match self @@ -443,16 +384,12 @@ pub(super) async fn change_log_level( .reload .reload(&old_filter_layer, Some(handles)) { + | Err(e) => + return Err!("Failed to modify and reload the global tracing log level: {e}"), | Ok(()) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Successfully changed log level back to config value {}", - self.services.server.config.log - ))); - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); + let value = &self.services.server.config.log; + let out = format!("Successfully changed log level back to config value {value}"); + return self.write_str(&out).await; }, } } @@ -460,11 +397,7 @@ pub(super) async fn change_log_level( if let Some(filter) = filter { let new_filter_layer = match EnvFilter::try_new(filter) { | Ok(s) => s, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Invalid log level filter specified: {e}" - ))); - }, + | Err(e) => return Err!("Invalid log level filter specified: {e}"), }; match self @@ -474,90 +407,75 @@ pub(super) async fn change_log_level( .reload .reload(&new_filter_layer, Some(handles)) { - | Ok(()) => { - return Ok(RoomMessageEventContent::text_plain("Successfully changed log level")); - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); - }, + | Ok(()) => return self.write_str("Successfully changed log level").await, + | Err(e) => + return Err!("Failed to modify and reload the global tracing log level: {e}"), } } - Ok(RoomMessageEventContent::text_plain("No log level was specified.")) + Err!("No log level was specified.") } #[admin_command] -pub(super) async fn sign_json(&self) -> Result { +pub(super) async fn sign_json(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details."); } let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str(&string) { + | Err(e) => return Err!("Invalid json: {e}"), | Ok(mut value) => { - self.services - .server_keys - .sign_json(&mut value) - .expect("our request json is what ruma expects"); - let json_text = - serde_json::to_string_pretty(&value).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::text_plain(json_text)) + self.services.server_keys.sign_json(&mut value)?; + let json_text = serde_json::to_string_pretty(&value)?; + write!(self, "{json_text}") }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } + .await } #[admin_command] -pub(super) async fn verify_json(&self) -> Result { +pub(super) async fn verify_json(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details."); } let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str::(&string) { + | Err(e) => return Err!("Invalid json: {e}"), | Ok(value) => match self.services.server_keys.verify_json(&value, None).await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Signature verification failed: {e}" - ))), + | Err(e) => return Err!("Signature verification failed: {e}"), + | Ok(()) => write!(self, "Signature correct"), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } + .await } #[admin_command] -pub(super) async fn verify_pdu(&self, event_id: Box) -> Result { +pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result { + use ruma::signatures::Verified; + let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?; event.remove("event_id"); let msg = match self.services.server_keys.verify_event(&event, None).await { - | Ok(ruma::signatures::Verified::Signatures) => - "signatures OK, but content hash failed (redaction).", - | Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.", | Err(e) => return Err(e), + | Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).", + | Ok(Verified::All) => "signatures and hashes OK.", }; - Ok(RoomMessageEventContent::notice_plain(msg)) + self.write_str(msg).await } #[admin_command] #[tracing::instrument(skip(self))] -pub(super) async fn first_pdu_in_room( - &self, - room_id: Box, -) -> Result { +pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { if !self .services .rooms @@ -565,9 +483,7 @@ pub(super) async fn first_pdu_in_room( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID.",); } let first_pdu = self @@ -576,17 +492,15 @@ pub(super) async fn first_pdu_in_room( .timeline .first_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the first PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the first PDU in database")))?; - Ok(RoomMessageEventContent::text_plain(format!("{first_pdu:?}"))) + let out = format!("{first_pdu:?}"); + self.write_str(&out).await } #[admin_command] #[tracing::instrument(skip(self))] -pub(super) async fn latest_pdu_in_room( - &self, - room_id: Box, -) -> Result { +pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { if !self .services .rooms @@ -594,9 +508,7 @@ pub(super) async fn latest_pdu_in_room( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID."); } let latest_pdu = self @@ -605,18 +517,19 @@ pub(super) async fn latest_pdu_in_room( .timeline .latest_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; - Ok(RoomMessageEventContent::text_plain(format!("{latest_pdu:?}"))) + let out = format!("{latest_pdu:?}"); + self.write_str(&out).await } #[admin_command] 
#[tracing::instrument(skip(self))] pub(super) async fn force_set_room_state_from_server( &self, - room_id: Box, - server_name: Box, -) -> Result { + room_id: OwnedRoomId, + server_name: OwnedServerName, +) -> Result { if !self .services .rooms @@ -624,9 +537,7 @@ pub(super) async fn force_set_room_state_from_server( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID."); } let first_pdu = self @@ -635,7 +546,7 @@ pub(super) async fn force_set_room_state_from_server( .timeline .latest_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; let room_version = self.services.rooms.state.get_room_version(&room_id).await?; @@ -645,10 +556,9 @@ pub(super) async fn force_set_room_state_from_server( .services .sending .send_federation_request(&server_name, get_room_state::v1::Request { - room_id: room_id.clone().into(), + room_id: room_id.clone(), event_id: first_pdu.event_id.clone(), }) - .boxed() .await?; for pdu in remote_state_response.pdus.clone() { @@ -657,7 +567,6 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .parse_incoming_pdu(&pdu) - .boxed() .await { | Ok(t) => t, @@ -721,7 +630,6 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .resolve_state(&room_id, &room_version, state) - .boxed() .await?; info!("Forcing new room state"); @@ -737,6 +645,7 @@ pub(super) async fn force_set_room_state_from_server( .await?; let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await; + self.services .rooms .state @@ -753,21 +662,18 @@ pub(super) async fn force_set_room_state_from_server( .update_joined_count(&room_id) .await; - drop(state_lock); - - Ok(RoomMessageEventContent::text_plain( - "Successfully forced the room state from the requested remote server.", - )) + self.write_str("Successfully forced the room state from the requested remote server.") + .await } #[admin_command] pub(super) async fn get_signing_keys( &self, - server_name: Option>, - notary: Option>, + server_name: Option, + notary: Option, query: bool, -) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); +) -> Result { + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone()); if let Some(notary) = notary { let signing_keys = self @@ -776,9 +682,8 @@ pub(super) async fn get_signing_keys( .notary_request(¬ary, &server_name) .await?; - return Ok(RoomMessageEventContent::notice_markdown(format!( - "```rs\n{signing_keys:#?}\n```" - ))); + let out = format!("```rs\n{signing_keys:#?}\n```"); + return self.write_str(&out).await; } let signing_keys = if query { @@ -793,17 +698,13 @@ pub(super) async fn get_signing_keys( .await? 
}; - Ok(RoomMessageEventContent::notice_markdown(format!( - "```rs\n{signing_keys:#?}\n```" - ))) + let out = format!("```rs\n{signing_keys:#?}\n```"); + self.write_str(&out).await } #[admin_command] -pub(super) async fn get_verify_keys( - &self, - server_name: Option>, -) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); +pub(super) async fn get_verify_keys(&self, server_name: Option) -> Result { + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone()); let keys = self .services @@ -818,26 +719,24 @@ pub(super) async fn get_verify_keys( writeln!(out, "| {key_id} | {key:?} |")?; } - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&out).await } #[admin_command] pub(super) async fn resolve_true_destination( &self, - server_name: Box, + server_name: OwnedServerName, no_cache: bool, -) -> Result { +) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver.",); } if server_name == self.services.server.name { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ fetching local PDUs.", - )); + ); } let actual = self @@ -846,13 +745,12 @@ pub(super) async fn resolve_true_destination( .resolve_actual_dest(&server_name, !no_cache) .await?; - let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host,); - - Ok(RoomMessageEventContent::text_markdown(msg)) + let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host); + self.write_str(&msg).await } #[admin_command] -pub(super) async fn memory_stats(&self, opts: Option) -> Result { +pub(super) async fn memory_stats(&self, opts: Option) -> Result { const OPTS: &str = "abcdefghijklmnopqrstuvwxyz"; let opts: String = OPTS @@ -871,13 +769,12 @@ pub(super) async fn memory_stats(&self, opts: Option) -> Result Result { +pub(super) async fn runtime_metrics(&self) -> Result { let out = self.services.server.metrics.runtime_metrics().map_or_else( || "Runtime metrics are not available.".to_owned(), |metrics| { @@ -890,51 +787,51 @@ pub(super) async fn runtime_metrics(&self) -> Result { }, ); - Ok(RoomMessageEventContent::text_markdown(out)) + self.write_str(&out).await } #[cfg(not(tokio_unstable))] #[admin_command] -pub(super) async fn runtime_metrics(&self) -> Result { - Ok(RoomMessageEventContent::text_markdown( - "Runtime metrics require building with `tokio_unstable`.", - )) +pub(super) async fn runtime_metrics(&self) -> Result { + self.write_str("Runtime metrics require building with `tokio_unstable`.") + .await } #[cfg(tokio_unstable)] #[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { +pub(super) async fn runtime_interval(&self) -> Result { let out = self.services.server.metrics.runtime_interval().map_or_else( || "Runtime metrics are not available.".to_owned(), |metrics| format!("```rs\n{metrics:#?}\n```"), ); - Ok(RoomMessageEventContent::text_markdown(out)) + self.write_str(&out).await } #[cfg(not(tokio_unstable))] #[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { - Ok(RoomMessageEventContent::text_markdown( - "Runtime metrics require building with `tokio_unstable`.", - )) +pub(super) async fn runtime_interval(&self) -> Result { + self.write_str("Runtime metrics require building with `tokio_unstable`.") + .await } 
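The pattern this refactor applies to every admin command is visible above: handlers return the bare `Result` alias instead of `Result<RoomMessageEventContent>`, stream success output into the shared `Context` buffer via `write_str`/`write!`, and raise failures with the `Err!` macro so the error text becomes the reply. A minimal sketch of that shape, assuming the module's existing imports (`echo_upper` is a hypothetical command invented here for illustration, not part of this diff):

```rust
// Hypothetical command in the new style; not part of this diff.
#[admin_command]
pub(super) async fn echo_upper(&self, message: Vec<String>) -> Result {
	if message.is_empty() {
		// Failures short-circuit through Err!; the text becomes the reply.
		return Err!("Expected a message to echo.");
	}

	// Success output is streamed into the Context's output buffer.
	self.write_str(&message.join(" ").to_uppercase()).await
}
```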
#[admin_command] -pub(super) async fn time(&self) -> Result { +pub(super) async fn time(&self) -> Result { let now = SystemTime::now(); - Ok(RoomMessageEventContent::text_markdown(utils::time::format(now, "%+"))) + let now = utils::time::format(now, "%+"); + + self.write_str(&now).await } #[admin_command] -pub(super) async fn list_dependencies(&self, names: bool) -> Result { +pub(super) async fn list_dependencies(&self, names: bool) -> Result { if names { let out = info::cargo::dependencies_names().join(" "); - return Ok(RoomMessageEventContent::notice_markdown(out)); + return self.write_str(&out).await; } - let deps = info::cargo::dependencies(); let mut out = String::new(); + let deps = info::cargo::dependencies(); writeln!(out, "| name | version | features |")?; writeln!(out, "| ---- | ------- | -------- |")?; for (name, dep) in deps { @@ -945,10 +842,11 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result, map: Option, -) -> Result { +) -> Result { let map_name = map.as_ref().map_or(EMPTY, String::as_str); let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); self.services @@ -968,17 +866,11 @@ pub(super) async fn database_stats( let res = map.property(&property).expect("invalid property"); writeln!(self, "##### {name}:\n```\n{}\n```", res.trim()) }) - .await?; - - Ok(RoomMessageEventContent::notice_plain("")) + .await } #[admin_command] -pub(super) async fn database_files( - &self, - map: Option, - level: Option, -) -> Result { +pub(super) async fn database_files(&self, map: Option, level: Option) -> Result { let mut files: Vec<_> = self.services.db.db.file_list().collect::>()?; files.sort_by_key(|f| f.name.clone()); @@ -1005,16 +897,12 @@ pub(super) async fn database_files( file.column_family_name, ) }) - .await?; - - Ok(RoomMessageEventContent::notice_plain("")) + .await } #[admin_command] -pub(super) async fn trim_memory(&self) -> Result { +pub(super) async fn trim_memory(&self) -> Result { conduwuit::alloc::trim(None)?; - writeln!(self, "done").await?; - - Ok(RoomMessageEventContent::notice_plain("")) + writeln!(self, "done").await } diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index db04ccf4..9b86f18c 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -3,7 +3,7 @@ pub(crate) mod tester; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, OwnedRoomOrAliasId, RoomId, ServerName}; +use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName}; use service::rooms::short::{ShortEventId, ShortRoomId}; use self::tester::TesterCommand; @@ -20,7 +20,7 @@ pub(super) enum DebugCommand { /// - Get the auth_chain of a PDU GetAuthChain { /// An event ID (the $ character followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, }, /// - Parse and print a PDU from a JSON @@ -35,7 +35,7 @@ pub(super) enum DebugCommand { /// - Retrieve and print a PDU by EventID from the conduwuit database GetPdu { /// An event ID (a $ followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, }, /// - Retrieve and print a PDU by PduId from the conduwuit database @@ -52,11 +52,11 @@ pub(super) enum DebugCommand { /// (following normal event auth rules, handles it as an incoming PDU). GetRemotePdu { /// An event ID (a $ followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, /// Argument for us to attempt to fetch the event from the /// specified remote server. 
- server: Box, + server: OwnedServerName, }, /// - Same as `get-remote-pdu` but accepts a codeblock newline delimited @@ -64,7 +64,7 @@ pub(super) enum DebugCommand { GetRemotePduList { /// Argument for us to attempt to fetch all the events from the /// specified remote server. - server: Box, + server: OwnedServerName, /// If set, ignores errors, else stops at the first error/failure. #[arg(short, long)] @@ -88,10 +88,10 @@ pub(super) enum DebugCommand { /// - Get and display signing keys from local cache or remote server. GetSigningKeys { - server_name: Option>, + server_name: Option, #[arg(long)] - notary: Option>, + notary: Option, #[arg(short, long)] query: bool, @@ -99,14 +99,14 @@ pub(super) enum DebugCommand { /// - Get and display signing keys from local cache or remote server. GetVerifyKeys { - server_name: Option>, + server_name: Option, }, /// - Sends a federation request to the remote server's /// `/_matrix/federation/v1/version` endpoint and measures the latency it /// took for the server to respond Ping { - server: Box, + server: OwnedServerName, }, /// - Forces device lists for all local and remote users to be updated (as @@ -141,21 +141,21 @@ pub(super) enum DebugCommand { /// /// This re-verifies a PDU existing in the database found by ID. VerifyPdu { - event_id: Box, + event_id: OwnedEventId, }, /// - Prints the very first PDU in the specified room (typically /// m.room.create) FirstPduInRoom { /// The room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Prints the latest ("last") PDU in the specified room (typically a /// message) LatestPduInRoom { /// The room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Forcefully replaces the room state of our local copy of the specified @@ -174,9 +174,9 @@ pub(super) enum DebugCommand { /// `/_matrix/federation/v1/state/{roomId}`. 
ForceSetRoomStateFromServer { /// The impacted room ID - room_id: Box, + room_id: OwnedRoomId, /// The server we will use to query the room state for - server_name: Box, + server_name: OwnedServerName, }, /// - Runs a server name through conduwuit's true destination resolution @@ -184,7 +184,7 @@ pub(super) enum DebugCommand { /// /// Useful for debugging well-known issues ResolveTrueDestination { - server_name: Box, + server_name: OwnedServerName, #[arg(short, long)] no_cache: bool, diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index 005ee775..0a2b1516 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -1,7 +1,6 @@ -use conduwuit::Err; -use ruma::events::room::message::RoomMessageEventContent; +use conduwuit::{Err, Result}; -use crate::{Result, admin_command, admin_command_dispatch}; +use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, clap::Subcommand)] @@ -14,14 +13,14 @@ pub(crate) enum TesterCommand { #[rustfmt::skip] #[admin_command] -async fn panic(&self) -> Result { +async fn panic(&self) -> Result { panic!("panicked") } #[rustfmt::skip] #[admin_command] -async fn failure(&self) -> Result { +async fn failure(&self) -> Result { Err!("failed") } @@ -29,20 +28,20 @@ async fn failure(&self) -> Result { #[inline(never)] #[rustfmt::skip] #[admin_command] -async fn tester(&self) -> Result { +async fn tester(&self) -> Result { - Ok(RoomMessageEventContent::notice_plain("legacy")) + self.write_str("Ok").await } #[inline(never)] #[rustfmt::skip] #[admin_command] -async fn timer(&self) -> Result { +async fn timer(&self) -> Result { let started = std::time::Instant::now(); timed(self.body); let elapsed = started.elapsed(); - Ok(RoomMessageEventContent::notice_plain(format!("completed in {elapsed:#?}"))) + self.write_str(&format!("completed in {elapsed:#?}")).await } #[inline(never)] diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 240ffa6a..545dcbca 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -1,49 +1,48 @@ use std::fmt::Write; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{ - OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; use crate::{admin_command, get_room_info}; #[admin_command] -pub(super) async fn disable_room(&self, room_id: Box) -> Result { +pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, true); - Ok(RoomMessageEventContent::text_plain("Room disabled.")) + self.write_str("Room disabled.").await } #[admin_command] -pub(super) async fn enable_room(&self, room_id: Box) -> Result { +pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, false); - Ok(RoomMessageEventContent::text_plain("Room enabled.")) + self.write_str("Room enabled.").await } #[admin_command] -pub(super) async fn incoming_federation(&self) -> Result { - let map = self - .services - .rooms - .event_handler - .federation_handletime - .read() - .expect("locked"); - let mut msg = format!("Handling {} incoming pdus:\n", map.len()); +pub(super) async fn incoming_federation(&self) -> Result { + let msg = { + let map = self + .services + .rooms + .event_handler + .federation_handletime + .read() + .expect("locked"); - for (r, (e, i)) in map.iter() { - let elapsed = i.elapsed(); 
- writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?; - } + let mut msg = format!("Handling {} incoming pdus:\n", map.len()); + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?; + } - Ok(RoomMessageEventContent::text_plain(&msg)) + msg + }; + + self.write_str(&msg).await } #[admin_command] -pub(super) async fn fetch_support_well_known( - &self, - server_name: Box, -) -> Result { +pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName) -> Result { let response = self .services .client @@ -55,54 +54,44 @@ pub(super) async fn fetch_support_well_known( let text = response.text().await?; if text.is_empty() { - return Ok(RoomMessageEventContent::text_plain("Response text/body is empty.")); + return Err!("Response text/body is empty."); } if text.len() > 1500 { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Response text/body is over 1500 characters, assuming no support well-known.", - )); + ); } let json: serde_json::Value = match serde_json::from_str(&text) { | Ok(json) => json, | Err(_) => { - return Ok(RoomMessageEventContent::text_plain( - "Response text/body is not valid JSON.", - )); + return Err!("Response text/body is not valid JSON.",); }, }; let pretty_json: String = match serde_json::to_string_pretty(&json) { | Ok(json) => json, | Err(_) => { - return Ok(RoomMessageEventContent::text_plain( - "Response text/body is not valid JSON.", - )); + return Err!("Response text/body is not valid JSON.",); }, }; - Ok(RoomMessageEventContent::notice_markdown(format!( - "Got JSON response:\n\n```json\n{pretty_json}\n```" - ))) + self.write_str(&format!("Got JSON response:\n\n```json\n{pretty_json}\n```")) + .await } #[admin_command] -pub(super) async fn remote_user_in_rooms( - &self, - user_id: Box, -) -> Result { +pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result { if user_id.server_name() == self.services.server.name { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "User belongs to our server, please use `list-joined-rooms` user admin command \ instead.", - )); + ); } if !self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain( - "Remote user does not exist in our database.", - )); + return Err!("Remote user does not exist in our database.",); } let mut rooms: Vec<(OwnedRoomId, u64, String)> = self @@ -115,21 +104,19 @@ pub(super) async fn remote_user_in_rooms( .await; if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("User is not in any rooms.")); + return Err!("User is not in any rooms."); } rooms.sort_by_key(|r| r.1); rooms.reverse(); - let output = format!( - "Rooms {user_id} shares with us ({}):\n```\n{}\n```", - rooms.len(), - rooms - .iter() - .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) - .collect::>() - .join("\n") - ); + let num = rooms.len(); + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::text_markdown(output)) + self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",)) + .await } diff --git a/src/admin/federation/mod.rs b/src/admin/federation/mod.rs index 3adfd459..2c539adc 100644 --- a/src/admin/federation/mod.rs +++ b/src/admin/federation/mod.rs @@ -2,7 +2,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; 
-use ruma::{RoomId, ServerName, UserId}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; use crate::admin_command_dispatch; @@ -14,12 +14,12 @@ pub(super) enum FederationCommand { /// - Disables incoming federation handling for a room. DisableRoom { - room_id: Box, + room_id: OwnedRoomId, }, /// - Enables incoming federation handling for a room again. EnableRoom { - room_id: Box, + room_id: OwnedRoomId, }, /// - Fetch `/.well-known/matrix/support` from the specified server @@ -32,11 +32,11 @@ pub(super) enum FederationCommand { /// moderation, and security inquiries. This command provides a way to /// easily fetch that information. FetchSupportWellKnown { - server_name: Box, + server_name: OwnedServerName, }, /// - Lists all the rooms we share/track with the specified *remote* user RemoteUserInRooms { - user_id: Box, + user_id: OwnedUserId, }, } diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index aeefa9f2..7aed28db 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,26 +1,22 @@ use std::time::Duration; use conduwuit::{ - Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, + Err, Result, debug, debug_info, debug_warn, error, info, trace, + utils::time::parse_timepoint_ago, warn, }; use conduwuit_service::media::Dim; -use ruma::{ - EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName, - events::room::message::RoomMessageEventContent, -}; +use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName}; use crate::{admin_command, utils::parse_local_user_id}; #[admin_command] pub(super) async fn delete( &self, - mxc: Option>, - event_id: Option>, -) -> Result { + mxc: Option, + event_id: Option, +) -> Result { if event_id.is_some() && mxc.is_some() { - return Ok(RoomMessageEventContent::text_plain( - "Please specify either an MXC or an event ID, not both.", - )); + return Err!("Please specify either an MXC or an event ID, not both.",); } if let Some(mxc) = mxc { @@ -30,9 +26,7 @@ pub(super) async fn delete( .delete(&mxc.as_str().try_into()?) 
.await?; - return Ok(RoomMessageEventContent::text_plain( - "Deleted the MXC from our database and on our filesystem.", - )); + return Err!("Deleted the MXC from our database and on our filesystem.",); } if let Some(event_id) = event_id { @@ -113,41 +107,36 @@ pub(super) async fn delete( let final_url = url.to_string().replace('"', ""); mxc_urls.push(final_url); } else { - info!( + warn!( "Found a URL in the event ID {event_id} but did not \ start with mxc://, ignoring" ); } } else { - info!("No \"url\" key in \"file\" key."); + error!("No \"url\" key in \"file\" key."); } } } } else { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Event ID does not have a \"content\" key or failed parsing the \ event ID JSON.", - )); + ); } } else { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Event ID does not have a \"content\" key, this is not a message or an \ event type that contains media.", - )); + ); } }, | _ => { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not exist or is not known to us.", - )); + return Err!("Event ID does not exist or is not known to us.",); }, } if mxc_urls.is_empty() { - info!("Parsed event ID {event_id} but did not contain any MXC URLs."); - return Ok(RoomMessageEventContent::text_plain( - "Parsed event ID but found no MXC URLs.", - )); + return Err!("Parsed event ID but found no MXC URLs.",); } let mut mxc_deletion_count: usize = 0; @@ -170,27 +159,27 @@ pub(super) async fn delete( } } - return Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from \ - event ID {event_id}." - ))); + return self + .write_str(&format!( + "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem \ + from event ID {event_id}." + )) + .await; } - Ok(RoomMessageEventContent::text_plain( + Err!( "Please specify either an MXC using --mxc or an event ID using --event-id of the \ - message containing an image. See --help for details.", - )) + message containing an image. See --help for details." + ) } #[admin_command] -pub(super) async fn delete_list(&self) -> Result { +pub(super) async fn delete_list(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } let mut failed_parsed_mxcs: usize = 0; @@ -204,7 +193,6 @@ pub(super) async fn delete_list(&self) -> Result { .try_into() .inspect_err(|e| { debug_warn!("Failed to parse user-provided MXC URI: {e}"); - failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1); }) .ok() @@ -227,10 +215,11 @@ pub(super) async fn delete_list(&self) -> Result { } } - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \ and the filesystem. 
{failed_parsed_mxcs} MXCs failed to be parsed from the database.", - ))) + )) + .await } #[admin_command] @@ -240,11 +229,9 @@ pub(super) async fn delete_past_remote_media( before: bool, after: bool, yes_i_want_to_delete_local_media: bool, -) -> Result { +) -> Result { if before && after { - return Ok(RoomMessageEventContent::text_plain( - "Please only pick one argument, --before or --after.", - )); + return Err!("Please only pick one argument, --before or --after.",); } assert!(!(before && after), "--before and --after should not be specified together"); @@ -260,35 +247,28 @@ pub(super) async fn delete_past_remote_media( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] -pub(super) async fn delete_all_from_user( - &self, - username: String, -) -> Result { +pub(super) async fn delete_all_from_user(&self, username: String) -> Result { let user_id = parse_local_user_id(self.services, &username)?; let deleted_count = self.services.media.delete_from_user(&user_id).await?; - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] pub(super) async fn delete_all_from_server( &self, - server_name: Box, + server_name: OwnedServerName, yes_i_want_to_delete_local_media: bool, -) -> Result { +) -> Result { if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media { - return Ok(RoomMessageEventContent::text_plain( - "This command only works for remote media by default.", - )); + return Err!("This command only works for remote media by default.",); } let Ok(all_mxcs) = self @@ -298,9 +278,7 @@ pub(super) async fn delete_all_from_server( .await .inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}")) else { - return Ok(RoomMessageEventContent::text_plain( - "Failed to get MXC URIs from our database", - )); + return Err!("Failed to get MXC URIs from our database",); }; let mut deleted_count: usize = 0; @@ -336,17 +314,16 @@ pub(super) async fn delete_all_from_server( } } - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] -pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { +pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let metadata = self.services.media.get_metadata(&mxc).await; - Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```"))) + self.write_str(&format!("```\n{metadata:#?}\n```")).await } #[admin_command] @@ -355,7 +332,7 @@ pub(super) async fn get_remote_file( mxc: OwnedMxcUri, server: Option, timeout: u32, -) -> Result { +) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); let mut result = self @@ -368,8 +345,8 @@ pub(super) async fn get_remote_file( let len = result.content.as_ref().expect("content").len(); result.content.as_mut().expect("content").clear(); - let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) + .await } #[admin_command] @@ -380,7 +357,7 @@ pub(super) async fn 
get_remote_thumbnail( timeout: u32, width: u32, height: u32, -) -> Result { +) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); let dim = Dim::new(width, height, None); @@ -394,6 +371,6 @@ pub(super) async fn get_remote_thumbnail( let len = result.content.as_ref().expect("content").len(); result.content.as_mut().expect("content").clear(); - let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) + .await } diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index 641834b2..d1e6cd3a 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -3,7 +3,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, MxcUri, OwnedMxcUri, OwnedServerName, ServerName}; +use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName}; use crate::admin_command_dispatch; @@ -15,12 +15,12 @@ pub(super) enum MediaCommand { Delete { /// The MXC URL to delete #[arg(long)] - mxc: Option>, + mxc: Option, /// - The message event ID which contains the media and thumbnail MXC /// URLs #[arg(long)] - event_id: Option>, + event_id: Option, }, /// - Deletes a codeblock list of MXC URLs from our database and on the @@ -57,7 +57,7 @@ pub(super) enum MediaCommand { /// - Deletes all remote media from the specified remote server. This will /// always ignore errors by default. DeleteAllFromServer { - server_name: Box, + server_name: OwnedServerName, /// Long argument to delete local media #[arg(long)] diff --git a/src/admin/mod.rs b/src/admin/mod.rs index 695155e8..1f777fa9 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -4,7 +4,7 @@ #![allow(clippy::too_many_arguments)] pub(crate) mod admin; -pub(crate) mod command; +pub(crate) mod context; pub(crate) mod processor; mod tests; pub(crate) mod utils; @@ -23,13 +23,9 @@ extern crate conduwuit_api as api; extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; -pub(crate) use conduwuit::Result; pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch}; -pub(crate) use crate::{ - command::Command, - utils::{escape_html, get_room_info}, -}; +pub(crate) use crate::{context::Context, utils::get_room_info}; pub(crate) const PAGE_SIZE: usize = 100; diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 53a15098..f7b7140f 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -33,7 +33,7 @@ use service::{ use tracing::Level; use tracing_subscriber::{EnvFilter, filter::LevelFilter}; -use crate::{Command, admin, admin::AdminCommand}; +use crate::{admin, admin::AdminCommand, context::Context}; #[must_use] pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) } @@ -58,7 +58,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce | Ok(parsed) => parsed, }; - let context = Command { + let context = Context { services: &services, body: &body, timer: SystemTime::now(), @@ -94,7 +94,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce #[allow(clippy::result_large_err)] fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { let link = - "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺"; + "Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 
🥺"; let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}"); let content = RoomMessageEventContent::notice_markdown(msg); error!("Panic while processing command: {error:?}"); @@ -103,7 +103,7 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { /// Parse and process a message from the admin room async fn process( - context: &Command<'_>, + context: &Context<'_>, command: AdminCommand, args: &[String], ) -> (Result, String) { @@ -132,7 +132,7 @@ async fn process( (result, output) } -fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { +fn capture_create(context: &Context<'_>) -> (Arc, Arc>) { let env_config = &context.services.server.config.admin_log_capture; let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| { warn!("admin_log_capture filter invalid: {e:?}"); diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index b2bf5e6d..228d2120 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, OwnedUserId}; use crate::{admin_command, admin_command_dispatch}; @@ -12,31 +12,31 @@ pub(crate) enum AccountDataCommand { /// - Returns all changes to the account data that happened after `since`. ChangesSince { /// Full user ID - user_id: Box, + user_id: OwnedUserId, /// UNIX timestamp since (u64) since: u64, /// Optional room ID of the account data - room_id: Option>, + room_id: Option, }, /// - Searches the account data for a specific kind. AccountDataGet { /// Full user ID - user_id: Box, + user_id: OwnedUserId, /// Account data event type kind: String, /// Optional room ID of the account data - room_id: Option>, + room_id: Option, }, } #[admin_command] async fn changes_since( &self, - user_id: Box, + user_id: OwnedUserId, since: u64, - room_id: Option>, -) -> Result { + room_id: Option, +) -> Result { let timer = tokio::time::Instant::now(); let results: Vec<_> = self .services @@ -46,18 +46,17 @@ async fn changes_since( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await } #[admin_command] async fn account_data_get( &self, - user_id: Box, + user_id: OwnedUserId, kind: String, - room_id: Option>, -) -> Result { + room_id: Option, +) -> Result { let timer = tokio::time::Instant::now(); let results = self .services @@ -66,7 +65,6 @@ async fn account_data_get( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await } diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index f9e1fd2c..28bf6451 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -1,7 +1,8 @@ use clap::Subcommand; use conduwuit::Result; +use futures::TryStreamExt; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/appservice.rs @@ -9,7 +10,7 @@ pub(crate) enum AppserviceCommand { /// - Gets the appservice registration info/details from the ID as a string 
GetRegistration { /// Appservice registration ID - appservice_id: Box, + appservice_id: String, }, /// - Gets all appservice registrations with their ID and registration info @@ -17,7 +18,7 @@ pub(crate) enum AppserviceCommand { } /// All the getters and iterators from src/database/key_value/appservice.rs -pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: AppserviceCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { @@ -31,7 +32,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_> }, | AppserviceCommand::All => { let timer = tokio::time::Instant::now(); - let results = services.appservice.all().await; + let results: Vec<_> = services.appservice.iter_db_ids().try_collect().await?; let query_time = timer.elapsed(); write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 1642f7cd..c8c1f512 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -1,8 +1,8 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::ServerName; +use ruma::OwnedServerName; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/globals.rs @@ -11,17 +11,17 @@ pub(crate) enum GlobalsCommand { CurrentCount, - LastCheckForUpdatesId, + LastCheckForAnnouncementsId, /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// for the server. SigningKeysFor { - origin: Box, + origin: OwnedServerName, }, } /// All the getters and iterators from src/database/key_value/globals.rs -pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { @@ -39,9 +39,12 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, - | GlobalsCommand::LastCheckForUpdatesId => { + | GlobalsCommand::LastCheckForAnnouncementsId => { let timer = tokio::time::Instant::now(); - let results = services.updates.last_check_for_updates_id().await; + let results = services + .announcements + .last_check_for_announcements_id() + .await; let query_time = timer.elapsed(); write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 38272749..5b7ead4b 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -1,9 +1,9 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::UserId; +use ruma::OwnedUserId; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/presence.rs @@ -11,7 +11,7 @@ pub(crate) enum PresenceCommand { /// - Returns the latest presence event for the given user. 
GetPresence { /// Full user ID - user_id: Box, + user_id: OwnedUserId, }, /// - Iterator of the most recent presence updates that happened after the @@ -23,7 +23,7 @@ pub(crate) enum PresenceCommand { } /// All the getters and iterators in key_value/presence.rs -pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: PresenceCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 34edf4db..0d0e6cc9 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -1,19 +1,19 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::UserId; +use ruma::OwnedUserId; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum PusherCommand { /// - Returns all the pushers for the user. GetPushers { /// Full user ID - user_id: Box, + user_id: OwnedUserId, }, } -pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: PusherCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index c503eee5..0e248c65 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -11,7 +11,6 @@ use conduwuit::{ use conduwuit_database::Map; use conduwuit_service::Services; use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; -use ruma::events::room::message::RoomMessageEventContent; use tokio::time::Instant; use crate::{admin_command, admin_command_dispatch}; @@ -170,7 +169,7 @@ pub(super) async fn compact( into: Option, parallelism: Option, exhaustive: bool, -) -> Result { +) -> Result { use conduwuit_database::compact::Options; let default_all_maps: Option<_> = map.is_none().then(|| { @@ -221,17 +220,11 @@ pub(super) async fn compact( let results = results.await; let query_time = timer.elapsed(); self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_count( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_count(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -242,17 +235,11 @@ pub(super) async fn raw_count( let query_time = timer.elapsed(); self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_keys( - &self, - map: String, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys(&self, map: String, prefix: Option) -> Result { writeln!(self, "```").boxed().await?; let map = self.services.db.get(map.as_str())?; @@ -266,18 +253,12 @@ pub(super) async fn raw_keys( .await?; let query_time = timer.elapsed(); - let out = format!("\n```\n\nQuery completed in {query_time:?}"); - self.write_str(out.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_keys_sizes( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys_sizes(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = 
Instant::now(); @@ -294,18 +275,12 @@ pub(super) async fn raw_keys_sizes( .await; let query_time = timer.elapsed(); - let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); - self.write_str(result.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) + .await }
#[admin_command] -pub(super) async fn raw_keys_total( - &self, - map: Option<String>, - prefix: Option<String>, -) -> Result<RoomMessageEventContent> { +pub(super) async fn raw_keys_total(&self, map: Option<String>, prefix: Option<String>) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -318,19 +293,12 @@ pub(super) async fn raw_keys_total( .await; let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) + .await }
#[admin_command] -pub(super) async fn raw_vals_sizes( - &self, - map: Option<String>, - prefix: Option<String>, -) -> Result<RoomMessageEventContent> { +pub(super) async fn raw_vals_sizes(&self, map: Option<String>, prefix: Option<String>) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -348,18 +316,12 @@ pub(super) async fn raw_vals_sizes( .await; let query_time = timer.elapsed(); - let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); - self.write_str(result.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) + .await }
#[admin_command] -pub(super) async fn raw_vals_total( - &self, - map: Option<String>, - prefix: Option<String>, -) -> Result<RoomMessageEventContent> { +pub(super) async fn raw_vals_total(&self, map: Option<String>, prefix: Option<String>) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -373,19 +335,12 @@ pub(super) async fn raw_vals_total( .await; let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) + .await }
#[admin_command] -pub(super) async fn raw_iter( - &self, - map: String, - prefix: Option<String>, -) -> Result<RoomMessageEventContent> { +pub(super) async fn raw_iter(&self, map: String, prefix: Option<String>) -> Result { writeln!(self, "```").await?; let map = self.services.db.get(&map)?; @@ -401,9 +356,7 @@ pub(super) async fn raw_iter( let query_time = timer.elapsed(); self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await }
#[admin_command] @@ -412,7 +365,7 @@ pub(super) async fn raw_keys_from( map: String, start: String, limit: Option<usize>, -) -> Result<RoomMessageEventContent> { +) -> Result { writeln!(self, "```").await?; let map = self.services.db.get(&map)?; @@ -426,9 +379,7 @@ pub(super) async fn raw_keys_from( let query_time = timer.elapsed(); self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await }
#[admin_command] @@ -437,7 +388,7 @@ pub(super) async fn raw_iter_from( map: String, start: String, limit: Option<usize>, -) -> Result<RoomMessageEventContent> { +) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); let result = map @@ -449,41 +400,38 @@ pub(super) async fn raw_iter_from( .await?; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in 
{query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -pub(super) async fn raw_del(&self, map: String, key: String) -> Result { +pub(super) async fn raw_del(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); map.remove(&key); - let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Operation completed in {query_time:?}" - ))) + let query_time = timer.elapsed(); + self.write_str(&format!("Operation completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_get(&self, map: String, key: String) -> Result { +pub(super) async fn raw_get(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); let handle = map.get(&key).await?; + let query_time = timer.elapsed(); let result = String::from_utf8_lossy(&handle); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) + .await } #[admin_command] -pub(super) async fn raw_maps(&self) -> Result { +pub(super) async fn raw_maps(&self) -> Result { let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect(); - Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}"))) + self.write_str(&format!("{list:#?}")).await } fn with_maps_or<'a>( diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 10748d88..4a39a40e 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{Result, utils::time}; use futures::StreamExt; -use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent}; +use ruma::OwnedServerName; use crate::{admin_command, admin_command_dispatch}; @@ -21,10 +21,7 @@ pub(crate) enum ResolverCommand { } #[admin_command] -async fn destinations_cache( - &self, - server_name: Option, -) -> Result { +async fn destinations_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedDest; writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?; @@ -44,11 +41,11 @@ async fn destinations_cache( .await?; } - Ok(RoomMessageEventContent::notice_plain("")) + Ok(()) } #[admin_command] -async fn overrides_cache(&self, server_name: Option) -> Result { +async fn overrides_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedOverride; writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?; @@ -70,5 +67,5 @@ async fn overrides_cache(&self, server_name: Option) -> Result, + alias: OwnedRoomAliasId, }, /// - Iterator of all our local room aliases for the room ID LocalAliasesForRoom { /// Full room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Iterator of all our local aliases in our database with their room IDs @@ -24,7 +24,7 @@ pub(crate) enum RoomAliasCommand { } /// All the getters and iterators in src/database/key_value/rooms/alias.rs -pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 1de5c02d..c64cd173 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,85 +1,85 @@ use 
clap::Subcommand; -use conduwuit::{Error, Result}; +use conduwuit::Result; use futures::StreamExt; -use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum RoomStateCacheCommand { ServerInRoom { - server: Box, - room_id: Box, + server: OwnedServerName, + room_id: OwnedRoomId, }, RoomServers { - room_id: Box, + room_id: OwnedRoomId, }, ServerRooms { - server: Box, + server: OwnedServerName, }, RoomMembers { - room_id: Box, + room_id: OwnedRoomId, }, LocalUsersInRoom { - room_id: Box, + room_id: OwnedRoomId, }, ActiveLocalUsersInRoom { - room_id: Box, + room_id: OwnedRoomId, }, RoomJoinedCount { - room_id: Box, + room_id: OwnedRoomId, }, RoomInvitedCount { - room_id: Box, + room_id: OwnedRoomId, }, RoomUserOnceJoined { - room_id: Box, + room_id: OwnedRoomId, }, RoomMembersInvited { - room_id: Box, + room_id: OwnedRoomId, }, GetInviteCount { - room_id: Box, - user_id: Box, + room_id: OwnedRoomId, + user_id: OwnedUserId, }, GetLeftCount { - room_id: Box, - user_id: Box, + room_id: OwnedRoomId, + user_id: OwnedUserId, }, RoomsJoined { - user_id: Box, + user_id: OwnedUserId, }, RoomsLeft { - user_id: Box, + user_id: OwnedUserId, }, RoomsInvited { - user_id: Box, + user_id: OwnedUserId, }, InviteState { - user_id: Box, - room_id: Box, + user_id: OwnedUserId, + room_id: OwnedRoomId, }, } -pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context<'_>) -> Result { let services = context.services; - let c = match subcommand { + match subcommand { | RoomStateCacheCommand::ServerInRoom { server, room_id } => { let timer = tokio::time::Instant::now(); let result = services @@ -89,9 +89,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomServers { room_id } => { let timer = tokio::time::Instant::now(); @@ -104,9 +106,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::ServerRooms { server } => { let timer = tokio::time::Instant::now(); @@ -119,9 +123,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomMembers { room_id } => { let timer = tokio::time::Instant::now(); @@ -134,9 +140,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, 
Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::LocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -149,9 +157,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -164,18 +174,22 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomJoinedCount { room_id } => { let timer = tokio::time::Instant::now(); let results = services.rooms.state_cache.room_joined_count(&room_id).await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomInvitedCount { room_id } => { let timer = tokio::time::Instant::now(); @@ -186,9 +200,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomUserOnceJoined { room_id } => { let timer = tokio::time::Instant::now(); @@ -201,9 +217,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomMembersInvited { room_id } => { let timer = tokio::time::Instant::now(); @@ -216,9 +234,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::GetInviteCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); @@ -229,9 +249,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, 
Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::GetLeftCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); @@ -242,9 +264,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsJoined { user_id } => { let timer = tokio::time::Instant::now(); @@ -257,9 +281,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsInvited { user_id } => { let timer = tokio::time::Instant::now(); @@ -271,9 +297,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsLeft { user_id } => { let timer = tokio::time::Instant::now(); @@ -285,9 +313,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::InviteState { user_id, room_id } => { let timer = tokio::time::Instant::now(); @@ -298,13 +328,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, - }?; - - context.write_str(c.body()).await?; - - Ok(()) + } } diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs index 6f08aee9..0fd22ca7 100644 --- a/src/admin/query/room_timeline.rs +++ b/src/admin/query/room_timeline.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{PduCount, Result, utils::stream::TryTools}; use futures::TryStreamExt; -use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomOrAliasId; use crate::{admin_command, admin_command_dispatch}; @@ -24,7 +24,7 @@ pub(crate) enum RoomTimelineCommand { } #[admin_command] -pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { +pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result 
{ let room_id = self.services.rooms.alias.resolve(&room_id).await?; let result = self @@ -34,7 +34,7 @@ pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result, limit: Option, -) -> Result { +) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let from: Option = from.as_deref().map(str::parse).transpose()?; @@ -57,5 +57,5 @@ pub(super) async fn pdus( .try_collect() .await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}"))) + self.write_str(&format!("{result:#?}")).await } diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index a148f718..8b1676bc 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,10 +1,10 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedServerName, OwnedUserId}; use service::sending::Destination; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/sending.rs @@ -27,9 +27,9 @@ pub(crate) enum SendingCommand { #[arg(short, long)] appservice_id: Option, #[arg(short, long)] - server_name: Option>, + server_name: Option, #[arg(short, long)] - user_id: Option>, + user_id: Option, #[arg(short, long)] push_key: Option, }, @@ -49,30 +49,20 @@ pub(crate) enum SendingCommand { #[arg(short, long)] appservice_id: Option, #[arg(short, long)] - server_name: Option>, + server_name: Option, #[arg(short, long)] - user_id: Option>, + user_id: Option, #[arg(short, long)] push_key: Option, }, GetLatestEduCount { - server_name: Box, + server_name: OwnedServerName, }, } /// All the getters and iterators in key_value/sending.rs -pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result { - let c = reprocess(subcommand, context).await?; - context.write_str(c.body()).await?; - Ok(()) -} - -/// All the getters and iterators in key_value/sending.rs -pub(super) async fn reprocess( - subcommand: SendingCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { @@ -82,9 +72,11 @@ pub(super) async fn reprocess( let active_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" + )) + .await }, | SendingCommand::QueuedRequests { appservice_id, @@ -97,19 +89,19 @@ pub(super) async fn reprocess( && user_id.is_none() && push_key.is_none() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. 
See --help for more details.", - )); + ); } services @@ -120,40 +112,42 @@ pub(super) async fn reprocess( | (None, Some(server_name), None, None) => services .sending .db - .queued_requests(&Destination::Federation(server_name.into())), + .queued_requests(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services .sending .db - .queued_requests(&Destination::Push(user_id.into(), push_key)) + .queued_requests(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. Not all of them See --help for more details.", - )); + ); }, | _ => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); }, }; let queued_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" + )) + .await }, | SendingCommand::ActiveRequestsFor { appservice_id, @@ -166,20 +160,20 @@ pub(super) async fn reprocess( && user_id.is_none() && push_key.is_none() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -190,49 +184,53 @@ pub(super) async fn reprocess( | (None, Some(server_name), None, None) => services .sending .db - .active_requests_for(&Destination::Federation(server_name.into())), + .active_requests_for(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services .sending .db - .active_requests_for(&Destination::Push(user_id.into(), push_key)) + .active_requests_for(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. Not all of them See --help for more details.", - )); + ); }, | _ => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. 
See --help for more details.", - )); + ); }, }; let active_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" + )) + .await }, | SendingCommand::GetLatestEduCount { server_name } => { let timer = tokio::time::Instant::now(); let results = services.sending.db.get_latest_educount(&server_name).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, } } diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs index 0957c15e..aa7c8666 100644 --- a/src/admin/query/short.rs +++ b/src/admin/query/short.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedEventId, OwnedRoomOrAliasId}; use crate::{admin_command, admin_command_dispatch}; @@ -18,10 +18,7 @@ pub(crate) enum ShortCommand { } #[admin_command] -pub(super) async fn short_event_id( - &self, - event_id: OwnedEventId, -) -> Result { +pub(super) async fn short_event_id(&self, event_id: OwnedEventId) -> Result { let shortid = self .services .rooms @@ -29,17 +26,14 @@ pub(super) async fn short_event_id( .get_shorteventid(&event_id) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) + self.write_str(&format!("{shortid:#?}")).await } #[admin_command] -pub(super) async fn short_room_id( - &self, - room_id: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn short_room_id(&self, room_id: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) + self.write_str(&format!("{shortid:#?}")).await } diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 5995bc62..0f34d13f 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -1,9 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::stream::StreamExt; -use ruma::{ - OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedDeviceId, OwnedRoomId, OwnedUserId}; use crate::{admin_command, admin_command_dispatch}; @@ -99,11 +97,7 @@ pub(crate) enum UsersCommand { } #[admin_command] -async fn get_shared_rooms( - &self, - user_a: OwnedUserId, - user_b: OwnedUserId, -) -> Result { +async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result: Vec<_> = self .services @@ -115,9 +109,8 @@ async fn get_shared_rooms( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] @@ -127,7 +120,7 @@ async fn get_backup_session( version: String, room_id: OwnedRoomId, session_id: String, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ 
-136,9 +129,8 @@ async fn get_backup_session( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] @@ -147,7 +139,7 @@ async fn get_room_backups( user_id: OwnedUserId, version: String, room_id: OwnedRoomId, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -156,32 +148,22 @@ async fn get_room_backups( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_all_backups( - &self, - user_id: OwnedUserId, - version: String, -) -> Result { +async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.key_backups.get_all(&user_id, &version).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_backup_algorithm( - &self, - user_id: OwnedUserId, - version: String, -) -> Result { +async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -190,16 +172,12 @@ async fn get_backup_algorithm( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_latest_backup_version( - &self, - user_id: OwnedUserId, -) -> Result { +async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -208,36 +186,33 @@ async fn get_latest_backup_version( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { +async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.key_backups.get_latest_backup(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn iter_users(&self) -> Result { +async fn iter_users(&self) -> Result { let timer = tokio::time::Instant::now(); let result: Vec = self.services.users.stream().map(Into::into).collect().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + 
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn iter_users2(&self) -> Result { +async fn iter_users2(&self) -> Result { let timer = tokio::time::Instant::now(); let result: Vec<_> = self.services.users.stream().collect().await; let result: Vec<_> = result @@ -248,35 +223,32 @@ async fn iter_users2(&self) -> Result { let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) + .await } #[admin_command] -async fn count_users(&self) -> Result { +async fn count_users(&self) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.count().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn password_hash(&self, user_id: OwnedUserId) -> Result { +async fn password_hash(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.password_hash(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn list_devices(&self, user_id: OwnedUserId) -> Result { +async fn list_devices(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let devices = self .services @@ -288,13 +260,12 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result Result { +async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let devices = self .services @@ -304,17 +275,12 @@ async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result Result { +async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let device = self .services @@ -323,28 +289,22 @@ async fn get_device_metadata( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) + .await } #[admin_command] -async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { +async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let device = self.services.users.get_devicelist_version(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) + .await } #[admin_command] -async fn count_one_time_keys( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, -) -> Result { +async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -353,17 +313,12 @@ async fn count_one_time_keys( 
.await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_device_keys( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, -) -> Result { +async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -372,24 +327,22 @@ async fn get_device_keys( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { +async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.get_user_signing_key(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_master_key(&self, user_id: OwnedUserId) -> Result { +async fn get_master_key(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -398,17 +351,12 @@ async fn get_master_key(&self, user_id: OwnedUserId) -> Result Result { +async fn get_to_device_events(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -418,7 +366,6 @@ async fn get_to_device_events( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index ab21170c..6b37ffe4 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -1,13 +1,11 @@ use std::fmt::Write; use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{ - OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedRoomAliasId, OwnedRoomId}; -use crate::{Command, escape_html}; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum RoomAliasCommand { @@ -18,7 +16,7 @@ pub(crate) enum RoomAliasCommand { force: bool, /// The room id to set the alias on - room_id: Box, + room_id: OwnedRoomId, /// The alias localpart to use (`alias`, not `#alias:servername.tld`) room_alias_localpart: String, @@ -40,21 +38,11 @@ pub(crate) enum RoomAliasCommand { /// - List aliases currently being used List { /// If set, only list the aliases for this room - room_id: Option>, + room_id: Option, }, } -pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result { - let c = reprocess(command, context).await?; - context.write_str(c.body()).await?; - - Ok(()) -} - -pub(super) async fn reprocess( - command: RoomAliasCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(command: 
RoomAliasCommand, context: &Context<'_>) -> Result { let services = context.services; let server_user = &services.globals.server_user; @@ -67,9 +55,7 @@ pub(super) async fn reprocess( let room_alias = match OwnedRoomAliasId::parse(room_alias_str) { | Ok(alias) => alias, | Err(err) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse alias: {err}" - ))); + return Err!("Failed to parse alias: {err}"); }, }; match command { @@ -81,60 +67,50 @@ pub(super) async fn reprocess( &room_id, server_user, ) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully overwrote alias (formerly {id})" - ))), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => + context + .write_str(&format!( + "Successfully overwrote alias (formerly {id})" + )) + .await, } }, - | (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!( + | (false, Ok(id)) => Err!( "Refusing to overwrite in use alias for {id}, use -f or --force to \ overwrite" - ))), + ), | (_, Err(_)) => { match services.rooms.alias.set_alias( &room_alias, &room_id, server_user, ) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain( - "Successfully set alias", - )), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => context.write_str("Successfully set alias").await, } }, } }, | RoomAliasCommand::Remove { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { + | Err(_) => Err!("Alias isn't in use."), | Ok(id) => match services .rooms .alias .remove_alias(&room_alias, server_user) .await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Removed alias from {id}" - ))), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => + context.write_str(&format!("Removed alias from {id}")).await, }, - | Err(_) => - Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), } }, | RoomAliasCommand::Which { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { - | Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( - "Alias resolves to {id}" - ))), - | Err(_) => - Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), + | Err(_) => Err!("Alias isn't in use."), + | Ok(id) => context.write_str(&format!("Alias resolves to {id}")).await, } }, | RoomAliasCommand::List { .. } => unreachable!(), @@ -156,15 +132,8 @@ pub(super) async fn reprocess( output }); - let html_list = aliases.iter().fold(String::new(), |mut output, alias| { - writeln!(output, "
<li>{}</li>", escape_html(alias.as_ref())) .expect("should be able to write to string buffer"); output - }); - let plain = format!("Aliases for {room_id}:\n{plain_list}"); - let html = format!("Aliases for {room_id}:\n<ul>{html_list}</ul>"); - Ok(RoomMessageEventContent::text_html(plain, html)) + context.write_str(&plain).await } else { let aliases = services .rooms @@ -183,23 +152,8 @@ pub(super) async fn reprocess( output }); - let html_list = aliases - .iter() - .fold(String::new(), |mut output, (alias, id)| { - writeln!( - output, - "<li>{} -> #{}:{}</li>", - escape_html(alias.as_ref()), - escape_html(id), - server_name - ) - .expect("should be able to write to string buffer"); - output - }); - let plain = format!("Aliases:\n{plain_list}"); - let html = format!("Aliases:\n<ul>{html_list}</ul>"); - Ok(RoomMessageEventContent::text_html(plain, html)) + context.write_str(&plain).await }, } }
diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index 6dd31b48..81f36f15 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -1,6 +1,6 @@ -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomId; use crate::{PAGE_SIZE, admin_command, get_room_info}; @@ -11,7 +11,7 @@ pub(super) async fn list_rooms( exclude_disabled: bool, exclude_banned: bool, no_details: bool, -) -> Result<RoomMessageEventContent> { +) -> Result { // TODO: i know there's a way to do this with clap, but i can't seem to find it let page = page.unwrap_or(1); let mut rooms = self @@ -41,29 +41,28 @@ pub(super) async fn list_rooms( .collect::<Vec<_>>(); if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("No more rooms.")); + return Err!("No more rooms."); } - let output_plain = format!( - "Rooms ({}):\n```\n{}\n```", - rooms.len(), - rooms - .iter() - .map(|(id, members, name)| if no_details { format!("{id}") } else { format!("{id}\tMembers: {members}\tName: {name}") - }) - .collect::<Vec<_>>() - .join("\n") - ); + let body = rooms + .iter() + .map(|(id, members, name)| { + if no_details { format!("{id}") } else { format!("{id}\tMembers: {members}\tName: {name}") + } + }) + .collect::<Vec<_>>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),)) + .await } #[admin_command] -pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result<RoomMessageEventContent> { +pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result { let result = self.services.rooms.metadata.exists(&room_id).await; - Ok(RoomMessageEventContent::notice_markdown(format!("{result}"))) + self.write_str(&format!("{result}")).await }
diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index ca036825..a6be9a15 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,22 +1,22 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{RoomId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomId; -use crate::{Command, PAGE_SIZE, get_room_info}; +use crate::{Context, PAGE_SIZE, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomDirectoryCommand { /// - Publish a room to the room directory Publish { /// The room id of the room to publish - room_id: Box<RoomId>, + room_id: OwnedRoomId, }, /// - Unpublish a room to the room directory Unpublish { /// The room id of the room to unpublish - room_id: Box<RoomId>, + room_id: OwnedRoomId, }, /// - List rooms that are published List { @@ -25,25 +25,16 @@ }, } -pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result { - let c = reprocess(command, context).await?; - context.write_str(c.body()).await?; - Ok(()) -} - -pub(super) async fn reprocess( - command: RoomDirectoryCommand, - context: &Command<'_>, -) -> Result<RoomMessageEventContent> { +pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>) -> Result { let services = context.services; match command { | RoomDirectoryCommand::Publish { room_id } => { services.rooms.directory.set_public(&room_id); - Ok(RoomMessageEventContent::notice_plain("Room published")) + context.write_str("Room published").await }, | RoomDirectoryCommand::Unpublish { room_id } => { services.rooms.directory.set_not_public(&room_id); - 
Ok(RoomMessageEventContent::notice_plain("Room unpublished")) + context.write_str("Room unpublished").await }, | RoomDirectoryCommand::List { page } => { // TODO: i know there's a way to do this with clap, but i can't seem to find it @@ -66,20 +57,18 @@ pub(super) async fn reprocess( .collect(); if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("No more rooms.")); + return Err!("No more rooms."); } - let output = format!( - "Rooms (page {page}):\n```\n{}\n```", - rooms - .iter() - .map(|(id, members, name)| format!( - "{id} | Members: {members} | Name: {name}" - )) - .collect::>() - .join("\n") - ); - Ok(RoomMessageEventContent::text_markdown(output)) + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) + .collect::>() + .join("\n"); + + context + .write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",)) + .await }, } } diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index a39728fe..1278e820 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{Result, utils::ReadyExt}; +use conduwuit::{Err, Result, utils::ReadyExt}; use futures::StreamExt; -use ruma::{RoomId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomId; use crate::{admin_command, admin_command_dispatch}; @@ -10,7 +10,7 @@ use crate::{admin_command, admin_command_dispatch}; pub(crate) enum RoomInfoCommand { /// - List joined members in a room ListJoinedMembers { - room_id: Box, + room_id: OwnedRoomId, /// Lists only our local users in the specified room #[arg(long)] @@ -22,16 +22,12 @@ pub(crate) enum RoomInfoCommand { /// Room topics can be huge, so this is in its /// own separate command ViewRoomTopic { - room_id: Box, + room_id: OwnedRoomId, }, } #[admin_command] -async fn list_joined_members( - &self, - room_id: Box, - local_only: bool, -) -> Result { +async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> Result { let room_name = self .services .rooms @@ -64,22 +60,19 @@ async fn list_joined_members( .collect() .await; - let output_plain = format!( - "{} Members in Room \"{}\":\n```\n{}\n```", - member_info.len(), - room_name, - member_info - .into_iter() - .map(|(displayname, mxid)| format!("{mxid} | {displayname}")) - .collect::>() - .join("\n") - ); + let num = member_info.len(); + let body = member_info + .into_iter() + .map(|(displayname, mxid)| format!("{mxid} | {displayname}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",)) + .await } #[admin_command] -async fn view_room_topic(&self, room_id: Box) -> Result { +async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result { let Ok(room_topic) = self .services .rooms @@ -87,10 +80,9 @@ async fn view_room_topic(&self, room_id: Box) -> Result, + room: OwnedRoomOrAliasId, }, /// - Bans a list of rooms (room IDs and room aliases) from a newline @@ -36,7 +33,7 @@ pub(crate) enum RoomModerationCommand { UnbanRoom { /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` - room: Box, + room: OwnedRoomOrAliasId, }, /// - List of all rooms we have banned @@ -49,14 +46,14 @@ pub(crate) enum RoomModerationCommand { } #[admin_command] -async fn ban_room(&self, room: Box) -> Result { +async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { debug!("Got room alias or ID: {}", room); let 
admin_room_alias = &self.services.globals.admin_alias; if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) { - return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room.")); + return Err!("Not allowed to ban the admin room."); } } @@ -64,11 +61,11 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -80,11 +77,11 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> } else { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -123,9 +120,9 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> | Err(e) => { - return Ok(RoomMessageEventContent::notice_plain(format!( + return Err!( "Failed to resolve room alias {room_alias} to a room ID: {e}" - ))); + ); }, } }, @@ -135,11 +132,11 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> #[admin_command] -async fn ban_list_of_rooms(&self) -> Result<RoomMessageEventContent> { +async fn ban_list_of_rooms(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } let rooms_s = self @@ -356,23 +352,24 @@ async fn ban_list_of_rooms(&self) -> Result<RoomMessageEventContent> { self.services.rooms.metadata.disable_room(&room_id, true); } - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \ disabled incoming federation with the room." - ))) + )) + .await } #[admin_command] -async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> { +async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -384,11 +381,11 @@ async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> } else { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -427,9 +424,7 @@ async fn unban_room(&self, room: Box) -> Result { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" - ))); + return Err!("Failed to resolve room alias {room} to a room ID: {e}"); }, } }, @@ -439,19 +434,20 @@ async fn unban_room(&self, room: Box) -> Result Result { +async fn list_banned_rooms(&self, no_details: bool) -> Result { let room_ids: Vec = self .services .rooms @@ -462,7 +458,7 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result Result>() - .join("\n") - ); + } + }) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",)) + .await } diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 17bf9ec0..6027a9eb 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,12 +1,16 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{Err, Result, info, utils::time, warn}; -use ruma::events::room::message::RoomMessageEventContent; +use conduwuit::{ + Err, Result, info, + utils::{stream::IterStream, time}, + warn, +}; +use futures::TryStreamExt; use crate::admin_command; #[admin_command] -pub(super) async fn uptime(&self) -> Result { +pub(super) async fn uptime(&self) -> Result { let elapsed = self .services .server @@ -15,47 +19,36 @@ pub(super) async fn uptime(&self) -> Result { .expect("standard duration"); let result = time::pretty(elapsed); - Ok(RoomMessageEventContent::notice_plain(format!("{result}."))) + self.write_str(&format!("{result}.")).await } #[admin_command] -pub(super) async fn show_config(&self) -> Result { - // Construct and send the response - Ok(RoomMessageEventContent::text_markdown(format!( - "{}", - *self.services.server.config - ))) +pub(super) async fn show_config(&self) -> Result { + self.write_str(&format!("{}", *self.services.server.config)) + .await } #[admin_command] -pub(super) async fn reload_config( - &self, - path: Option, -) -> Result { +pub(super) async fn reload_config(&self, path: Option) -> Result { let path = path.as_deref().into_iter(); self.services.config.reload(path)?; - Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) + self.write_str("Successfully reconfigured.").await } #[admin_command] -pub(super) async fn list_features( - &self, - available: bool, - enabled: bool, - comma: bool, -) -> Result { +pub(super) async fn list_features(&self, available: bool, enabled: bool, comma: bool) -> Result { let delim = if comma { "," } else { " " }; if enabled && !available { let features = info::rustc::features().join(delim); let out = format!("`\n{features}\n`"); - return Ok(RoomMessageEventContent::text_markdown(out)); + return self.write_str(&out).await; } if available && !enabled { let features = info::cargo::features().join(delim); let out = format!("`\n{features}\n`"); - return Ok(RoomMessageEventContent::text_markdown(out)); + return self.write_str(&out).await; } let mut features = String::new(); @@ -68,77 +61,76 @@ pub(super) async fn list_features( writeln!(features, "{emoji} {feature} {remark}")?; } - Ok(RoomMessageEventContent::text_markdown(features)) + self.write_str(&features).await } #[admin_command] -pub(super) async fn memory_usage(&self) -> Result { +pub(super) async fn memory_usage(&self) -> Result { let 
services_usage = self.services.memory_usage().await?; let database_usage = self.services.db.db.memory_usage()?; let allocator_usage = conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}")); - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}", - ))) + )) + .await } #[admin_command] -pub(super) async fn clear_caches(&self) -> Result { +pub(super) async fn clear_caches(&self) -> Result { self.services.clear_cache().await; - Ok(RoomMessageEventContent::text_plain("Done.")) + self.write_str("Done.").await } #[admin_command] -pub(super) async fn list_backups(&self) -> Result { - let result = self.services.db.db.backup_list()?; - - if result.is_empty() { - Ok(RoomMessageEventContent::text_plain("No backups found.")) - } else { - Ok(RoomMessageEventContent::text_plain(result)) - } +pub(super) async fn list_backups(&self) -> Result { + self.services + .db + .db + .backup_list()? + .try_stream() + .try_for_each(|result| write!(self, "{result}")) + .await } #[admin_command] -pub(super) async fn backup_database(&self) -> Result { +pub(super) async fn backup_database(&self) -> Result { let db = Arc::clone(&self.services.db); - let mut result = self + let result = self .services .server .runtime() .spawn_blocking(move || match db.db.backup() { - | Ok(()) => String::new(), - | Err(e) => e.to_string(), + | Ok(()) => "Done".to_owned(), + | Err(e) => format!("Failed: {e}"), }) .await?; - if result.is_empty() { - result = self.services.db.db.backup_list()?; - } - - Ok(RoomMessageEventContent::notice_markdown(result)) + let count = self.services.db.db.backup_count()?; + self.write_str(&format!("{result}. Currently have {count} backups.")) + .await } #[admin_command] -pub(super) async fn admin_notice(&self, message: Vec) -> Result { +pub(super) async fn admin_notice(&self, message: Vec) -> Result { let message = message.join(" "); self.services.admin.send_text(&message).await; - Ok(RoomMessageEventContent::notice_plain("Notice was sent to #admins")) + self.write_str("Notice was sent to #admins").await } #[admin_command] -pub(super) async fn reload_mods(&self) -> Result { +pub(super) async fn reload_mods(&self) -> Result { self.services.server.reload()?; - Ok(RoomMessageEventContent::notice_plain("Reloading server...")) + self.write_str("Reloading server...").await } #[admin_command] #[cfg(unix)] -pub(super) async fn restart(&self, force: bool) -> Result { +pub(super) async fn restart(&self, force: bool) -> Result { use conduwuit::utils::sys::current_exe_deleted; if !force && current_exe_deleted() { @@ -150,13 +142,13 @@ pub(super) async fn restart(&self, force: bool) -> Result Result { +pub(super) async fn shutdown(&self) -> Result { warn!("shutdown command"); self.services.server.shutdown()?; - Ok(RoomMessageEventContent::notice_plain("Shutting down server...")) + self.write_str("Shutting down server...").await } diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 60615365..6b99e5de 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -36,7 +36,7 @@ pub(super) enum ServerCommand { /// - Print database memory usage statistics MemoryUsage, - /// - Clears all of Conduwuit's caches + /// - Clears all of Continuwuity's caches ClearCaches, /// - Performs an online backup of the database (only available for RocksDB diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 45e550be..e5e481e5 100644 --- a/src/admin/user/commands.rs +++ 
b/src/admin/user/commands.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - Result, debug, debug_warn, error, info, is_equal_to, + Err, Result, debug, debug_warn, error, info, is_equal_to, matrix::pdu::PduBuilder, utils::{self, ReadyExt}, warn, @@ -10,11 +10,10 @@ use conduwuit::{ use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; use ruma::{ - EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, + OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId, events::{ RoomAccountDataEventType, StateEventType, room::{ - message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, redaction::RoomRedactionEventContent, }, @@ -31,7 +30,7 @@ const AUTO_GEN_PASSWORD_LENGTH: usize = 25; const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin."; #[admin_command] -pub(super) async fn list_users(&self) -> Result { +pub(super) async fn list_users(&self) -> Result { let users: Vec<_> = self .services .users @@ -44,30 +43,22 @@ pub(super) async fn list_users(&self) -> Result { plain_msg += users.join("\n").as_str(); plain_msg += "\n```"; - self.write_str(plain_msg.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&plain_msg).await } #[admin_command] -pub(super) async fn create_user( - &self, - username: String, - password: Option, -) -> Result { +pub(super) async fn create_user(&self, username: String, password: Option) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &username)?; if let Err(e) = user_id.validate_strict() { if self.services.config.emergency_password.is_none() { - return Ok(RoomMessageEventContent::text_plain(format!( - "Username {user_id} contains disallowed characters or spaces: {e}" - ))); + return Err!("Username {user_id} contains disallowed characters or spaces: {e}"); } } if self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists"))); + return Err!("User {user_id} already exists"); } let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -89,8 +80,7 @@ pub(super) async fn create_user( .new_user_displayname_suffix .is_empty() { - write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix) - .expect("should be able to write to string buffer"); + write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)?; } self.services @@ -110,15 +100,17 @@ pub(super) async fn create_user( content: ruma::events::push_rules::PushRulesEventContent { global: ruma::push::Ruleset::server_default(&user_id), }, - }) - .expect("to json value always works"), + })?, ) .await?; if !self.services.server.config.auto_join_rooms.is_empty() { for room in &self.services.server.config.auto_join_rooms { let Ok(room_id) = self.services.rooms.alias.resolve(room).await else { - error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"); + error!( + %user_id, + "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping" + ); continue; }; @@ -154,18 +146,17 @@ pub(super) async fn create_user( info!("Automatically joined room {room} for user {user_id}"); }, | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - 
"Failed to automatically join room {room} for user {user_id}: \ - {e}" - ))) - .await - .ok(); // don't return this error so we don't fail registrations error!( "Failed to automatically join room {room} for user {user_id}: {e}" ); + self.services + .admin + .send_text(&format!( + "Failed to automatically join room {room} for user {user_id}: \ + {e}" + )) + .await; }, } } @@ -192,25 +183,18 @@ pub(super) async fn create_user( debug!("create_user admin command called without an admin room being available"); } - Ok(RoomMessageEventContent::text_plain(format!( - "Created user with user_id: {user_id} and password: `{password}`" - ))) + self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`")) + .await } #[admin_command] -pub(super) async fn deactivate( - &self, - no_leave_rooms: bool, - user_id: String, -) -> Result { +pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &user_id)?; // don't deactivate the server service account if user_id == self.services.globals.server_user { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to deactivate the server service account.", - )); + return Err!("Not allowed to deactivate the server service account.",); } self.services.users.deactivate_account(&user_id).await?; @@ -218,11 +202,8 @@ pub(super) async fn deactivate( if !no_leave_rooms { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Making {user_id} leave all rooms after deactivation..." - ))) - .await - .ok(); + .send_text(&format!("Making {user_id} leave all rooms after deactivation...")) + .await; let all_joined_rooms: Vec = self .services @@ -239,24 +220,19 @@ pub(super) async fn deactivate( leave_all_rooms(self.services, &user_id).await; } - Ok(RoomMessageEventContent::text_plain(format!( - "User {user_id} has been deactivated" - ))) + self.write_str(&format!("User {user_id} has been deactivated")) + .await } #[admin_command] -pub(super) async fn reset_password( - &self, - username: String, - password: Option, -) -> Result { +pub(super) async fn reset_password(&self, username: String, password: Option) -> Result { let user_id = parse_local_user_id(self.services, &username)?; if user_id == self.services.globals.server_user { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to set the password for the server account. 
Please use the emergency \ password config option.", - )); + ); } let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -266,28 +242,20 @@ pub(super) async fn reset_password( .users .set_password(&user_id, Some(new_password.as_str())) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {user_id}: `{new_password}`" - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Couldn't reset the password for user {user_id}: {e}" - ))), + | Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"), + | Ok(()) => + write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"), } + .await } #[admin_command] -pub(super) async fn deactivate_all( - &self, - no_leave_rooms: bool, - force: bool, -) -> Result { +pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } let usernames = self @@ -301,15 +269,23 @@ pub(super) async fn deactivate_all( for username in usernames { match parse_active_local_user_id(self.services, username).await { + | Err(e) => { + self.services + .admin + .send_text(&format!("{username} is not a valid username, skipping over: {e}")) + .await; + + continue; + }, | Ok(user_id) => { if self.services.users.is_admin(&user_id).await && !force { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is an admin and --force is not set, skipping over" - ))) - .await - .ok(); + )) + .await; + admins.push(username); continue; } @@ -318,26 +294,16 @@ pub(super) async fn deactivate_all( if user_id == self.services.globals.server_user { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is the server service account, skipping over" - ))) - .await - .ok(); + )) + .await; + continue; } user_ids.push(user_id); }, - | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username, skipping over: {e}" - ))) - .await - .ok(); - continue; - }, } } @@ -345,6 +311,12 @@ pub(super) async fn deactivate_all( for user_id in user_ids { match self.services.users.deactivate_account(&user_id).await { + | Err(e) => { + self.services + .admin + .send_text(&format!("Failed deactivating user: {e}")) + .await; + }, | Ok(()) => { deactivation_count = deactivation_count.saturating_add(1); if !no_leave_rooms { @@ -365,33 +337,24 @@ pub(super) async fn deactivate_all( leave_all_rooms(self.services, &user_id).await; } }, - | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed deactivating user: {e}" - ))) - .await - .ok(); - }, } } if admins.is_empty() { - Ok(RoomMessageEventContent::text_plain(format!( - "Deactivated {deactivation_count} accounts." - ))) + write!(self, "Deactivated {deactivation_count} accounts.") } else { - Ok(RoomMessageEventContent::text_plain(format!( + write!( + self, "Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. 
Use \ --force to deactivate admin accounts", admins.join(", ") - ))) + ) } + .await } #[admin_command] -pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result { +pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &user_id)?; @@ -405,23 +368,20 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result>() - .join("\n") - ); + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),)) + .await } #[admin_command] @@ -429,27 +389,23 @@ pub(super) async fn force_join_list_of_local_users( &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, -) -> Result { +) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } if !yes_i_want_to_do_this { - return Ok(RoomMessageEventContent::notice_markdown( + return Err!( "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ bulk join all specified local users.", - )); + ); } let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not an admin room to check for server admins.", - )); + return Err!("There is not an admin room to check for server admins.",); }; let (room_id, servers) = self @@ -466,7 +422,7 @@ pub(super) async fn force_join_list_of_local_users( .server_in_room(self.services.globals.server_name(), &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + return Err!("We are not joined in this room."); } let server_admins: Vec<_> = self @@ -486,9 +442,7 @@ pub(super) async fn force_join_list_of_local_users( .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) .await { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not a single server admin in the room.", - )); + return Err!("There is not a single server admin in the room.",); } let usernames = self @@ -506,11 +460,11 @@ pub(super) async fn force_join_list_of_local_users( if user_id == self.services.globals.server_user { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is the server service account, skipping over" - ))) - .await - .ok(); + )) + .await; + continue; } @@ -519,11 +473,9 @@ pub(super) async fn force_join_list_of_local_users( | Err(e) => { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username, skipping over: {e}" - ))) - .await - .ok(); + .send_text(&format!("{username} is not a valid username, skipping over: {e}")) + .await; + continue; }, } @@ -554,10 +506,11 @@ pub(super) async fn force_join_list_of_local_users( } } - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "{successful_joins} local users have been joined to {room_id}. 
{failed_joins} joins \ failed.", - ))) + )) + .await } #[admin_command] @@ -565,18 +518,16 @@ pub(super) async fn force_join_all_local_users( &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, -) -> Result { +) -> Result { if !yes_i_want_to_do_this { - return Ok(RoomMessageEventContent::notice_markdown( + return Err!( "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ bulk join all local users.", - )); + ); } let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not an admin room to check for server admins.", - )); + return Err!("There is not an admin room to check for server admins.",); }; let (room_id, servers) = self @@ -593,7 +544,7 @@ pub(super) async fn force_join_all_local_users( .server_in_room(self.services.globals.server_name(), &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + return Err!("We are not joined in this room."); } let server_admins: Vec<_> = self @@ -613,9 +564,7 @@ pub(super) async fn force_join_all_local_users( .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) .await { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not a single server admin in the room.", - )); + return Err!("There is not a single server admin in the room.",); } let mut failed_joins: usize = 0; @@ -650,10 +599,11 @@ pub(super) async fn force_join_all_local_users( } } - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \ failed.", - ))) + )) + .await } #[admin_command] @@ -661,7 +611,7 @@ pub(super) async fn force_join_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, -) -> Result { +) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let (room_id, servers) = self .services @@ -677,9 +627,8 @@ pub(super) async fn force_join_room( join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has been joined to {room_id}.", - ))) + self.write_str(&format!("{user_id} has been joined to {room_id}.",)) + .await } #[admin_command] @@ -687,7 +636,7 @@ pub(super) async fn force_leave_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, -) -> Result { +) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -703,24 +652,17 @@ pub(super) async fn force_leave_room( .is_joined(&user_id, &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} is not joined in the room" - ))); + return Err!("{user_id} is not joined in the room"); } leave_room(self.services, &user_id, &room_id, None).await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has left {room_id}.", - ))) + self.write_str(&format!("{user_id} has left {room_id}.",)) + .await } #[admin_command] -pub(super) async fn force_demote( - &self, - user_id: String, - room_id: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAliasId) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -731,15 +673,11 @@ pub(super) async fn force_demote( let state_lock = 
self.services.rooms.state.mutex.lock(&room_id).await; - let room_power_levels = self + let room_power_levels: Option = self .services .rooms .state_accessor - .room_state_get_content::( - &room_id, - &StateEventType::RoomPowerLevels, - "", - ) + .room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "") .await .ok(); @@ -757,9 +695,7 @@ pub(super) async fn force_demote( .is_ok_and(|event| event.sender == user_id); if !user_can_demote_self { - return Ok(RoomMessageEventContent::notice_markdown( - "User is not allowed to modify their own power levels in the room.", - )); + return Err!("User is not allowed to modify their own power levels in the room.",); } let mut power_levels_content = room_power_levels.unwrap_or_default(); @@ -777,34 +713,34 @@ pub(super) async fn force_demote( ) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "User {user_id} demoted themselves to the room default power level in {room_id} - \ {event_id}" - ))) + )) + .await } #[admin_command] -pub(super) async fn make_user_admin(&self, user_id: String) -> Result { +pub(super) async fn make_user_admin(&self, user_id: String) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; - assert!( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); + self.services.admin.make_user_admin(&user_id).await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has been granted admin privileges.", - ))) + self.write_str(&format!("{user_id} has been granted admin privileges.",)) + .await } #[admin_command] pub(super) async fn put_room_tag( &self, user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, -) -> Result { +) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let mut tags_event = self @@ -831,18 +767,19 @@ pub(super) async fn put_room_tag( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Successfully updated room account data for {user_id} and room {room_id} with tag {tag}" - ))) + )) + .await } #[admin_command] pub(super) async fn delete_room_tag( &self, user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, -) -> Result { +) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let mut tags_event = self @@ -866,18 +803,15 @@ pub(super) async fn delete_room_tag( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Successfully updated room account data for {user_id} and room {room_id}, deleting room \ tag {tag}" - ))) + )) + .await } #[admin_command] -pub(super) async fn get_room_tags( - &self, - user_id: String, - room_id: Box, -) -> Result { +pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let tags_event = self @@ -889,17 +823,12 @@ pub(super) async fn get_room_tags( content: TagEventContent { tags: BTreeMap::new() }, }); - Ok(RoomMessageEventContent::notice_markdown(format!( - "```\n{:#?}\n```", - tags_event.content.tags - ))) + self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags)) + .await } #[admin_command] -pub(super) async fn redact_event( - &self, - event_id: Box, -) -> Result { +pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result { let Ok(event) = self .services .rooms @@ -907,20 +836,18 @@ pub(super) async fn redact_event( .get_non_outlier_pdu(&event_id) .await else { - 
return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database.")); + return Err!("Event does not exist in our database."); }; if event.is_redacted() { - return Ok(RoomMessageEventContent::text_plain("Event is already redacted.")); + return Err!("Event is already redacted."); } let room_id = event.room_id; let sender_user = event.sender; if !self.services.globals.user_is_local(&sender_user) { - return Ok(RoomMessageEventContent::text_plain( - "This command only works on local users.", - )); + return Err!("This command only works on local users."); } let reason = format!( @@ -949,9 +876,8 @@ pub(super) async fn redact_event( .await? }; - let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}"); - - self.write_str(out.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!( + "Successfully redacted event. Redaction event ID: {redaction_event_id}" + )) + .await } diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index 1494ea8f..e789376a 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -2,7 +2,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, OwnedRoomOrAliasId, RoomId}; +use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId}; use crate::admin_command_dispatch; @@ -102,21 +102,21 @@ pub(super) enum UserCommand { /// room's internal ID, and the tag name `m.server_notice`. PutRoomTag { user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, }, /// - Deletes the room tag for the specified user and room ID DeleteRoomTag { user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, }, /// - Gets all the room tags for the specified user and room ID GetRoomTags { user_id: String, - room_id: Box, + room_id: OwnedRoomId, }, /// - Attempts to forcefully redact the specified event ID from the sender @@ -124,7 +124,7 @@ pub(super) enum UserCommand { /// /// This is only valid for local users RedactEvent { - event_id: Box, + event_id: OwnedEventId, }, /// - Force joins a specified list of local users to join the specified diff --git a/src/admin/utils.rs b/src/admin/utils.rs index a2696c50..ea9696b2 100644 --- a/src/admin/utils.rs +++ b/src/admin/utils.rs @@ -1,3 +1,5 @@ +#![allow(dead_code)] + use conduwuit_core::{Err, Result, err}; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use service::Services; diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 7890561c..15ada812 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,21 +17,50 @@ crate-type = [ ] [features] -element_hacks = [] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", +brotli_compression = [ + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", + "reqwest/brotli", ] -zstd_compression = [ - "reqwest/zstd", +element_hacks = [ + "conduwuit-service/element_hacks", ] gzip_compression = [ + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", "reqwest/gzip", ] -brotli_compression = [ - "reqwest/brotli", +io_uring = [ + "conduwuit-service/io_uring", +] +jemalloc = [ + "conduwuit-core/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", + "conduwuit-service/jemalloc_stats", 
+] +release_max_log_level = [ + "conduwuit-core/release_max_log_level", + "conduwuit-service/release_max_log_level", + "log/max_level_trace", + "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", +] +zstd_compression = [ + "conduwuit-core/zstd_compression", + "conduwuit-service/zstd_compression", + "reqwest/zstd", ] [dependencies] @@ -42,7 +71,6 @@ axum.workspace = true base64.workspace = true bytes.workspace = true conduwuit-core.workspace = true -conduwuit-database.workspace = true conduwuit-service.workspace = true const-str.workspace = true futures.workspace = true diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 470ff6ab..7362c4f9 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -15,7 +15,7 @@ use crate::Ruma; /// # `GET /_matrix/client/v3/capabilities` /// -/// Get information on the supported feature set and other relevent capabilities +/// Get information on the supported feature set and other relevant capabilities /// of this server. pub(crate) async fn get_capabilities_route( State(services): State, diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index b44b9f64..aa6ae168 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -52,13 +52,8 @@ pub(crate) async fn get_public_rooms_filtered_route( ) -> Result { if let Some(server) = &body.server { if services - .config - .forbidden_remote_room_directory_server_names - .is_match(server.host()) - || services - .config - .forbidden_remote_server_names - .is_match(server.host()) + .moderation + .is_remote_server_room_directory_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } @@ -92,15 +87,7 @@ pub(crate) async fn get_public_rooms_route( body: Ruma, ) -> Result { if let Some(server) = &body.server { - if services - .config - .forbidden_remote_room_directory_server_names - .is_match(server.host()) - || services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 1eeacf83..e587d806 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,6 +1,6 @@ use std::{ borrow::Borrow, - collections::{BTreeMap, HashMap, HashSet}, + collections::{HashMap, HashSet}, iter::once, net::IpAddr, sync::Arc, @@ -9,7 +9,7 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Result, at, debug, debug_info, debug_warn, err, error, info, + Err, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, is_matching, matrix::{ StateKey, pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, @@ -17,7 +17,12 @@ use conduwuit::{ }, result::{FlatOk, NotFound}, trace, - utils::{self, IterStream, ReadyExt, shuffle}, + utils::{ + self, FutureBoolExt, + future::ReadyEqExt, + shuffle, + stream::{BroadbandExt, IterStream, ReadyExt}, + }, warn, }; use conduwuit_service::{ @@ -28,7 +33,7 @@ use conduwuit_service::{ state_compressor::{CompressedState, HashSetCompressStateEvent}, }, }; -use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join}; +use futures::{FutureExt, StreamExt, TryFutureExt, join, pin_mut}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, 
RoomVersionId, ServerName, UserId, @@ -52,7 +57,6 @@ use ruma::{ room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, }, }, }; @@ -79,9 +83,8 @@ async fn banned_room_check( if let Some(room_id) = room_id { if services.rooms.metadata.is_banned(room_id).await || services - .config - .forbidden_remote_server_names - .is_match(room_id.server_name().unwrap().host()) + .moderation + .is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")) { warn!( "User {user_id} who is not an admin attempted to send an invite for or \ @@ -96,12 +99,11 @@ async fn banned_room_check( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "Automatically deactivating user {user_id} due to attempted banned \ room join from IP {client_ip}" - ))) - .await - .ok(); + )) + .await; } let all_joined_rooms: Vec = services @@ -136,12 +138,11 @@ async fn banned_room_check( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "Automatically deactivating user {user_id} due to attempted banned \ room join from IP {client_ip}" - ))) - .await - .ok(); + )) + .await; } let all_joined_rooms: Vec = services @@ -366,10 +367,10 @@ pub(crate) async fn knock_room_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let body = body.body; + let sender_user = body.sender_user(); + let body = &body.body; - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { | Ok(room_id) => { banned_room_check( &services, @@ -493,7 +494,7 @@ pub(crate) async fn invite_user_route( let sender_user = body.sender_user(); if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { - info!( + debug_error!( "User {sender_user} is not an admin and attempted to send an invite to room {}", &body.room_id ); @@ -722,12 +723,10 @@ pub(crate) async fn forget_room_route( let joined = services.rooms.state_cache.is_joined(user_id, room_id); let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); - let left = services.rooms.state_cache.is_left(user_id, room_id); let invited = services.rooms.state_cache.is_invited(user_id, room_id); - let (joined, knocked, left, invited) = join4(joined, knocked, left, invited).await; - - if joined || knocked || invited { + pin_mut!(joined, knocked, invited); + if joined.or(knocked).or(invited).await { return Err!(Request(Unknown("You must leave the room before forgetting it"))); } @@ -741,11 +740,11 @@ pub(crate) async fn forget_room_route( return Err!(Request(Unknown("No membership event was found, room was never joined"))); } - if left - || membership.is_ok_and(|member| { - member.membership == MembershipState::Leave - || member.membership == MembershipState::Ban - }) { + let non_membership = membership + .map(|member| member.membership) + .is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban)); + + if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await { services.rooms.state_cache.forget(room_id, user_id); } @@ -866,32 +865,32 @@ pub(crate) async fn joined_members_route( State(services): State, body: Ruma, ) -> Result { - let 
sender_user = body.sender_user(); - if !services .rooms .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) + .user_can_see_state_events(body.sender_user(), &body.room_id) .await { return Err!(Request(Forbidden("You don't have permission to view this room."))); } - let joined: BTreeMap = services - .rooms - .state_cache - .room_members(&body.room_id) - .map(ToOwned::to_owned) - .then(|user| async move { - (user.clone(), RoomMember { - display_name: services.users.displayname(&user).await.ok(), - avatar_url: services.users.avatar_url(&user).await.ok(), - }) - }) - .collect() - .await; + Ok(joined_members::v3::Response { + joined: services + .rooms + .state_cache + .room_members(&body.room_id) + .map(ToOwned::to_owned) + .broad_then(|user_id| async move { + let member = RoomMember { + display_name: services.users.displayname(&user_id).await.ok(), + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }; - Ok(joined_members::v3::Response { joined }) + (user_id, member) + }) + .collect() + .await, + }) } pub async fn join_room_by_id_helper( @@ -1118,9 +1117,10 @@ async fn join_room_by_id_helper_remote( })?; if signed_event_id != event_id { - return Err!(Request(BadJson( - warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") - ))); + return Err!(Request(BadJson(warn!( + %signed_event_id, %event_id, + "Server {remote_server} sent event with wrong event ID" + )))); } match signed_value["signatures"] @@ -1696,19 +1696,18 @@ pub(crate) async fn invite_helper( })?; if pdu.event_id != event_id { - return Err!(Request(BadJson( - warn!(%pdu.event_id, %event_id, "Server {} sent event with wrong event ID", user_id.server_name()) - ))); + return Err!(Request(BadJson(warn!( + %pdu.event_id, %event_id, + "Server {} sent event with wrong event ID", + user_id.server_name() + )))); } - let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, - ) - .expect("CanonicalJson is valid json value"), - ) + let origin: OwnedServerName = serde_json::from_value(serde_json::to_value( + value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, + )?) 
.map_err(|e| { err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))) })?; @@ -1818,9 +1817,11 @@ pub async fn leave_room( blurhash: None, }; - if services.rooms.metadata.is_banned(room_id).await - || services.rooms.metadata.is_disabled(room_id).await - { + let is_banned = services.rooms.metadata.is_banned(room_id); + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + pin_mut!(is_banned, is_disabled); + if is_banned.or(is_disabled).await { // the room is banned/disabled, the room must be rejected locally since we // cant/dont want to federate with this server services @@ -1840,18 +1841,24 @@ pub async fn leave_room( return Ok(()); } - // Ask a remote server if we don't have this room and are not knocking on it - if !services + let dont_have_room = services .rooms .state_cache .server_in_room(services.globals.server_name(), room_id) - .await && !services + .eq(&false); + + let not_knocked = services .rooms .state_cache .is_knocked(user_id, room_id) - .await - { - if let Err(e) = remote_leave_room(services, user_id, room_id).await { + .eq(&false); + + // Ask a remote server if we don't have this room and are not knocking on it + if dont_have_room.and(not_knocked).await { + if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone()) + .boxed() + .await + { warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); // Don't tell the client about this error } @@ -1936,6 +1943,7 @@ async fn remote_leave_room( services: &Services, user_id: &UserId, room_id: &RoomId, + reason: Option, ) -> Result<()> { let mut make_leave_response_and_server = Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); @@ -2052,6 +2060,12 @@ async fn remote_leave_room( .expect("Timestamp is valid js_int value"), ), ); + // Inject the reason key into the event content dict if it exists + if let Some(reason) = reason { + if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") { + content.insert("reason".to_owned(), CanonicalJsonValue::String(reason)); + } + } // room v3 and above removed the "event_id" field from remote PDU format match room_version_id { @@ -2148,6 +2162,109 @@ async fn knock_room_by_id_helper( } } + // For knock_restricted rooms, check if the user meets the restricted conditions + // If they do, attempt to join instead of knock + // This is not mentioned in the spec, but should be allowable (we're allowed to + // auto-join invites to knocked rooms) + let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await; + if let JoinRule::KnockRestricted(restricted) = &join_rule { + let restriction_rooms: Vec<_> = restricted + .allow + .iter() + .filter_map(|a| match a { + | AllowRule::RoomMembership(r) => Some(&r.room_id), + | _ => None, + }) + .collect(); + + // Check if the user is in any of the allowed rooms + let mut user_meets_restrictions = false; + for restriction_room_id in &restriction_rooms { + if services + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + .await + { + user_meets_restrictions = true; + break; + } + } + + // If the user meets the restrictions, try joining instead + if user_meets_restrictions { + debug_info!( + "{sender_user} meets the restricted criteria in knock_restricted room \ + {room_id}, attempting to join instead of knock" + ); + // For this case, we need to drop the state lock and get a new one in + // join_room_by_id_helper We need to release the lock here and let + // join_room_by_id_helper acquire it again 
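+ // (assumption: the per-room state mutex is not re-entrant, so holding it + // across the call into join_room_by_id_helper would deadlock)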
+ drop(state_lock); + match join_room_by_id_helper( + services, + sender_user, + room_id, + reason.clone(), + servers, + None, + &None, + ) + .await + { + | Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())), + | Err(e) => { + debug_warn!( + "Failed to convert knock to join for {sender_user} in {room_id}: {e:?}" + ); + // Get a new state lock for the remaining knock logic + let new_state_lock = services.rooms.state.mutex.lock(room_id).await; + + let server_in_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await; + + let local_knock = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_knock { + knock_room_helper_local( + services, + sender_user, + room_id, + reason, + servers, + new_state_lock, + ) + .boxed() + .await?; + } else { + knock_room_helper_remote( + services, + sender_user, + room_id, + reason, + servers, + new_state_lock, + ) + .boxed() + .await?; + } + + return Ok(knock_room::v3::Response::new(room_id.to_owned())); + }, + } + } + } else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) { + debug_warn!( + "{sender_user} attempted to knock on room {room_id} but its join rule is \ + {join_rule:?}, not knock or knock_restricted" + ); + } + let server_in_room = services .rooms .state_cache @@ -2195,6 +2312,12 @@ async fn knock_room_helper_local( return Err!(Request(Forbidden("This room does not support knocking."))); } + // Verify that this room has a valid knock or knock_restricted join rule + let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await; + if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) { + return Err!(Request(Forbidden("This room's join rule does not allow knocking."))); + } + let content = RoomMemberEventContent { displayname: services.users.displayname(sender_user).await.ok(), avatar_url: services.users.avatar_url(sender_user).await.ok(), diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 9c2c4057..e442850b 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,3 +1,5 @@ +use core::panic; + use axum::extract::State; use conduwuit::{ Err, Result, at, @@ -132,8 +134,6 @@ pub(crate) async fn get_message_events_route( .take(limit) .collect() .await; - // let appservice_id = body.appservice_info.map(|appservice| - // appservice.registration.id); let lazy_loading_context = lazy_loading::Context { user_id: sender_user, @@ -143,7 +143,10 @@ pub(crate) async fn get_message_events_route( if let Some(registration) = body.appservice_info.as_ref() { <&DeviceId>::from(registration.registration.id.as_str()) } else { - <&DeviceId>::from("") + panic!( + "No device_id provided and no appservice registration found, this \ + should be unreachable" + ); }, }, room_id, @@ -274,12 +277,13 @@ pub(crate) async fn is_ignored_pdu( let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); let ignored_server = services - .config - .forbidden_remote_server_names - .is_match(pdu.sender().server_name().host()); + .moderation + .is_remote_server_ignored(pdu.sender().server_name()); if ignored_type - && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) + && (ignored_server + || (!services.config.send_messages_from_ignored_users_to_client + && services.users.user_is_ignored(&pdu.sender, user_id).await)) { return true; } diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 
4ce53f15..be3fd23b 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -107,7 +107,6 @@ pub(crate) async fn create_room_route( return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed"))); } - let _short_id = services .rooms .short @@ -606,24 +605,42 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result( - services: &Services, - rooms: &[&'a RoomId], - filter: &[RoomTypeFilter], - negate: bool, -) -> Vec<&'a RoomId> { - rooms - .iter() - .stream() - .filter_map(|r| async move { - let room_type = services.rooms.state_accessor.get_room_type(r).await; - - if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { - return None; - } - - let room_type_filter = RoomTypeFilter::from(room_type.ok()); - - let include = if negate { - !filter.contains(&room_type_filter) - } else { - filter.is_empty() || filter.contains(&room_type_filter) - }; - - include.then_some(r) - }) - .collect() - .await -} diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 24930941..8eac6b66 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -14,8 +14,8 @@ use conduwuit::{ pair_of, ref_at, result::FlatOk, utils::{ - self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, - future::OptionStream, + self, BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::{OptionStream, ReadyEqExt}, math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, @@ -32,6 +32,7 @@ use conduwuit_service::{ use futures::{ FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::{OptionFuture, join, join3, join4, join5, try_join, try_join4}, + pin_mut, }; use ruma::{ DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, @@ -433,10 +434,14 @@ async fn handle_left_room( return Ok(None); } - if !services.rooms.metadata.exists(room_id).await - || services.rooms.metadata.is_disabled(room_id).await - || services.rooms.metadata.is_banned(room_id).await - { + let is_not_found = services.rooms.metadata.exists(room_id).eq(&false); + + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + let is_banned = services.rooms.metadata.is_banned(room_id); + + pin_mut!(is_not_found, is_disabled, is_banned); + if is_not_found.or(is_disabled).or(is_banned).await { // This is just a rejected invite, not a room we know // Insert a leave event anyways for the client let event = PduEvent { diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index f7edb8c0..f153b2da 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,23 +6,27 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, PduCount, PduEvent, Result, debug, error, extract_variant, + Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant, + matrix::TypeStateKey, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, }, warn, }; +use conduwuit_service::{ + Services, + rooms::read_receipt::pack_receipts, + sync::{into_db_key, into_snake_key}, +}; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, - api::client::{ - error::ErrorKind, - sync::sync_events::{ - self, DeviceLists, UnreadNotificationsCount, - v4::{SlidingOp, SlidingSyncRoomHero}, - }, + api::client::sync::sync_events::{ + self, DeviceLists, UnreadNotificationsCount, + v4::{SlidingOp, SlidingSyncRoomHero}, }, + directory::RoomTypeFilter, events::{ 
AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, @@ -31,15 +35,15 @@ use ruma::{ serde::Raw, uint, }; -use service::rooms::read_receipt::pack_receipts; use super::{load_timeline, share_encrypted_room}; use crate::{ Ruma, - client::{DEFAULT_BUMP_TYPES, filter_rooms, ignored_filter, sync::v5::TodoRooms}, + client::{DEFAULT_BUMP_TYPES, ignored_filter}, }; -pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; +type TodoRooms = BTreeMap, usize, u64)>; +const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` /// @@ -50,10 +54,11 @@ pub(crate) async fn sync_events_v4_route( ) -> Result { debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let mut body = body.body; + // Setup watchers, so if there's no response, we can wait for them - let watcher = services.sync.watch(sender_user, &sender_device); + let watcher = services.sync.watch(sender_user, sender_device); let next_batch = services.globals.next_count()?; @@ -68,33 +73,21 @@ pub(crate) async fn sync_events_v4_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); - if globalsince != 0 - && !services - .sync - .remembered(sender_user.clone(), sender_device.clone(), conn_id.clone()) - { + let db_key = into_db_key(sender_user, sender_device, conn_id.clone()); + if globalsince != 0 && !services.sync.remembered(&db_key) { debug!("Restarting sync stream because it was gone from the database"); - return Err(Error::Request( - ErrorKind::UnknownPos, - "Connection data lost since last time".into(), - http::StatusCode::BAD_REQUEST, - )); + return Err!(Request(UnknownPos("Connection data lost since last time"))); } if globalsince == 0 { - services.sync.forget_sync_request_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ); + services.sync.forget_sync_request_connection(&db_key); } // Get sticky parameters from cache - let known_rooms = services.sync.update_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); + let snake_key = into_snake_key(sender_user, sender_device, conn_id.clone()); + let known_rooms = services + .sync + .update_sync_request_with_cache(&snake_key, &mut body); let all_joined_rooms: Vec<_> = services .rooms @@ -136,7 +129,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.to_device.enabled.unwrap_or(false) { services .users - .remove_to_device_events(sender_user, &sender_device, globalsince) + .remove_to_device_events(sender_user, sender_device, globalsince) .await; } @@ -261,7 +254,7 @@ pub(crate) async fn sync_events_v4_route( if let Some(Ok(user_id)) = pdu.state_key.as_deref().map(UserId::parse) { - if user_id == *sender_user { + if user_id == sender_user { continue; } @@ -299,7 +292,7 @@ pub(crate) async fn sync_events_v4_route( .state_cache .room_members(room_id) // Don't send key updates from the sender to the sender - .ready_filter(|user_id| sender_user != user_id) + .ready_filter(|&user_id| sender_user != user_id) // Only send keys if the sender doesn't share an encrypted room with the target // already .filter_map(|user_id| { @@ -425,10 +418,9 @@ pub(crate) async fn sync_events_v4_route( }); if let Some(conn_id) = &body.conn_id { + let db_key = 
into_db_key(sender_user, sender_device, conn_id); services.sync.update_sync_known_rooms( - sender_user, - &sender_device, - conn_id.clone(), + &db_key, list_id.clone(), new_known_rooms, globalsince, @@ -478,23 +470,20 @@ pub(crate) async fn sync_events_v4_route( } if let Some(conn_id) = &body.conn_id { + let db_key = into_db_key(sender_user, sender_device, conn_id); services.sync.update_sync_known_rooms( - sender_user, - &sender_device, - conn_id.clone(), + &db_key, "subscriptions".to_owned(), known_subscription_rooms, globalsince, ); } - if let Some(conn_id) = &body.conn_id { - services.sync.update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ); + if let Some(conn_id) = body.conn_id.clone() { + let db_key = into_db_key(sender_user, sender_device, conn_id); + services + .sync + .update_sync_subscriptions(&db_key, body.room_subscriptions); } let mut rooms = BTreeMap::new(); @@ -648,7 +637,7 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_cache .room_members(room_id) - .ready_filter(|member| member != sender_user) + .ready_filter(|&member| member != sender_user) .filter_map(|user_id| { services .rooms @@ -787,7 +776,7 @@ pub(crate) async fn sync_events_v4_route( .users .get_to_device_events( sender_user, - &sender_device, + sender_device, Some(globalsince), Some(next_batch), ) @@ -805,7 +794,7 @@ pub(crate) async fn sync_events_v4_route( }, device_one_time_keys_count: services .users - .count_one_time_keys(sender_user, &sender_device) + .count_one_time_keys(sender_user, sender_device) .await, // Fallback keys are not yet supported device_unused_fallback_key_types: None, @@ -817,3 +806,33 @@ pub(crate) async fn sync_events_v4_route( delta_token: None, }) } + +async fn filter_rooms<'a>( + services: &Services, + rooms: &[&'a RoomId], + filter: &[RoomTypeFilter], + negate: bool, +) -> Vec<&'a RoomId> { + rooms + .iter() + .stream() + .filter_map(|r| async move { + let room_type = services.rooms.state_accessor.get_room_type(r).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(r) + }) + .collect() + .await +} diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 684752ec..f3fc0f44 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -1,31 +1,35 @@ use std::{ cmp::{self, Ordering}, collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + ops::Deref, time::Duration, }; use axum::extract::State; use conduwuit::{ - Error, Result, debug, error, extract_variant, + Err, Error, Result, error, extract_variant, is_equal_to, matrix::{ TypeStateKey, pdu::{PduCount, PduEvent}, }, trace, utils::{ - BoolExt, IterStream, ReadyExt, TryFutureExtExt, + BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::ReadyEqExt, math::{ruma_from_usize, usize_from_ruma}, }, warn, }; -use conduwuit_service::rooms::read_receipt::pack_receipts; -use futures::{FutureExt, StreamExt, TryFutureExt}; +use conduwuit_service::{Services, rooms::read_receipt::pack_receipts, sync::into_snake_key}; +use futures::{ + FutureExt, Stream, StreamExt, TryFutureExt, + future::{OptionFuture, join3, try_join4}, + pin_mut, +}; use ruma::{ DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, - api::client::{ - error::ErrorKind, - 
sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, - }, + api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, + directory::RoomTypeFilter, events::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, room::member::{MembershipState, RoomMemberEventContent}, @@ -34,13 +38,15 @@ use ruma::{ uint, }; -use super::{filter_rooms, share_encrypted_room}; +use super::share_encrypted_room; use crate::{ Ruma, client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline}, }; type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); +type TodoRooms = BTreeMap, usize, u64)>; +type KnownRooms = BTreeMap>; /// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync` /// ([MSC4186]) @@ -53,7 +59,7 @@ type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request /// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 /// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186 pub(crate) async fn sync_events_v5_route( - State(services): State, + State(ref services): State, body: Ruma, ) -> Result { debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); @@ -74,95 +80,95 @@ pub(crate) async fn sync_events_v5_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); - if globalsince != 0 - && !services.sync.snake_connection_cached( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ) { - debug!("Restarting sync stream because it was gone from the database"); - return Err(Error::Request( - ErrorKind::UnknownPos, - "Connection data lost since last time".into(), - http::StatusCode::BAD_REQUEST, - )); + let snake_key = into_snake_key(sender_user, sender_device, conn_id); + + if globalsince != 0 && !services.sync.snake_connection_cached(&snake_key) { + return Err!(Request(UnknownPos( + "Connection data unknown to server; restarting sync stream." 
+ ))); } // Client / User requested an initial sync if globalsince == 0 { - services.sync.forget_snake_sync_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ); + services.sync.forget_snake_sync_connection(&snake_key); } // Get sticky parameters from cache - let known_rooms = services.sync.update_snake_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); + let known_rooms = services + .sync + .update_snake_sync_request_with_cache(&snake_key, &mut body); - let all_joined_rooms: Vec<_> = services + let all_joined_rooms = services .rooms .state_cache .rooms_joined(sender_user) .map(ToOwned::to_owned) - .collect() - .await; + .collect::>(); - let all_invited_rooms: Vec<_> = services + let all_invited_rooms = services .rooms .state_cache .rooms_invited(sender_user) .map(|r| r.0) - .collect() - .await; + .collect::>(); - let all_knocked_rooms: Vec<_> = services + let all_knocked_rooms = services .rooms .state_cache .rooms_knocked(sender_user) .map(|r| r.0) - .collect() - .await; + .collect::>(); - let all_rooms: Vec<&RoomId> = all_joined_rooms - .iter() - .map(AsRef::as_ref) - .chain(all_invited_rooms.iter().map(AsRef::as_ref)) - .chain(all_knocked_rooms.iter().map(AsRef::as_ref)) - .collect(); + let (all_joined_rooms, all_invited_rooms, all_knocked_rooms) = + join3(all_joined_rooms, all_invited_rooms, all_knocked_rooms).await; - let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect(); - let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref); + let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref); + let all_knocked_rooms = all_knocked_rooms.iter().map(AsRef::as_ref); + let all_rooms = all_joined_rooms + .clone() + .chain(all_invited_rooms.clone()) + .chain(all_knocked_rooms.clone()); let pos = next_batch.clone().to_string(); let mut todo_rooms: TodoRooms = BTreeMap::new(); let sync_info: SyncInfo<'_> = (sender_user, sender_device, globalsince, &body); + + let account_data = collect_account_data(services, sync_info).map(Ok); + + let e2ee = collect_e2ee(services, sync_info, all_joined_rooms.clone()); + + let to_device = collect_to_device(services, sync_info, next_batch).map(Ok); + + let receipts = collect_receipts(services).map(Ok); + + let (account_data, e2ee, to_device, receipts) = + try_join4(account_data, e2ee, to_device, receipts).await?; + + let extensions = sync_events::v5::response::Extensions { + account_data, + e2ee, + to_device, + receipts, + typing: sync_events::v5::response::Typing::default(), + }; + let mut response = sync_events::v5::Response { txn_id: body.txn_id.clone(), pos, lists: BTreeMap::new(), rooms: BTreeMap::new(), - extensions: sync_events::v5::response::Extensions { - account_data: collect_account_data(services, sync_info).await, - e2ee: collect_e2ee(services, sync_info, &all_joined_rooms).await?, - to_device: collect_to_device(services, sync_info, next_batch).await, - receipts: collect_receipts(services).await, - typing: sync_events::v5::response::Typing::default(), - }, + extensions, }; handle_lists( services, sync_info, - &all_invited_rooms, - &all_joined_rooms, - &all_rooms, + all_invited_rooms.clone(), + all_joined_rooms.clone(), + all_rooms, &mut todo_rooms, &known_rooms, &mut response, @@ -175,7 +181,7 @@ pub(crate) async fn sync_events_v5_route( services, sender_user, next_batch, - &all_invited_rooms, + all_invited_rooms.clone(), &todo_rooms, &mut response, &body, @@ 
-200,31 +206,33 @@ pub(crate) async fn sync_events_v5_route( } trace!( - rooms=?response.rooms.len(), - account_data=?response.extensions.account_data.rooms.len(), - receipts=?response.extensions.receipts.rooms.len(), + rooms = ?response.rooms.len(), + account_data = ?response.extensions.account_data.rooms.len(), + receipts = ?response.extensions.receipts.rooms.len(), "responding to request with" ); Ok(response) } -type KnownRooms = BTreeMap>; -pub(crate) type TodoRooms = BTreeMap, usize, u64)>; - async fn fetch_subscriptions( - services: crate::State, + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, known_rooms: &KnownRooms, todo_rooms: &mut TodoRooms, ) { let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await - || services.rooms.metadata.is_disabled(room_id).await - || services.rooms.metadata.is_banned(room_id).await - { + let not_exists = services.rooms.metadata.exists(room_id).eq(&false); + + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + let is_banned = services.rooms.metadata.is_banned(room_id); + + pin_mut!(not_exists, is_disabled, is_banned); + if not_exists.or(is_disabled).or(is_banned).await { continue; } + let todo_room = todo_rooms .entry(room_id.clone()) @@ -254,11 +262,10 @@ async fn fetch_subscriptions( // body.room_subscriptions.remove(&r); //} - if let Some(conn_id) = &body.conn_id { + if let Some(conn_id) = body.conn_id.clone() { + let snake_key = into_snake_key(sender_user, sender_device, conn_id); services.sync.update_snake_sync_known_rooms( - sender_user, - sender_device, - conn_id.clone(), + &snake_key, "subscriptions".to_owned(), known_subscription_rooms, globalsince, @@ -267,27 +274,39 @@ async fn fetch_subscriptions( } #[allow(clippy::too_many_arguments)] -async fn handle_lists<'a>( - services: crate::State, +async fn handle_lists<'a, Rooms, AllRooms>( + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, - all_invited_rooms: &Vec<&'a RoomId>, - all_joined_rooms: &Vec<&'a RoomId>, - all_rooms: &Vec<&'a RoomId>, + all_invited_rooms: Rooms, + all_joined_rooms: Rooms, + all_rooms: AllRooms, todo_rooms: &'a mut TodoRooms, known_rooms: &'a KnownRooms, response: &'_ mut sync_events::v5::Response, -) -> KnownRooms { +) -> KnownRooms +where + Rooms: Iterator + Clone + Send + 'a, + AllRooms: Iterator + Clone + Send + 'a, +{ for (list_id, list) in &body.lists { - let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { - | Some(true) => all_invited_rooms, - | Some(false) => all_joined_rooms, - | None => all_rooms, + let active_rooms: Vec<_> = match list.filters.as_ref().and_then(|f| f.is_invite) { + | None => all_rooms.clone().collect(), + | Some(true) => all_invited_rooms.clone().collect(), + | Some(false) => all_joined_rooms.clone().collect(), }; - let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { - | Some(filter) if filter.is_empty() => active_rooms, - | Some(value) => &filter_rooms(&services, active_rooms, &value, true).await, + let active_rooms = match list.filters.as_ref().map(|f| &f.not_room_types) { | None => active_rooms, + | Some(filter) if filter.is_empty() => active_rooms, + | Some(value) => + filter_rooms( + services, + value, + &true, + active_rooms.iter().stream().map(Deref::deref), + ) + .collect() + .await, }; let mut new_known_rooms: BTreeSet = BTreeSet::new(); @@ -305,6 +324,7 @@ async fn handle_lists<'a>( let new_rooms: BTreeSet 
= room_ids.clone().into_iter().map(From::from).collect(); + new_known_rooms.extend(new_rooms); //new_known_rooms.extend(room_ids..cloned()); for room_id in room_ids { @@ -340,29 +360,32 @@ async fn handle_lists<'a>( count: ruma_from_usize(active_rooms.len()), }); - if let Some(conn_id) = &body.conn_id { + if let Some(conn_id) = body.conn_id.clone() { + let snake_key = into_snake_key(sender_user, sender_device, conn_id); services.sync.update_snake_sync_known_rooms( - sender_user, - sender_device, - conn_id.clone(), + &snake_key, list_id.clone(), new_known_rooms, globalsince, ); } } + BTreeMap::default() } -async fn process_rooms( - services: crate::State, +async fn process_rooms<'a, Rooms>( + services: &Services, sender_user: &UserId, next_batch: u64, - all_invited_rooms: &[&RoomId], + all_invited_rooms: Rooms, todo_rooms: &TodoRooms, response: &mut sync_events::v5::Response, body: &sync_events::v5::Request, -) -> Result> { +) -> Result> +where + Rooms: Iterator + Clone + Send + 'a, +{ let mut rooms = BTreeMap::new(); for (room_id, (required_state_request, timeline_limit, roomsince)) in todo_rooms { let roomsincecount = PduCount::Normal(*roomsince); @@ -371,7 +394,7 @@ async fn process_rooms( let mut invite_state = None; let (timeline_pdus, limited); let new_room_id: &RoomId = (*room_id).as_ref(); - if all_invited_rooms.contains(&new_room_id) { + if all_invited_rooms.clone().any(is_equal_to!(new_room_id)) { // TODO: figure out a timestamp we can use for remote invites invite_state = services .rooms @@ -383,7 +406,7 @@ async fn process_rooms( (timeline_pdus, limited) = (Vec::new(), true); } else { (timeline_pdus, limited) = match load_timeline( - &services, + services, sender_user, room_id, roomsincecount, @@ -416,18 +439,17 @@ async fn process_rooms( .rooms .read_receipt .last_privateread_update(sender_user, room_id) - .await > *roomsince; + .await; - let private_read_event = if last_privateread_update { - services - .rooms - .read_receipt - .private_read_get(room_id, sender_user) - .await - .ok() - } else { - None - }; + let private_read_event: OptionFuture<_> = (last_privateread_update > *roomsince) + .then(|| { + services + .rooms + .read_receipt + .private_read_get(room_id, sender_user) + .ok() + }) + .into(); let mut receipts: Vec> = services .rooms @@ -443,7 +465,7 @@ async fn process_rooms( .collect() .await; - if let Some(private_read_event) = private_read_event { + if let Some(private_read_event) = private_read_event.await.flatten() { receipts.push(private_read_event); } @@ -492,7 +514,7 @@ async fn process_rooms( let room_events: Vec<_> = timeline_pdus .iter() .stream() - .filter_map(|item| ignored_filter(&services, item.clone(), sender_user)) + .filter_map(|item| ignored_filter(services, item.clone(), sender_user)) .map(|(_, pdu)| pdu.to_sync_room_event()) .collect() .await; @@ -644,7 +666,7 @@ async fn process_rooms( Ok(rooms) } async fn collect_account_data( - services: crate::State, + services: &Services, (sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request), ) -> sync_events::v5::response::AccountData { let mut account_data = sync_events::v5::response::AccountData { @@ -680,16 +702,19 @@ async fn collect_account_data( account_data } -async fn collect_e2ee<'a>( - services: crate::State, +async fn collect_e2ee<'a, Rooms>( + services: &Services, (sender_user, sender_device, globalsince, body): ( &UserId, &DeviceId, u64, &sync_events::v5::Request, ), - all_joined_rooms: &'a Vec<&'a RoomId>, -) -> Result { + all_joined_rooms: Rooms, +) -> Result 
+where + Rooms: Iterator + Send + 'a, +{ if !body.extensions.e2ee.enabled.unwrap_or(false) { return Ok(sync_events::v5::response::E2EE::default()); } @@ -790,7 +815,7 @@ async fn collect_e2ee<'a>( | MembershipState::Join => { // A new user joined an encrypted room if !share_encrypted_room( - &services, + services, sender_user, user_id, Some(room_id), @@ -823,7 +848,7 @@ async fn collect_e2ee<'a>( // Only send keys if the sender doesn't share an encrypted room with the target // already .filter_map(|user_id| { - share_encrypted_room(&services, sender_user, user_id, Some(room_id)) + share_encrypted_room(services, sender_user, user_id, Some(room_id)) .map(|res| res.or_some(user_id.to_owned())) }) .collect::>() @@ -846,7 +871,7 @@ async fn collect_e2ee<'a>( for user_id in left_encrypted_users { let dont_share_encrypted_room = - !share_encrypted_room(&services, sender_user, &user_id, None).await; + !share_encrypted_room(services, sender_user, &user_id, None).await; // If the user doesn't share an encrypted room with the target anymore, we need // to tell them @@ -856,20 +881,22 @@ async fn collect_e2ee<'a>( } Ok(sync_events::v5::response::E2EE { - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, + device_unused_fallback_key_types: None, + device_one_time_keys_count: services .users .count_one_time_keys(sender_user, sender_device) .await, - device_unused_fallback_key_types: None, + + device_lists: DeviceLists { + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, }) } async fn collect_to_device( - services: crate::State, + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, next_batch: u64, ) -> Option { @@ -892,7 +919,35 @@ async fn collect_to_device( }) } -async fn collect_receipts(_services: crate::State) -> sync_events::v5::response::Receipts { +async fn collect_receipts(_services: &Services) -> sync_events::v5::response::Receipts { sync_events::v5::response::Receipts { rooms: BTreeMap::new() } // TODO: get explicitly requested read receipts } + +fn filter_rooms<'a, Rooms>( + services: &'a Services, + filter: &'a [RoomTypeFilter], + negate: &'a bool, + rooms: Rooms, +) -> impl Stream + Send + 'a +where + Rooms: Stream + Send + 'a, +{ + rooms.filter_map(async |room_id| { + let room_type = services.rooms.state_accessor.get_room_type(room_id).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if *negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(room_id) + }) +} diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 99b3bb67..748fc049 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,7 +1,10 @@ use axum::extract::State; use conduwuit::{ Result, - utils::{future::BoolExt, stream::BroadbandExt}, + utils::{ + future::BoolExt, + stream::{BroadbandExt, ReadyExt}, + }, }; use futures::{FutureExt, StreamExt, pin_mut}; use ruma::{ @@ -30,29 +33,21 @@ pub(crate) async fn search_users_route( .map_or(LIMIT_DEFAULT, usize::from) .min(LIMIT_MAX); + let search_term = body.search_term.to_lowercase(); let mut users = services .users .stream() + .ready_filter(|user_id| user_id.as_str().to_lowercase().contains(&search_term)) .map(ToOwned::to_owned) 
.broad_filter_map(async |user_id| { - let user = search_users::v3::User { - user_id: user_id.clone(), - display_name: services.users.displayname(&user_id).await.ok(), - avatar_url: services.users.avatar_url(&user_id).await.ok(), - }; + let display_name = services.users.displayname(&user_id).await.ok(); - let user_id_matches = user - .user_id - .as_str() - .to_lowercase() - .contains(&body.search_term.to_lowercase()); + let display_name_matches = display_name + .as_deref() + .map(str::to_lowercase) + .is_some_and(|display_name| display_name.contains(&search_term)); - let user_displayname_matches = user.display_name.as_ref().is_some_and(|name| { - name.to_lowercase() - .contains(&body.search_term.to_lowercase()) - }); - - if !user_id_matches && !user_displayname_matches { + if !display_name_matches { return None; } @@ -61,11 +56,11 @@ pub(crate) async fn search_users_route( .state_cache .rooms_joined(&user_id) .map(ToOwned::to_owned) - .any(|room| async move { + .broad_any(async |room_id| { services .rooms .state_accessor - .get_join_rules(&room) + .get_join_rules(&room_id) .map(|rule| matches!(rule, JoinRule::Public)) .await }); @@ -76,8 +71,14 @@ pub(crate) async fn search_users_route( .user_sees_user(sender_user, &user_id); pin_mut!(user_in_public_room, user_sees_user); - - user_in_public_room.or(user_sees_user).await.then_some(user) + user_in_public_room + .or(user_sees_user) + .await + .then_some(search_users::v3::User { + user_id: user_id.clone(), + display_name, + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }) }); let results = users.by_ref().take(limit).collect().await; diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index eedab981..fe2281ba 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -1,5 +1,6 @@ use axum::{Json, extract::State, response::IntoResponse}; use conduwuit::{Error, Result}; +use futures::StreamExt; use ruma::api::client::{ discovery::{ discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, @@ -17,7 +18,7 @@ pub(crate) async fn well_known_client( State(services): State, _body: Ruma, ) -> Result { - let client_url = match services.server.config.well_known.client.as_ref() { + let client_url = match services.config.well_known.client.as_ref() { | Some(url) => url.to_string(), | None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), }; @@ -33,44 +34,63 @@ pub(crate) async fn well_known_client( /// # `GET /.well-known/matrix/support` /// /// Server support contact and support page of a homeserver's domain. +/// Implements MSC1929 for server discovery. +/// If no configuration is set, uses admin users as contacts. 
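For reference, the /.well-known/matrix/support document assembled below ends up shaped roughly like this (field names per MSC1929; all values here are hypothetical):

    use serde_json::json;

    fn example_support_body() -> serde_json::Value {
        json!({
            "contacts": [{
                "matrix_id": "@admin:example.org", // hypothetical admin user
                "email_address": "admin@example.org", // hypothetical
                "role": "m.role.admin",
            }],
            "support_page": "https://example.org/support", // hypothetical
        })
    }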
 pub(crate) async fn well_known_support(
 	State(services): State,
 	_body: Ruma,
 ) -> Result {
 	let support_page = services
-		.server
 		.config
 		.well_known
 		.support_page
 		.as_ref()
 		.map(ToString::to_string);
-	let role = services.server.config.well_known.support_role.clone();
+	let email_address = services.config.well_known.support_email.clone();
+	let matrix_id = services.config.well_known.support_mxid.clone();
-	// support page or role must be either defined for this to be valid
-	if support_page.is_none() && role.is_none() {
-		return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
-	}
-
-	let email_address = services.server.config.well_known.support_email.clone();
-	let matrix_id = services.server.config.well_known.support_mxid.clone();
-
-	// if a role is specified, an email address or matrix id is required
-	if role.is_some() && (email_address.is_none() && matrix_id.is_none()) {
-		return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
-	}
-
-	// TOOD: support defining multiple contacts in the config
+	// TODO: support defining multiple contacts in the config
 	let mut contacts: Vec<Contact> = vec![];
-	if let Some(role) = role {
-		let contact = Contact { role, email_address, matrix_id };
+	let role_value = services
+		.config
+		.well_known
+		.support_role
+		.clone()
+		.unwrap_or_else(|| "m.role.admin".to_owned().into());
-		contacts.push(contact);
+	// Add configured contact if at least one contact method is specified
+	if email_address.is_some() || matrix_id.is_some() {
+		contacts.push(Contact {
+			role: role_value.clone(),
+			email_address: email_address.clone(),
+			matrix_id: matrix_id.clone(),
+		});
+	}
+
+	// Try to add admin users as contacts if no contacts are configured
+	if contacts.is_empty() {
+		if let Ok(admin_room) = services.admin.get_admin_room().await {
+			let admin_users = services.rooms.state_cache.room_members(&admin_room);
+			let mut stream = admin_users;
+
+			while let Some(user_id) = stream.next().await {
+				// Skip the server user, but keep collecting the other admins
+				if *user_id == services.globals.server_user {
+					continue;
+				}
+				contacts.push(Contact {
+					role: role_value.clone(),
+					email_address: None,
+					matrix_id: Some(user_id.to_owned()),
+				});
+			}
+		}
 	}
-	// support page or role+contacts must be either defined for this to be valid
 	if contacts.is_empty() && support_page.is_none() {
+		// No admin room, no configured contacts, and no support page
 		return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
 	}
@@ -84,9 +104,9 @@ pub(crate) async fn syncv3_client_server_json(
 	State(services): State,
 ) -> Result {
-	let server_url = match services.server.config.well_known.client.as_ref() {
+	let server_url = match services.config.well_known.client.as_ref() {
 		| Some(url) => url.to_string(),
-		| None => match services.server.config.well_known.server.as_ref() {
+		| None => match services.config.well_known.server.as_ref() {
 			| Some(url) => url.to_string(),
 			| None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
 		},
diff --git a/src/api/router.rs b/src/api/router.rs
index 3fbef275..5416e9e9 100644
--- a/src/api/router.rs
+++ b/src/api/router.rs
@@ -3,7 +3,6 @@ mod auth;
 mod handler;
 mod request;
 mod response;
-pub mod state;
 
 use std::str::FromStr;
 
@@ -13,10 +12,11 @@ use axum::{
 	routing::{any, get, post},
 };
 use conduwuit::{Server, err};
+pub(super) use conduwuit_service::state::State;
 use http::{Uri, uri};
 
 use self::handler::RouterExt;
-pub(super) use self::{args::Args as Ruma, response::RumaResponse, state::State};
+pub(super) use self::{args::Args as 
Ruma, response::RumaResponse}; use crate::{client, server}; pub fn build(router: Router, server: &Server) -> Router { diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 0eb61ca6..01254c32 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -306,7 +306,7 @@ async fn auth_server( } fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { - if !services.server.config.allow_federation { + if !services.config.allow_federation { return Err!(Config("allow_federation", "Federation is disabled.")); } @@ -316,11 +316,7 @@ fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { } let origin = &x_matrix.origin; - if services - .config - .forbidden_remote_server_names - .is_match(origin.host()) - { + if services.moderation.is_remote_server_forbidden(origin) { return Err!(Request(Forbidden(debug_warn!( "Federation requests from {origin} denied." )))); diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index edd6ac16..f53e1a15 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -37,19 +37,14 @@ pub(crate) async fn create_invite_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Received federated/remote invite from banned server {} for room ID {}. Rejecting.", diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index ac2c5485..3204c30c 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -42,9 +42,8 @@ pub(crate) async fn create_join_event_template_route( .await?; if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} for remote user {} tried joining room ID {} which has a server name that \ @@ -57,11 +56,7 @@ pub(crate) async fn create_join_event_template_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden(warn!( "Room ID server name {server} is banned on this homeserver." 
)))); diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 511c13b2..423c8e81 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -33,9 +33,8 @@ pub(crate) async fn create_knock_event_template_route( .await?; if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} for remote user {} tried knocking room ID {} which has a server name \ @@ -48,11 +47,7 @@ pub(crate) async fn create_knock_event_template_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index a66d8890..895eca81 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -268,9 +268,8 @@ pub(crate) async fn create_join_event_v1_route( body: Ruma, ) -> Result { if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} tried joining room ID {} through us who has a server name that is \ @@ -282,11 +281,7 @@ pub(crate) async fn create_join_event_v1_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ globally forbidden. Rejecting.", @@ -314,19 +309,14 @@ pub(crate) async fn create_join_event_v2_route( body: Ruma, ) -> Result { if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ globally forbidden. Rejecting.", diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index ee7b6cba..8d3697d2 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -26,9 +26,8 @@ pub(crate) async fn create_knock_event_v1_route( body: Ruma, ) -> Result { if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} tried knocking room ID {} who has a server name that is globally \ @@ -40,11 +39,7 @@ pub(crate) async fn create_knock_event_v1_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried knocking room ID {} which has a server name that is globally \ forbidden. 
Rejecting.", diff --git a/src/build_metadata/Cargo.toml b/src/build_metadata/Cargo.toml new file mode 100644 index 00000000..62c4dc70 --- /dev/null +++ b/src/build_metadata/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "conduwuit_build_metadata" +categories.workspace = true +description.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + + +build = "build.rs" +# [[bin]] +# path = "main.rs" +# name = "conduwuit_build_metadata" + +[lib] +path = "mod.rs" +crate-type = [ + "rlib", + # "dylib", +] + +[features] + + +[dependencies] + +[build-dependencies] +built = { version = "0.8", features = [] } + +[lints] +workspace = true diff --git a/src/build_metadata/build.rs b/src/build_metadata/build.rs new file mode 100644 index 00000000..bfdf20b1 --- /dev/null +++ b/src/build_metadata/build.rs @@ -0,0 +1,93 @@ +use std::process::Command; + +fn run_git_command(args: &[&str]) -> Option { + Command::new("git") + .args(args) + .output() + .ok() + .filter(|output| output.status.success()) + .and_then(|output| String::from_utf8(output.stdout).ok()) + .map(|s| s.trim().to_owned()) + .filter(|s| !s.is_empty()) +} +fn get_env(env_var: &str) -> Option { + match std::env::var(env_var) { + | Ok(val) if !val.is_empty() => Some(val), + | _ => None, + } +} +fn main() { + // built gets the default crate from the workspace. Not sure if this is intended + // behavior, but it's what we want. + built::write_built_file().expect("Failed to acquire build-time information"); + + // --- Git Information --- + let mut commit_hash = None; + let mut commit_hash_short = None; + let mut remote_url_web = None; + + // Get full commit hash + if let Some(hash) = + get_env("GIT_COMMIT_HASH").or_else(|| run_git_command(&["rev-parse", "HEAD"])) + { + println!("cargo:rustc-env=GIT_COMMIT_HASH={hash}"); + commit_hash = Some(hash); + } + + // Get short commit hash + if let Some(short_hash) = get_env("GIT_COMMIT_HASH_SHORT") + .or_else(|| run_git_command(&["rev-parse", "--short", "HEAD"])) + { + println!("cargo:rustc-env=GIT_COMMIT_HASH_SHORT={short_hash}"); + commit_hash_short = Some(short_hash); + } + + // Get remote URL and convert to web URL + if let Some(remote_url_raw) = get_env("GIT_REMOTE_URL") + .or_else(|| run_git_command(&["config", "--get", "remote.origin.url"])) + { + println!("cargo:rustc-env=GIT_REMOTE_URL={remote_url_raw}"); + let web_url = if remote_url_raw.starts_with("https://") { + remote_url_raw.trim_end_matches(".git").to_owned() + } else if remote_url_raw.starts_with("git@") { + remote_url_raw + .trim_end_matches(".git") + .replacen(':', "/", 1) + .replacen("git@", "https://", 1) + } else if remote_url_raw.starts_with("ssh://") { + remote_url_raw + .trim_end_matches(".git") + .replacen("git@", "", 1) + .replacen("ssh:", "https:", 1) + } else { + // Assume it's already a web URL or unknown format + remote_url_raw + }; + println!("cargo:rustc-env=GIT_REMOTE_WEB_URL={web_url}"); + remote_url_web = Some(web_url); + } + + // Construct remote commit URL + if let Some(remote_commit_url) = get_env("GIT_REMOTE_COMMIT_URL") { + println!("cargo:rustc-env=GIT_REMOTE_COMMIT_URL={remote_commit_url}"); + } else if let (Some(base_url), Some(hash)) = + (&remote_url_web, commit_hash.as_ref().or(commit_hash_short.as_ref())) + { + let commit_page = format!("{base_url}/commit/{hash}"); + println!("cargo:rustc-env=GIT_REMOTE_COMMIT_URL={commit_page}"); + } + + // --- Rerun Triggers --- + // TODO: The git rerun 
triggers seem to always run + // Rerun if the git HEAD changes + println!("cargo:rerun-if-changed=.git/HEAD"); + // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch) + if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"]) { + println!("cargo:rerun-if-changed=.git/{ref_path}"); + } + + println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH"); + println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH_SHORT"); + println!("cargo:rerun-if-env-changed=GIT_REMOTE_URL"); + println!("cargo:rerun-if-env-changed=GIT_REMOTE_COMMIT_URL"); +} diff --git a/src/build_metadata/mod.rs b/src/build_metadata/mod.rs new file mode 100644 index 00000000..86a8a800 --- /dev/null +++ b/src/build_metadata/mod.rs @@ -0,0 +1,29 @@ +pub mod built { + include!(concat!(env!("OUT_DIR"), "/built.rs")); +} + +pub static GIT_COMMIT_HASH: Option<&str> = option_env!("GIT_COMMIT_HASH"); + +pub static GIT_COMMIT_HASH_SHORT: Option<&str> = option_env!("GIT_COMMIT_HASH_SHORT"); + +// this would be a lot better if Option::or was const. +pub static VERSION_EXTRA: Option<&str> = + if let v @ Some(_) = option_env!("CONTINUWUITY_VERSION_EXTRA") { + v + } else if let v @ Some(_) = option_env!("CONDUWUIT_VERSION_EXTRA") { + v + } else { + option_env!("CONDUIT_VERSION_EXTRA") + }; + +#[must_use] +pub fn version_tag() -> Option<&'static str> { + VERSION_EXTRA + .filter(|s| !s.is_empty()) + .or(GIT_COMMIT_HASH_SHORT) +} + +pub static GIT_REMOTE_WEB_URL: Option<&str> = option_env!("GIT_REMOTE_WEB_URL"); +pub static GIT_REMOTE_COMMIT_URL: Option<&str> = option_env!("GIT_REMOTE_COMMIT_URL"); + +// TODO: Mark dirty builds within the version string diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 4848e742..0c33c590 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -17,17 +17,24 @@ crate-type = [ ] [features] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", +brotli_compression = [ + "reqwest/brotli", +] +conduwuit_mods = [ + "dep:libloading" +] +gzip_compression = [ + "reqwest/gzip", +] +hardened_malloc = [ + "dep:hardened_malloc-rs" ] jemalloc = [ "dep:tikv-jemalloc-sys", "dep:tikv-jemalloc-ctl", "dep:tikv-jemallocator", ] +jemalloc_conf = [] jemalloc_prof = [ "tikv-jemalloc-sys/profiling", ] @@ -36,24 +43,17 @@ jemalloc_stats = [ "tikv-jemalloc-ctl/stats", "tikv-jemallocator/stats", ] -jemalloc_conf = [] -hardened_malloc = [ - "dep:hardened_malloc-rs" -] -gzip_compression = [ - "reqwest/gzip", -] -brotli_compression = [ - "reqwest/brotli", +perf_measurements = [] +release_max_log_level = [ + "tracing/max_level_trace", + "tracing/release_max_level_info", + "log/max_level_trace", + "log/release_max_level_info", ] +sentry_telemetry = [] zstd_compression = [ "reqwest/zstd", ] -perf_measurements = [] -sentry_telemetry = [] -conduwuit_mods = [ - "dep:libloading" -] [dependencies] argon2.workspace = true @@ -67,6 +67,7 @@ checked_ops.workspace = true chrono.workspace = true clap.workspace = true conduwuit-macros.workspace = true +conduwuit-build-metadata.workspace = true const-str.workspace = true core_affinity.workspace = true ctor.workspace = true diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 2424e99c..e138233e 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -274,6 +274,10 @@ pub fn set_dirty_decay>>(arena: I, decay_ms: isize) -> Res } } +pub fn background_thread_enable(enable: bool) -> Result { + set::(&mallctl!("background_thread"), 
enable.into()).map(is_nonzero!()) +} + #[inline] #[must_use] pub fn is_affine_arena() -> bool { is_percpu_arena() || is_phycpu_arena() } diff --git a/src/core/config/check.rs b/src/core/config/check.rs index f9d51eeb..ded9533d 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -118,7 +118,7 @@ pub fn check(config: &Config) -> Result { if cfg!(not(debug_assertions)) && config.server_name == "your.server.name" { return Err!(Config( "server_name", - "You must specify a valid server name for production usage of conduwuit." + "You must specify a valid server name for production usage of continuwuity." )); } @@ -290,7 +290,7 @@ fn warn_deprecated(config: &Config) { if was_deprecated { warn!( - "Read conduwuit config documentation at https://conduwuit.puppyirl.gay/configuration.html and check your \ + "Read continuwuity config documentation at https://continuwuity.org/configuration.html and check your \ configuration if any new configuration parameters should be adjusted" ); } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a7205423..d4a10345 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -27,7 +27,7 @@ use self::proxy::ProxyConfig; pub use self::{check::check, manager::Manager}; use crate::{Result, err, error::Error, utils::sys}; -/// All the config options for conduwuit. +/// All the config options for continuwuity. #[allow(clippy::struct_excessive_bools)] #[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] #[derive(Clone, Debug, Deserialize)] @@ -35,7 +35,7 @@ use crate::{Result, err, error::Error, utils::sys}; filename = "conduwuit-example.toml", section = "global", undocumented = "# This item is undocumented. Please contribute documentation for it.", - header = r#"### conduwuit Configuration + header = r#"### continuwuity Configuration ### ### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE ### OVERWRITTEN! @@ -50,7 +50,7 @@ use crate::{Result, err, error::Error, utils::sys}; ### that say "YOU NEED TO EDIT THIS". ### ### For more information, see: -### https://conduwuit.puppyirl.gay/configuration.html +### https://continuwuity.org/configuration.html "#, ignore = "catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure" )] @@ -59,7 +59,7 @@ pub struct Config { /// suffix for user and room IDs/aliases. /// /// See the docs for reverse proxying and delegation: - /// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy + /// https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy /// /// Also see the `[global.well_known]` config section at the very bottom. /// @@ -70,10 +70,10 @@ pub struct Config { /// YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE /// WIPE. /// - /// example: "conduwuit.woof" + /// example: "continuwuity.org" pub server_name: OwnedServerName, - /// The default address (IPv4 or IPv6) conduwuit will listen on. + /// The default address (IPv4 or IPv6) continuwuity will listen on. /// /// If you are using Docker or a container NAT networking setup, this must /// be "0.0.0.0". @@ -85,10 +85,10 @@ pub struct Config { #[serde(default = "default_address")] address: ListeningAddr, - /// The port(s) conduwuit will listen on. + /// The port(s) continuwuity will listen on. 
/// /// For reverse proxying, see: - /// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy + /// https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy /// /// If you are using Docker, don't change this, you'll need to map an /// external port to this. @@ -103,16 +103,17 @@ pub struct Config { #[serde(default)] pub tls: TlsConfig, - /// The UNIX socket conduwuit will listen on. + /// The UNIX socket continuwuity will listen on. /// - /// conduwuit cannot listen on both an IP address and a UNIX socket. If + /// continuwuity cannot listen on both an IP address and a UNIX socket. If /// listening on a UNIX socket, you MUST remove/comment the `address` key. /// /// Remember to make sure that your reverse proxy has access to this socket - /// file, either by adding your reverse proxy to the 'conduwuit' group or - /// granting world R/W permissions with `unix_socket_perms` (666 minimum). + /// file, either by adding your reverse proxy to the appropriate user group + /// or granting world R/W permissions with `unix_socket_perms` (666 + /// minimum). /// - /// example: "/run/conduwuit/conduwuit.sock" + /// example: "/run/continuwuity/continuwuity.sock" pub unix_socket_path: Option, /// The default permissions (in octal) to create the UNIX socket with. @@ -121,22 +122,22 @@ pub struct Config { #[serde(default = "default_unix_socket_perms")] pub unix_socket_perms: u32, - /// This is the only directory where conduwuit will save its data, including - /// media. Note: this was previously "/var/lib/matrix-conduit". + /// This is the only directory where continuwuity will save its data, + /// including media. Note: this was previously "/var/lib/matrix-conduit". /// /// YOU NEED TO EDIT THIS. /// - /// example: "/var/lib/conduwuit" + /// example: "/var/lib/continuwuity" pub database_path: PathBuf, - /// conduwuit supports online database backups using RocksDB's Backup engine - /// API. To use this, set a database backup path that conduwuit can write - /// to. + /// continuwuity supports online database backups using RocksDB's Backup + /// engine API. To use this, set a database backup path that continuwuity + /// can write to. /// /// For more information, see: - /// https://conduwuit.puppyirl.gay/maintenance.html#backups + /// https://continuwuity.org/maintenance.html#backups /// - /// example: "/opt/conduwuit-db-backups" + /// example: "/opt/continuwuity-db-backups" pub database_backup_path: Option, /// The amount of online RocksDB database backups to keep/retain, if using @@ -160,18 +161,16 @@ pub struct Config { #[serde(default = "default_new_user_displayname_suffix")] pub new_user_displayname_suffix: String, - /// If enabled, conduwuit will send a simple GET request periodically to - /// `https://pupbrain.dev/check-for-updates/stable` for any new - /// announcements made. Despite the name, this is not an update check - /// endpoint, it is simply an announcement check endpoint. + /// If enabled, continuwuity will send a simple GET request periodically to + /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new + /// announcements or major updates. This is not an update check endpoint. /// - /// This is disabled by default as this is rarely used except for security - /// updates or major updates. 
- #[serde(default, alias = "allow_announcements_check")] - pub allow_check_for_updates: bool, + /// default: true + #[serde(alias = "allow_check_for_updates", default = "true_fn")] + pub allow_announcements_check: bool, - /// Set this to any float value to multiply conduwuit's in-memory LRU caches - /// with such as "auth_chain_cache_capacity". + /// Set this to any float value to multiply continuwuity's in-memory LRU + /// caches with such as "auth_chain_cache_capacity". /// /// May be useful if you have significant memory to spare to increase /// performance. @@ -188,7 +187,7 @@ pub struct Config { )] pub cache_capacity_modifier: f64, - /// Set this to any float value in megabytes for conduwuit to tell the + /// Set this to any float value in megabytes for continuwuity to tell the /// database engine that this much memory is available for database read /// caches. /// @@ -204,7 +203,7 @@ pub struct Config { #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, - /// Set this to any float value in megabytes for conduwuit to tell the + /// Set this to any float value in megabytes for continuwuity to tell the /// database engine that this much memory is available for database write /// caches. /// @@ -321,9 +320,9 @@ pub struct Config { /// Enable using *only* TCP for querying your specified nameservers instead /// of UDP. /// - /// If you are running conduwuit in a container environment, this config + /// If you are running continuwuity in a container environment, this config /// option may need to be enabled. For more details, see: - /// https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker + /// https://continuwuity.org/troubleshooting.html#potential-dns-issues-when-using-docker #[serde(default)] pub query_over_tcp_only: bool, @@ -536,9 +535,9 @@ pub struct Config { /// tokens. Multiple tokens can be added if you separate them with /// whitespace /// - /// conduwuit must be able to access the file, and it must not be empty + /// continuwuity must be able to access the file, and it must not be empty /// - /// example: "/etc/conduwuit/.reg_token" + /// example: "/etc/continuwuity/.reg_token" pub registration_token_file: Option, /// Controls whether encrypted rooms and events are allowed. @@ -629,16 +628,16 @@ pub struct Config { pub allow_room_creation: bool, /// Set to false to disable users from joining or creating room versions - /// that aren't officially supported by conduwuit. + /// that aren't officially supported by continuwuity. /// - /// conduwuit officially supports room versions 6 - 11. + /// continuwuity officially supports room versions 6 - 11. /// - /// conduwuit has slightly experimental (though works fine in practice) + /// continuwuity has slightly experimental (though works fine in practice) /// support for versions 3 - 5. #[serde(default = "true_fn")] pub allow_unstable_room_versions: bool, - /// Default room version conduwuit will create rooms with. + /// Default room version continuwuity will create rooms with. /// /// Per spec, room version 11 is the default. /// @@ -712,7 +711,7 @@ pub struct Config { /// Servers listed here will be used to gather public keys of other servers /// (notary trusted key servers). /// - /// Currently, conduwuit doesn't support inbound batched key requests, so + /// Currently, continuwuity doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers. 
/// /// example: ["matrix.org", "tchncs.de"] @@ -757,7 +756,7 @@ pub struct Config { #[serde(default = "default_trusted_server_batch_size")] pub trusted_server_batch_size: usize, - /// Max log level for conduwuit. Allows debug, info, warn, or error. + /// Max log level for continuwuity. Allows debug, info, warn, or error. /// /// See also: /// https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives @@ -782,8 +781,9 @@ pub struct Config { #[serde(default = "default_log_span_events")] pub log_span_events: String, - /// Configures whether CONDUWUIT_LOG EnvFilter matches values using regular - /// expressions. See the tracing_subscriber documentation on Directives. + /// Configures whether CONTINUWUITY_LOG EnvFilter matches values using + /// regular expressions. See the tracing_subscriber documentation on + /// Directives. /// /// default: true #[serde(default = "true_fn")] @@ -865,7 +865,7 @@ pub struct Config { /// This takes priority over "turn_secret" first, and falls back to /// "turn_secret" if invalid or failed to open. /// - /// example: "/etc/conduwuit/.turn_secret" + /// example: "/etc/continuwuity/.turn_secret" pub turn_secret_file: Option, /// TURN TTL, in seconds. @@ -874,12 +874,12 @@ pub struct Config { #[serde(default = "default_turn_ttl")] pub turn_ttl: u64, - /// List/vector of room IDs or room aliases that conduwuit will make newly - /// registered users join. The rooms specified must be rooms that you have - /// joined at least once on the server, and must be public. + /// List/vector of room IDs or room aliases that continuwuity will make + /// newly registered users join. The rooms specified must be rooms that you + /// have joined at least once on the server, and must be public. /// - /// example: ["#conduwuit:puppygock.gay", - /// "!eoIzvAvVwY23LPDay8:puppygock.gay"] + /// example: ["#continuwuity:continuwuity.org", + /// "!main-1:continuwuity.org"] /// /// default: [] #[serde(default = "Vec::new")] @@ -904,10 +904,10 @@ pub struct Config { #[serde(default)] pub auto_deactivate_banned_room_attempts: bool, - /// RocksDB log level. This is not the same as conduwuit's log level. This - /// is the log level for the RocksDB engine/library which show up in your - /// database folder/path as `LOG` files. conduwuit will log RocksDB errors - /// as normal through tracing or panics if severe for safety. + /// RocksDB log level. This is not the same as continuwuity's log level. + /// This is the log level for the RocksDB engine/library which show up in + /// your database folder/path as `LOG` files. continuwuity will log RocksDB + /// errors as normal through tracing or panics if severe for safety. /// /// default: "error" #[serde(default = "default_rocksdb_log_level")] @@ -932,7 +932,7 @@ pub struct Config { /// Set this to true to use RocksDB config options that are tailored to HDDs /// (slower device storage). /// - /// It is worth noting that by default, conduwuit will use RocksDB with + /// It is worth noting that by default, continuwuity will use RocksDB with /// Direct IO enabled. *Generally* speaking this improves performance as it /// bypasses buffered I/O (system page cache). However there is a potential /// chance that Direct IO may cause issues with database operations if your @@ -940,7 +940,7 @@ pub struct Config { /// possibly ZFS filesystem. RocksDB generally deals/corrects these issues /// but it cannot account for all setups. 
If you experience any weird /// RocksDB issues, try enabling this option as it turns off Direct IO and - /// feel free to report in the conduwuit Matrix room if this option fixes + /// feel free to report in the continuwuity Matrix room if this option fixes /// your DB issues. /// /// For more information, see: @@ -1001,7 +1001,7 @@ pub struct Config { /// as they all differ. See their `kDefaultCompressionLevel`. /// /// Note when using the default value we may override it with a setting - /// tailored specifically conduwuit. + /// tailored specifically for continuwuity. /// /// default: 32767 #[serde(default = "default_rocksdb_compression_level")] @@ -1019,7 +1019,7 @@ pub struct Config { /// algorithm. /// /// Note when using the default value we may override it with a setting - /// tailored specifically conduwuit. + /// tailored specifically for continuwuity. /// /// default: 32767 #[serde(default = "default_rocksdb_bottommost_compression_level")] @@ -1061,13 +1061,13 @@ pub struct Config { /// 0 = AbsoluteConsistency /// 1 = TolerateCorruptedTailRecords (default) /// 2 = PointInTime (use me if trying to recover) - /// 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) + /// 3 = SkipAnyCorruptedRecord (you now voided your Continuwuity warranty) /// /// For more information on these modes, see: /// https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes /// /// For more details on recovering a corrupt database, see: - /// https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption + /// https://continuwuity.org/troubleshooting.html#database-corruption /// /// default: 1 #[serde(default = "default_rocksdb_recovery_mode")] @@ -1111,7 +1111,7 @@ pub struct Config { /// - Disabling repair mode and restarting the server is recommended after /// running the repair. /// - /// See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. + /// See https://continuwuity.org/troubleshooting.html#database-corruption for more details on recovering a corrupt database. #[serde(default)] pub rocksdb_repair: bool, @@ -1133,10 +1133,10 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_compaction_ioprio_idle: bool, - /// Disables RocksDB compaction. You should never ever have to set this - /// option to true. If you for some reason find yourself needing to use this - /// option as part of troubleshooting or a bug, please reach out to us in - /// the conduwuit Matrix room with information and details. + /// Enables RocksDB compaction. You should never ever have to set this + /// option to false. If you for some reason find yourself needing to use + /// this option as part of troubleshooting or a bug, please reach out to us + /// in the continuwuity Matrix room with information and details. /// /// Disabling compaction will lead to a significantly bloated and /// explosively large database, gradually poor performance, unnecessarily @@ -1164,7 +1164,7 @@ pub struct Config { /// purposes such as recovering/recreating your admin room, or inviting /// yourself back. /// - /// See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. + /// See https://continuwuity.org/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. /// /// Once this password is unset, all sessions will be logged out for /// security purposes. 
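Editor's note: the four WAL recovery modes enumerated above map cleanly onto a small enum. A sketch of validating the `rocksdb_recovery_mode` integer (type and function names are illustrative, not the repo's actual plumbing):

```rust
/// The four documented WAL recovery modes for `rocksdb_recovery_mode`
/// (names follow the RocksDB wiki page linked above).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum RecoveryMode {
    AbsoluteConsistency,          // 0
    TolerateCorruptedTailRecords, // 1 (default)
    PointInTime,                  // 2 (use when trying to recover)
    SkipAnyCorruptedRecord,       // 3 (warranty-voiding)
}

fn parse_recovery_mode(raw: u8) -> Result<RecoveryMode, String> {
    match raw {
        0 => Ok(RecoveryMode::AbsoluteConsistency),
        1 => Ok(RecoveryMode::TolerateCorruptedTailRecords),
        2 => Ok(RecoveryMode::PointInTime),
        3 => Ok(RecoveryMode::SkipAnyCorruptedRecord),
        other => Err(format!("rocksdb_recovery_mode must be 0-3, got {other}")),
    }
}
```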
@@ -1180,8 +1180,8 @@ pub struct Config { /// Allow local (your server only) presence updates/requests. /// - /// Note that presence on conduwuit is very fast unlike Synapse's. If using - /// outgoing presence, this MUST be enabled. + /// Note that presence on continuwuity is very fast unlike Synapse's. If + /// using outgoing presence, this MUST be enabled. #[serde(default = "true_fn")] pub allow_local_presence: bool, @@ -1189,7 +1189,7 @@ pub struct Config { /// /// This option receives presence updates from other servers, but does not /// send any unless `allow_outgoing_presence` is true. Note that presence on - /// conduwuit is very fast unlike Synapse's. + /// continuwuity is very fast unlike Synapse's. #[serde(default = "true_fn")] pub allow_incoming_presence: bool, @@ -1197,8 +1197,8 @@ pub struct Config { /// /// This option sends presence updates to other servers, but does not /// receive any unless `allow_incoming_presence` is true. Note that presence - /// on conduwuit is very fast unlike Synapse's. If using outgoing presence, - /// you MUST enable `allow_local_presence` as well. + /// on continuwuity is very fast unlike Synapse's. If using outgoing + /// presence, you MUST enable `allow_local_presence` as well. #[serde(default = "true_fn")] pub allow_outgoing_presence: bool, @@ -1261,8 +1261,8 @@ pub struct Config { #[serde(default = "default_typing_client_timeout_max_s")] pub typing_client_timeout_max_s: u64, - /// Set this to true for conduwuit to compress HTTP response bodies using - /// zstd. This option does nothing if conduwuit was not built with + /// Set this to true for continuwuity to compress HTTP response bodies using + /// zstd. This option does nothing if continuwuity was not built with /// `zstd_compression` feature. Please be aware that enabling HTTP /// compression may weaken TLS. Most users should not need to enable this. /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH @@ -1270,8 +1270,8 @@ pub struct Config { #[serde(default)] pub zstd_compression: bool, - /// Set this to true for conduwuit to compress HTTP response bodies using - /// gzip. This option does nothing if conduwuit was not built with + /// Set this to true for continuwuity to compress HTTP response bodies using + /// gzip. This option does nothing if continuwuity was not built with /// `gzip_compression` feature. Please be aware that enabling HTTP /// compression may weaken TLS. Most users should not need to enable this. /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before @@ -1282,8 +1282,8 @@ pub struct Config { #[serde(default)] pub gzip_compression: bool, - /// Set this to true for conduwuit to compress HTTP response bodies using - /// brotli. This option does nothing if conduwuit was not built with + /// Set this to true for continuwuity to compress HTTP response bodies using + /// brotli. This option does nothing if continuwuity was not built with /// `brotli_compression` feature. Please be aware that enabling HTTP /// compression may weaken TLS. Most users should not need to enable this. /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH @@ -1344,7 +1344,7 @@ pub struct Config { /// Otherwise setting this to false reduces filesystem clutter and overhead /// for managing these symlinks in the directory. This is now disabled by /// default. 
You may still return to upstream Conduit but you have to run - /// conduwuit at least once with this set to true and allow the + /// continuwuity at least once with this set to true and allow the /// media_startup_check to take place before shutting down to return to /// Conduit. #[serde(default)] @@ -1361,8 +1361,40 @@ pub struct Config { #[serde(default)] pub prune_missing_media: bool, - /// Vector list of regex patterns of server names that conduwuit will refuse - /// to download remote media from. + /// List of forbidden server names via regex patterns that we will block + /// incoming AND outgoing federation with, and block client room joins / + /// remote user invites. + /// + /// Note that your messages can still make it to forbidden servers through + /// backfilling. Events we receive from forbidden servers via backfill + /// from servers we *do* federate with will be stored in the database. + /// + /// This check is applied on the room ID, room alias, sender server name, + /// sender user's server name, inbound federation X-Matrix origin, and + /// outbound federation handler. + /// + /// You can set this to ["*"] to block all servers by default, and then + /// use `allowed_remote_server_names` to allow only specific servers. + /// + /// example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub forbidden_remote_server_names: RegexSet, + + /// List of allowed server names via regex patterns that we will allow, + /// regardless of if they match `forbidden_remote_server_names`. + /// + /// This option has no effect if `forbidden_remote_server_names` is empty. + /// + /// example: ["goodserver\\.tld$", "goodphrase"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub allowed_remote_server_names: RegexSet, + + /// Vector list of regex patterns of server names that continuwuity will + /// refuse to download remote media from. /// /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// @@ -1370,22 +1402,6 @@ pub struct Config { #[serde(default, with = "serde_regex")] pub prevent_media_downloads_from: RegexSet, - /// List of forbidden server names via regex patterns that we will block - /// incoming AND outgoing federation with, and block client room joins / - /// remote user invites. - /// - /// This check is applied on the room ID, room alias, sender server name, - /// sender user's server name, inbound federation X-Matrix origin, and - /// outbound federation handler. - /// - /// Basically "global" ACLs. - /// - /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub forbidden_remote_server_names: RegexSet, - /// List of forbidden server names via regex patterns that we will block all /// outgoing federated room directory requests for. Useful for preventing /// our users from wandering into bad servers or spaces. @@ -1396,8 +1412,33 @@ pub struct Config { #[serde(default, with = "serde_regex")] pub forbidden_remote_room_directory_server_names: RegexSet, + /// Vector list of regex patterns of server names that continuwuity will not + /// send messages to the client from. + /// + /// Note that there is no way for clients to receive messages once a server + /// has become unignored without doing a full sync. This is a protocol + /// limitation with the current sync protocols. This means this is somewhat + /// of a nuclear option. 
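Editor's note: the documented interplay between `forbidden_remote_server_names` and `allowed_remote_server_names` boils down to "forbidden unless explicitly allowed". A sketch with `regex::RegexSet`, matching the `serde_regex` fields in this hunk (the real check also runs against room IDs, aliases, and X-Matrix origins; the doc's `["*"]` block-all example is written here as the valid regex `.*`):

```rust
use regex::RegexSet;

/// A server is blocked when it matches the forbidden set and does not match
/// the allowed set. An empty forbidden set blocks nothing, which also makes
/// the allowed set a no-op, as the doc comment above states.
fn is_remote_server_forbidden(
    forbidden: &RegexSet,
    allowed: &RegexSet,
    server_name: &str,
) -> bool {
    forbidden.is_match(server_name) && !allowed.is_match(server_name)
}

fn main() {
    // Block everything by default, then punch a hole for one server.
    let forbidden = RegexSet::new([r".*"]).unwrap();
    let allowed = RegexSet::new([r"^goodserver\.tld$"]).unwrap();

    assert!(is_remote_server_forbidden(&forbidden, &allowed, "random.example"));
    assert!(!is_remote_server_forbidden(&forbidden, &allowed, "goodserver.tld"));
}
```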
+ /// + /// example: ["reallybadserver\.tld$", "reallybadphrase", + /// "69dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub ignore_messages_from_server_names: RegexSet, + + /// Send messages from users that the user has ignored to the client. + /// + /// There is no way for clients to receive messages sent while a user was + /// ignored without doing a full sync. This is a protocol limitation with + /// the current sync protocols. Disabling this option will move + /// responsibility of ignoring messages to the client, which can avoid this + /// limitation. + #[serde(default)] + pub send_messages_from_ignored_users_to_client: bool, + /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you - /// do not want conduwuit to send outbound requests to. Defaults to + /// do not want continuwuity to send outbound requests to. Defaults to /// RFC1918, unroutable, loopback, multicast, and testnet addresses for /// security. /// @@ -1565,26 +1606,26 @@ pub struct Config { /// Allow admins to enter commands in rooms other than "#admins" (admin /// room) by prefixing your message with "\!admin" or "\\!admin" followed up - /// a normal conduwuit admin command. The reply will be publicly visible to - /// the room, originating from the sender. + /// a normal continuwuity admin command. The reply will be publicly visible + /// to the room, originating from the sender. /// /// example: \\!admin debug ping puppygock.gay #[serde(default = "true_fn")] pub admin_escape_commands: bool, - /// Automatically activate the conduwuit admin room console / CLI on - /// startup. This option can also be enabled with `--console` conduwuit + /// Automatically activate the continuwuity admin room console / CLI on + /// startup. This option can also be enabled with `--console` continuwuity /// argument. #[serde(default)] pub admin_console_automatic: bool, /// List of admin commands to execute on startup. /// - /// This option can also be configured with the `--execute` conduwuit + /// This option can also be configured with the `--execute` continuwuity /// argument and can take standard shell commands and environment variables /// - /// For example: `./conduwuit --execute "server admin-notice conduwuit has - /// started up at $(date)"` + /// For example: `./continuwuity --execute "server admin-notice continuwuity + /// has started up at $(date)"` /// /// example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` /// @@ -1594,7 +1635,7 @@ pub struct Config { /// Ignore errors in startup commands. /// - /// If false, conduwuit will error and fail to start if an admin execute + /// If false, continuwuity will error and fail to start if an admin execute /// command (`--execute` / `admin_execute`) fails. #[serde(default)] pub admin_execute_errors_ignore: bool, @@ -1619,17 +1660,16 @@ pub struct Config { /// The default room tag to apply on the admin room. /// /// On some clients like Element, the room tag "m.server_notice" is a - /// special pinned room at the very bottom of your room list. The conduwuit - /// admin room can be pinned here so you always have an easy-to-access - /// shortcut dedicated to your admin room. + /// special pinned room at the very bottom of your room list. The + /// continuwuity admin room can be pinned here so you always have an + /// easy-to-access shortcut dedicated to your admin room. 
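Editor's note: the `admin_execute` / `admin_execute_errors_ignore` pair described above amounts to a fail-fast loop over startup commands with an opt-out. A sketch of those semantics; the `run_admin_command` dispatcher is a hypothetical stand-in, not the repo's API:

```rust
/// Run each startup admin command, failing startup on the first error unless
/// `errors_ignore` is set (mirrors `admin_execute_errors_ignore`).
async fn execute_startup_commands(
    commands: &[String],
    errors_ignore: bool,
) -> Result<(), String> {
    for command in commands {
        if let Err(e) = run_admin_command(command).await {
            if errors_ignore {
                eprintln!("ignoring failed startup command {command:?}: {e}");
                continue;
            }
            return Err(format!("startup command {command:?} failed: {e}"));
        }
    }
    Ok(())
}

async fn run_admin_command(_command: &str) -> Result<(), String> {
    Ok(()) // stand-in for the real admin-command dispatcher
}
```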
/// /// default: "m.server_notice" #[serde(default = "default_admin_room_tag")] pub admin_room_tag: String, /// Sentry.io crash/panic reporting, performance monitoring/metrics, etc. - /// This is NOT enabled by default. conduwuit's default Sentry reporting - /// endpoint domain is `o4506996327251968.ingest.us.sentry.io`. + /// This is NOT enabled by default. #[serde(default)] pub sentry: bool, @@ -1640,7 +1680,7 @@ pub struct Config { #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, - /// Report your conduwuit server_name in Sentry.io crash reports and + /// Report your continuwuity server_name in Sentry.io crash reports and /// metrics. #[serde(default)] pub sentry_send_server_name: bool, @@ -1681,7 +1721,7 @@ pub struct Config { /// Enable the tokio-console. This option is only relevant to developers. /// /// For more information, see: - /// https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console + /// https://continuwuity.org/development.html#debugging-with-tokio-console #[serde(default)] pub tokio_console: bool, @@ -1857,12 +1897,28 @@ pub struct WellKnownConfig { /// example: "matrix.example.com:443" pub server: Option, + /// URL to a support page for the server, which will be served as part of + /// the MSC1929 server support endpoint at /.well-known/matrix/support. + /// Will be included alongside any contact information pub support_page: Option, + /// Role string for server support contacts, to be served as part of the + /// MSC1929 server support endpoint at /.well-known/matrix/support. + /// + /// default: "m.role.admin" pub support_role: Option, + /// Email address for server support contacts, to be served as part of the + /// MSC1929 server support endpoint. + /// This will be used along with support_mxid if specified. pub support_email: Option, + /// Matrix ID for server support contacts, to be served as part of the + /// MSC1929 server support endpoint. + /// This will be used along with support_email if specified. + /// + /// If no email or mxid is specified, all of the server's admins will be + /// listed. pub support_mxid: Option, } @@ -1923,7 +1979,11 @@ impl Config { where I: Iterator, { - let envs = [Env::var("CONDUIT_CONFIG"), Env::var("CONDUWUIT_CONFIG")]; + let envs = [ + Env::var("CONDUIT_CONFIG"), + Env::var("CONDUWUIT_CONFIG"), + Env::var("CONTINUWUITY_CONFIG"), + ]; let config = envs .into_iter() @@ -1932,7 +1992,8 @@ impl Config { .chain(paths.map(Toml::file)) .fold(Figment::new(), |config, file| config.merge(file.nested())) .merge(Env::prefixed("CONDUIT_").global().split("__")) - .merge(Env::prefixed("CONDUWUIT_").global().split("__")); + .merge(Env::prefixed("CONDUWUIT_").global().split("__")) + .merge(Env::prefixed("CONTINUWUITY_").global().split("__")); Ok(config) } @@ -1954,7 +2015,7 @@ impl Config { let mut addrs = Vec::with_capacity( self.get_bind_hosts() .len() - .saturating_add(self.get_bind_ports().len()), + .saturating_mul(self.get_bind_ports().len()), ); for host in &self.get_bind_hosts() { for port in &self.get_bind_ports() { diff --git a/src/core/debug.rs b/src/core/debug.rs index b9a53038..21a5ada4 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -12,6 +12,7 @@ pub use crate::{result::DebugInspect, utils::debug::*}; /// Log event at given level in debug-mode (when debug-assertions are enabled). /// In release-mode it becomes DEBUG level, and possibly subject to elision. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
debug_event { ( $level:expr_2021, $($x:tt)+ ) => { if $crate::debug::logging() { diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 9c24d3b4..2eb6823a 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -33,6 +33,7 @@ //! option of replacing `error!` with `debug_error!`. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! Err { ($($args:tt)*) => { Err($crate::err!($($args)*)) @@ -40,6 +41,7 @@ macro_rules! Err { } #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! err { (Request(Forbidden($level:ident!($($args:tt)+)))) => {{ let mut buf = String::new(); @@ -109,6 +111,7 @@ macro_rules! err { /// can share the same callsite metadata for the source of our Error and the /// associated logging and tracing event dispatches. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! err_log { ($out:ident, $level:ident, $($fields:tt)+) => {{ use $crate::tracing::{ diff --git a/src/core/info/cargo.rs b/src/core/info/cargo.rs index c5a1d167..e70bdcd5 100644 --- a/src/core/info/cargo.rs +++ b/src/core/info/cargo.rs @@ -31,12 +31,12 @@ const ROUTER_MANIFEST: &'static str = (); #[cargo_manifest(crate = "main")] const MAIN_MANIFEST: &'static str = (); -/// Processed list of features access all project crates. This is generated from +/// Processed list of features across all project crates. This is generated from /// the data in the MANIFEST strings and contains all possible project features. /// For *enabled* features see the info::rustc module instead. static FEATURES: OnceLock> = OnceLock::new(); -/// Processed list of dependencies. This is generated from the datas captured in +/// Processed list of dependencies. This is generated from the data captured in /// the MANIFEST. static DEPENDENCIES: OnceLock = OnceLock::new(); diff --git a/src/core/info/version.rs b/src/core/info/version.rs index 6abb6e13..c22c8ec8 100644 --- a/src/core/info/version.rs +++ b/src/core/info/version.rs @@ -26,13 +26,6 @@ pub fn user_agent() -> &'static str { USER_AGENT.get_or_init(init_user_agent) } fn init_user_agent() -> String { format!("{}/{}", name(), version()) } fn init_version() -> String { - option_env!("CONDUWUIT_VERSION_EXTRA") - .or(option_env!("CONDUIT_VERSION_EXTRA")) - .map_or(SEMANTIC.to_owned(), |extra| { - if extra.is_empty() { - SEMANTIC.to_owned() - } else { - format!("{SEMANTIC} ({extra})") - } - }) + conduwuit_build_metadata::version_tag() + .map_or(SEMANTIC.to_owned(), |extra| format!("{SEMANTIC} ({extra})")) } diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 5ac374e8..f7b2521a 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -33,6 +33,7 @@ pub struct Log { // the crate namespace like these. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! event { ( $level:expr_2021, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } } diff --git a/src/core/log/reload.rs b/src/core/log/reload.rs index e6a16c9f..f72fde47 100644 --- a/src/core/log/reload.rs +++ b/src/core/log/reload.rs @@ -16,9 +16,9 @@ use crate::{Result, error}; /// pulling in a version of tracing that's incompatible with the rest of our /// deps. /// -/// To work around this, we define an trait without the S paramter that forwards -/// to the reload::Handle::reload method, and then store the handle as a trait -/// object. +/// To work around this, we define an trait without the S parameter that +/// forwards to the reload::Handle::reload method, and then store the handle as +/// a trait object. 
/// /// [1]: pub trait ReloadHandle { diff --git a/src/core/matrix/event.rs b/src/core/matrix/event.rs index ac9e29d6..e4c478cd 100644 --- a/src/core/matrix/event.rs +++ b/src/core/matrix/event.rs @@ -1,19 +1,10 @@ -use std::{ - borrow::Borrow, - fmt::{Debug, Display}, - hash::Hash, - sync::Arc, -}; - use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; use serde_json::value::RawValue as RawJsonValue; /// Abstraction of a PDU so users can have their own PDU types. pub trait Event { - type Id: Clone + Debug + Display + Eq + Ord + Hash + Send + Borrow; - /// The `EventId` of this event. - fn event_id(&self) -> &Self::Id; + fn event_id(&self) -> &EventId; /// The `RoomId` of this event. fn room_id(&self) -> &RoomId; @@ -35,20 +26,18 @@ pub trait Event { /// The events before this event. // Requires GATs to avoid boxing (and TAIT for making it convenient). - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; /// All the authenticating events for this event. // Requires GATs to avoid boxing (and TAIT for making it convenient). - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; /// If this event is a redaction event this is the event it redacts. - fn redacts(&self) -> Option<&Self::Id>; + fn redacts(&self) -> Option<&EventId>; } impl Event for &T { - type Id = T::Id; - - fn event_id(&self) -> &Self::Id { (*self).event_id() } + fn event_id(&self) -> &EventId { (*self).event_id() } fn room_id(&self) -> &RoomId { (*self).room_id() } @@ -62,41 +51,13 @@ impl Event for &T { fn state_key(&self) -> Option<&str> { (*self).state_key() } - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { (*self).prev_events() } - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { (*self).auth_events() } - fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() } -} - -impl Event for Arc { - type Id = T::Id; - - fn event_id(&self) -> &Self::Id { (**self).event_id() } - - fn room_id(&self) -> &RoomId { (**self).room_id() } - - fn sender(&self) -> &UserId { (**self).sender() } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (**self).origin_server_ts() } - - fn event_type(&self) -> &TimelineEventType { (**self).event_type() } - - fn content(&self) -> &RawJsonValue { (**self).content() } - - fn state_key(&self) -> Option<&str> { (**self).state_key() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - (**self).prev_events() - } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - (**self).auth_events() - } - - fn redacts(&self) -> Option<&Self::Id> { (**self).redacts() } + fn redacts(&self) -> Option<&EventId> { (*self).redacts() } } diff --git a/src/core/matrix/pdu.rs b/src/core/matrix/pdu.rs index 7e1ecfa8..188586bd 100644 --- a/src/core/matrix/pdu.rs +++ b/src/core/matrix/pdu.rs @@ -79,9 +79,7 @@ impl Pdu { } impl Event for Pdu { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } + fn event_id(&self) -> &EventId { &self.event_id } fn room_id(&self) -> &RoomId { &self.room_id } @@ -97,15 +95,15 @@ impl Event for Pdu { fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.prev_events.iter() + fn 
prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.prev_events.iter().map(AsRef::as_ref) } - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.auth_events.iter() + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.auth_events.iter().map(AsRef::as_ref) } - fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } + fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() } } /// Prevent derived equality which wouldn't limit itself to event_id diff --git a/src/core/matrix/pdu/strip.rs b/src/core/matrix/pdu/strip.rs index 3683caaa..a39e7d35 100644 --- a/src/core/matrix/pdu/strip.rs +++ b/src/core/matrix/pdu/strip.rs @@ -1,8 +1,8 @@ use ruma::{ events::{ - AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, - AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, + AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, + AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, room::member::RoomMemberEventContent, + space::child::HierarchySpaceChildEvent, }, serde::Raw, }; @@ -10,41 +10,6 @@ use serde_json::{json, value::Value as JsonValue}; use crate::implement; -/// This only works for events that are also AnyRoomEvents. -#[must_use] -#[implement(super::Pdu)] -pub fn into_any_event(self) -> Raw { - serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works") -} - -/// This only works for events that are also AnyRoomEvents. -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_any_event_value(self) -> JsonValue { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - json -} - #[implement(super::Pdu)] #[must_use] #[inline] @@ -53,7 +18,8 @@ pub fn into_room_event(self) -> Raw { self.to_room_event() } #[implement(super::Pdu)] #[must_use] pub fn to_room_event(&self) -> Raw { - serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works") + let value = self.to_room_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -91,8 +57,8 @@ pub fn into_message_like_event(self) -> Raw { self.to_messa #[implement(super::Pdu)] #[must_use] pub fn to_message_like_event(&self) -> Raw { - serde_json::from_value(self.to_message_like_event_value()) - .expect("Raw::from_value always works") + let value = self.to_message_like_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -130,7 +96,8 @@ pub fn into_sync_room_event(self) -> Raw { self.to_sync_ro #[implement(super::Pdu)] #[must_use] pub fn to_sync_room_event(&self) -> Raw { - serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works") + let value = self.to_sync_room_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -162,7 +129,8 @@ pub fn to_sync_room_event_value(&self) -> JsonValue { #[implement(super::Pdu)] 
#[must_use] pub fn into_state_event(self) -> Raw { - serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") + let value = self.into_state_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -189,8 +157,8 @@ pub fn into_state_event_value(self) -> JsonValue { #[implement(super::Pdu)] #[must_use] pub fn into_sync_state_event(self) -> Raw { - serde_json::from_value(self.into_sync_state_event_value()) - .expect("Raw::from_value always works") + let value = self.into_sync_state_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -223,8 +191,8 @@ pub fn into_stripped_state_event(self) -> Raw { #[implement(super::Pdu)] #[must_use] pub fn to_stripped_state_event(&self) -> Raw { - serde_json::from_value(self.to_stripped_state_event_value()) - .expect("Raw::from_value always works") + let value = self.to_stripped_state_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -242,8 +210,8 @@ pub fn to_stripped_state_event_value(&self) -> JsonValue { #[implement(super::Pdu)] #[must_use] pub fn into_stripped_spacechild_state_event(self) -> Raw { - serde_json::from_value(self.into_stripped_spacechild_state_event_value()) - .expect("Raw::from_value always works") + let value = self.into_stripped_spacechild_state_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -262,7 +230,8 @@ pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue { #[implement(super::Pdu)] #[must_use] pub fn into_member_event(self) -> Raw> { - serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works") + let value = self.into_member_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] diff --git a/src/core/matrix/state_res/benches.rs b/src/core/matrix/state_res/benches.rs index 7a1ae5bf..12eeab9d 100644 --- a/src/core/matrix/state_res/benches.rs +++ b/src/core/matrix/state_res/benches.rs @@ -4,10 +4,7 @@ extern crate test; use std::{ borrow::Borrow, collections::{HashMap, HashSet}, - sync::{ - Arc, - atomic::{AtomicU64, Ordering::SeqCst}, - }, + sync::atomic::{AtomicU64, Ordering::SeqCst}, }; use futures::{future, future::ready}; @@ -55,7 +52,6 @@ fn lexico_topo_sort(c: &mut test::Bencher) { #[cfg(conduwuit_bench)] #[cfg_attr(conduwuit_bench, bench)] fn resolution_shallow_auth_chain(c: &mut test::Bencher) { - let parallel_fetches = 32; let mut store = TestStore(hashmap! 
{}); // build up the DAG @@ -64,7 +60,7 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) { c.iter(|| async { let ev_map = store.0.clone(); let state_sets = [&state_at_bob, &state_at_charlie]; - let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).clone()); let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); let auth_chain_sets: Vec> = state_sets .iter() @@ -81,7 +77,6 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) { &auth_chain_sets, &fetch, &exists, - parallel_fetches, ) .await { @@ -94,7 +89,6 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) { #[cfg(conduwuit_bench)] #[cfg_attr(conduwuit_bench, bench)] fn resolve_deeper_event_set(c: &mut test::Bencher) { - let parallel_fetches = 32; let mut inner = INITIAL_EVENTS(); let ban = BAN_STATE_SET(); @@ -148,7 +142,7 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) { }) .collect(); - let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); + let fetch = |id: OwnedEventId| ready(inner.get(&id).clone()); let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); let _ = match state_res::resolve( &RoomVersionId::V6, @@ -156,7 +150,6 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) { &auth_chain_sets, &fetch, &exists, - parallel_fetches, ) .await { @@ -171,20 +164,20 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) { // IMPLEMENTATION DETAILS AHEAD // /////////////////////////////////////////////////////////////////////*/ -struct TestStore(HashMap>); +struct TestStore(HashMap); #[allow(unused)] -impl TestStore { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result { self.0 .get(event_id) - .map(Arc::clone) + .cloned() .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) } /// Returns the events that correspond to the `event_ids` sorted in the same /// order. - fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { + fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result> { let mut events = vec![]; for id in event_ids { events.push(self.get_event(room_id, id)?); @@ -193,7 +186,11 @@ impl TestStore { } /// Returns a Vec of the related auth events to the given `event`. 
- fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { + fn auth_event_ids( + &self, + room_id: &RoomId, + event_ids: Vec, + ) -> Result> { let mut result = HashSet::new(); let mut stack = event_ids; @@ -219,8 +216,8 @@ impl TestStore { fn auth_chain_diff( &self, room_id: &RoomId, - event_ids: Vec>, - ) -> Result> { + event_ids: Vec>, + ) -> Result> { let mut auth_chain_sets = vec![]; for ids in event_ids { // TODO state store `auth_event_ids` returns self in the event ids list @@ -241,7 +238,7 @@ impl TestStore { Ok(auth_chain_sets .into_iter() .flatten() - .filter(|id| !common.contains(id.borrow())) + .filter(|id| !common.contains(id)) .collect()) } else { Ok(vec![]) @@ -264,7 +261,7 @@ impl TestStore { &[], ); let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), Arc::clone(&create_event)); + self.0.insert(cre.clone(), create_event.clone()); let alice_mem = to_pdu_event( "IMA", @@ -276,7 +273,7 @@ impl TestStore { &[cre.clone()], ); self.0 - .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); let join_rules = to_pdu_event( "IJR", @@ -383,7 +380,7 @@ fn to_pdu_event( content: Box, auth_events: &[S], prev_events: &[S], -) -> Arc +) -> PduEvent where S: AsRef, { @@ -407,7 +404,7 @@ where .collect::>(); let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { + PduEvent { event_id: id.try_into().unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id().to_owned(), @@ -424,12 +421,12 @@ where hashes: EventHash::new(String::new()), signatures: Signatures::new(), }), - }) + } } // all graphs start with these input events #[allow(non_snake_case)] -fn INITIAL_EVENTS() -> HashMap> { +fn INITIAL_EVENTS() -> HashMap { vec![ to_pdu_event::<&EventId>( "CREATE", @@ -511,7 +508,7 @@ fn INITIAL_EVENTS() -> HashMap> { // all graphs start with these input events #[allow(non_snake_case)] -fn BAN_STATE_SET() -> HashMap> { +fn BAN_STATE_SET() -> HashMap { vec![ to_pdu_event( "PA", @@ -568,7 +565,7 @@ impl EventTypeExt for &TimelineEventType { mod event { use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, events::{TimelineEventType, pdu::Pdu}, }; use serde::{Deserialize, Serialize}; @@ -577,9 +574,7 @@ mod event { use super::Event; impl Event for PduEvent { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } + fn event_id(&self) -> &EventId { &self.event_id } fn room_id(&self) -> &RoomId { match &self.rest { @@ -635,28 +630,30 @@ mod event { } } - fn prev_events(&self) -> Box + Send + '_> { + fn prev_events(&self) -> Box + Send + '_> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + | Pdu::RoomV1Pdu(ev) => + Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)), #[cfg(not(feature = "unstable-exhaustive-types"))] | _ => unreachable!("new PDU version"), } } - fn auth_events(&self) -> Box + Send + '_> { + fn auth_events(&self) -> Box + Send + '_> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + | Pdu::RoomV1Pdu(ev) => + Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)), 
#[cfg(not(feature = "unstable-exhaustive-types"))] | _ => unreachable!("new PDU version"), } } - fn redacts(&self) -> Option<&Self::Id> { + fn redacts(&self) -> Option<&EventId> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), - | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(), #[cfg(not(feature = "unstable-exhaustive-types"))] | _ => unreachable!("new PDU version"), } diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 65bec802..759ab5cb 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -38,7 +38,7 @@ struct GetMembership { membership: MembershipState, } -#[derive(Deserialize)] +#[derive(Deserialize, Debug)] struct RoomMemberContentFields { membership: Option>, join_authorised_via_users_server: Option>, @@ -133,7 +133,7 @@ pub fn auth_types_for_event( level = "debug", skip_all, fields( - event_id = incoming_event.event_id().borrow().as_str() + event_id = incoming_event.event_id().as_str(), ) )] pub async fn auth_check( @@ -149,9 +149,9 @@ where Incoming: Event + Send + Sync, { debug!( - "auth_check beginning for {} ({})", - incoming_event.event_id(), - incoming_event.event_type() + event_id = format!("{}", incoming_event.event_id()), + event_type = format!("{}", incoming_event.event_type()), + "auth_check beginning" ); // [synapse] check that all the events are in the same room as `incoming_event` @@ -259,7 +259,7 @@ where // 3. If event does not have m.room.create in auth_events reject if !incoming_event .auth_events() - .any(|id| id.borrow() == room_create_event.event_id().borrow()) + .any(|id| id == room_create_event.event_id()) { warn!("no m.room.create event in auth events"); return Ok(false); @@ -383,10 +383,15 @@ where let sender_membership_event_content: RoomMemberContentFields = from_json_str(sender_member_event.content().get())?; - let membership_state = sender_membership_event_content - .membership - .expect("we should test before that this field exists") - .deserialize()?; + let Some(membership_state) = sender_membership_event_content.membership else { + warn!( + sender_membership_event_content = format!("{sender_membership_event_content:?}"), + event_id = format!("{}", incoming_event.event_id()), + "Sender membership event content missing membership field" + ); + return Err(Error::InvalidPdu("Missing membership field".to_owned())); + }; + let membership_state = membership_state.deserialize()?; if !matches!(membership_state, MembershipState::Join) { warn!("sender's membership is not join"); @@ -633,7 +638,7 @@ fn valid_membership_change( warn!(?target_user_membership_event_id, "Banned user can't join"); false } else if (join_rules == JoinRule::Invite - || room_version.allow_knocking && join_rules == JoinRule::Knock) + || room_version.allow_knocking && (join_rules == JoinRule::Knock || matches!(join_rules, JoinRule::KnockRestricted(_)))) // If the join_rule is invite then allow if membership state is invite or join && (target_user_current_membership == MembershipState::Join || target_user_current_membership == MembershipState::Invite) @@ -1016,11 +1021,11 @@ fn check_redaction( // If the domain of the event_id of the event being redacted is the same as the // domain of the event_id of the m.room.redaction, allow - if redaction_event.event_id().borrow().server_name() + if redaction_event.event_id().server_name() == redaction_event .redacts() .as_ref() - .and_then(|&id| 
id.borrow().server_name()) + .and_then(|&id| id.server_name()) { debug!("redaction event allowed via room version 1 rules"); return Ok(true); @@ -1112,8 +1117,6 @@ fn verify_third_party_invite( #[cfg(test)] mod tests { - use std::sync::Arc; - use ruma::events::{ StateEventType, TimelineEventType, room::{ @@ -1143,7 +1146,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1188,7 +1191,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1233,7 +1236,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1278,7 +1281,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1340,7 +1343,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1412,7 +1415,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index 93c00d15..651f6130 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -15,13 +15,12 @@ use std::{ borrow::Borrow, cmp::{Ordering, Reverse}, collections::{BinaryHeap, HashMap, HashSet}, - fmt::Debug, hash::{BuildHasher, Hash}, }; -use futures::{Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt, future, stream}; +use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future}; use ruma::{ - EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, + EventId, Int, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, events::{ StateEventType, TimelineEventType, room::member::{MembershipState, RoomMemberEventContent}, @@ -37,9 +36,11 @@ pub use self::{ room_version::RoomVersion, }; use crate::{ - debug, + debug, debug_error, matrix::{event::Event, pdu::StateKey}, - trace, warn, + trace, + utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, WidebandExt}, + warn, }; /// A mapping of event type and state_key to some value `T`, usually an @@ -66,9 +67,6 @@ type Result = crate::Result; /// * `event_fetch` - Any event not found in the `event_map` will defer to this /// closure to find the event. /// -/// * `parallel_fetches` - The number of asynchronous fetch requests in-flight -/// for any given operation. 
-/// /// ## Invariants /// /// The caller of `resolve` must ensure that all the events are from the same @@ -79,21 +77,19 @@ type Result = crate::Result; pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( room_version: &RoomVersionId, state_sets: Sets, - auth_chain_sets: &'a [HashSet], + auth_chain_sets: &'a [HashSet], event_fetch: &Fetch, event_exists: &Exists, - parallel_fetches: usize, -) -> Result> +) -> Result> where - Fetch: Fn(E::Id) -> FetchFut + Sync, + Fetch: Fn(OwnedEventId) -> FetchFut + Sync, FetchFut: Future> + Send, - Exists: Fn(E::Id) -> ExistsFut + Sync, + Exists: Fn(OwnedEventId) -> ExistsFut + Sync, ExistsFut: Future + Send, Sets: IntoIterator + Send, - SetIter: Iterator> + Clone + Send, + SetIter: Iterator> + Clone + Send, Hasher: BuildHasher + Send + Sync, E: Event + Clone + Send + Sync, - E::Id: Borrow + Send + Sync, for<'b> &'b E: Send, { debug!("State resolution starting"); @@ -112,20 +108,16 @@ where debug!(count = conflicting.len(), "conflicting events"); trace!(map = ?conflicting, "conflicting events"); - let auth_chain_diff = - get_auth_chain_diff(auth_chain_sets).chain(conflicting.into_values().flatten()); + let conflicting_values = conflicting.into_values().flatten().stream(); // `all_conflicted` contains unique items // synapse says `full_set = {eid for eid in full_conflicted_set if eid in // event_map}` - let all_conflicted: HashSet<_> = stream::iter(auth_chain_diff) - // Don't honor events we cannot "verify" - .map(|id| event_exists(id.clone()).map(move |exists| (id, exists))) - .buffer_unordered(parallel_fetches) - .filter_map(|(id, exists)| future::ready(exists.then_some(id))) - .collect() - .boxed() - .await; + let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets) + .chain(conflicting_values) + .broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id)) + .collect() + .await; debug!(count = all_conflicted.len(), "full conflicted set"); trace!(set = ?all_conflicted, "full conflicted set"); @@ -135,23 +127,21 @@ where // Get only the control events with a state_key: "" or ban/kick event (sender != // state_key) - let control_events: Vec<_> = stream::iter(all_conflicted.iter()) - .map(|id| is_power_event_id(id, &event_fetch).map(move |is| (id, is))) - .buffer_unordered(parallel_fetches) - .filter_map(|(id, is)| future::ready(is.then_some(id.clone()))) + let control_events: Vec<_> = all_conflicted + .iter() + .stream() + .wide_filter_map(async |id| { + is_power_event_id(id, &event_fetch) + .await + .then_some(id.clone()) + }) .collect() - .boxed() .await; // Sort the control events based on power_level/clock/event_id and // outgoing/incoming edges - let sorted_control_levels = reverse_topological_power_sort( - control_events, - &all_conflicted, - &event_fetch, - parallel_fetches, - ) - .await?; + let sorted_control_levels = + reverse_topological_power_sort(control_events, &all_conflicted, &event_fetch).await?; debug!(count = sorted_control_levels.len(), "power events"); trace!(list = ?sorted_control_levels, "sorted power events"); @@ -160,10 +150,9 @@ where // Sequentially auth check each control event. let resolved_control = iterative_auth_check( &room_version, - sorted_control_levels.iter(), + sorted_control_levels.iter().stream().map(AsRef::as_ref), clean.clone(), &event_fetch, - parallel_fetches, ) .await?; @@ -172,36 +161,35 @@ where // At this point the control_events have been resolved we now have to // sort the remaining events using the mainline of the resolved power level. 
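Editor's note: with `parallel_fetches` removed from `resolve`, callers now pass just the two closures, as the updated benches in this diff do. A shape-only sketch with stand-in types (`String` in place of `OwnedEventId`, `Pdu` in place of the real event type):

```rust
use std::collections::HashMap;
use futures::future::ready;

// Stand-in; real callers use the crate's PDU type, which implements `Event`.
#[derive(Clone)]
struct Pdu;

fn make_resolve_inputs(map: &HashMap<String, Pdu>) {
    // Post-refactor shape: fetch/exists take an owned ID and return a future;
    // the former `parallel_fetches` knob is gone, with concurrency handled
    // internally by the broad/wide stream adapters.
    let fetch = |id: String| ready(map.get(&id).cloned());
    let exists = |id: String| ready(map.contains_key(&id));
    let _ = (&fetch, &exists); // passed as `&fetch, &exists` to `resolve`
}
```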
- let deduped_power_ev = sorted_control_levels.into_iter().collect::>(); + let deduped_power_ev: HashSet<_> = sorted_control_levels.into_iter().collect(); // This removes the control events that passed auth and more importantly those // that failed auth - let events_to_resolve = all_conflicted + let events_to_resolve: Vec<_> = all_conflicted .iter() - .filter(|&id| !deduped_power_ev.contains(id.borrow())) + .filter(|&id| !deduped_power_ev.contains(id)) .cloned() - .collect::>(); + .collect(); debug!(count = events_to_resolve.len(), "events left to resolve"); trace!(list = ?events_to_resolve, "events left to resolve"); // This "epochs" power level event - let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, StateKey::new())); + let power_levels_ty_sk = (StateEventType::RoomPowerLevels, StateKey::new()); + let power_event = resolved_control.get(&power_levels_ty_sk); debug!(event_id = ?power_event, "power event"); let sorted_left_events = - mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches) - .await?; + mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?; trace!(list = ?sorted_left_events, "events left, sorted"); let mut resolved_state = iterative_auth_check( &room_version, - sorted_left_events.iter(), + sorted_left_events.iter().stream().map(AsRef::as_ref), resolved_control, // The control events are added to the final resolved state &event_fetch, - parallel_fetches, ) .await?; @@ -265,7 +253,7 @@ where #[allow(clippy::arithmetic_side_effects)] fn get_auth_chain_diff( auth_chain_sets: &[HashSet], -) -> impl Iterator + Send + use +) -> impl Stream + Send + use where Id: Clone + Eq + Hash + Send, Hasher: BuildHasher + Send + Sync, @@ -279,6 +267,7 @@ where id_counts .into_iter() .filter_map(move |(id, count)| (count < num_sets).then_some(id)) + .stream() } /// Events are sorted from "earliest" to "latest". @@ -291,16 +280,14 @@ where /// earlier (further back in time) origin server timestamp. 
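Editor's note: as the surrounding doc comment reads, a greater sender power level sorts "earlier", with origin server timestamp and then event ID breaking ties. A sketch of that composite key; the exact ordering is my reading of the truncated comment, so treat it as an assumption:

```rust
use std::cmp::Reverse;

/// Higher power level first, then older timestamp, then event ID, mirroring
/// the `(pl, origin_server_ts)` pair the fetcher above returns.
fn power_sort_key(power_level: i64, origin_server_ts: u64, event_id: &str) -> (Reverse<i64>, u64, &str) {
    (Reverse(power_level), origin_server_ts, event_id)
}

fn main() {
    let mut events = vec![("$b", 50_i64, 2_u64), ("$a", 100_i64, 5_u64)];
    events.sort_by_key(|&(id, pl, ts)| power_sort_key(pl, ts, id));
    assert_eq!(events[0].0, "$a"); // the higher-powered sender sorts earliest
}
```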
#[tracing::instrument(level = "debug", skip_all)] async fn reverse_topological_power_sort( - events_to_sort: Vec, - auth_diff: &HashSet, + events_to_sort: Vec, + auth_diff: &HashSet, fetch_event: &F, - parallel_fetches: usize, -) -> Result> +) -> Result> where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send + Sync, - E::Id: Borrow + Send + Sync, { debug!("reverse topological sort of power events"); @@ -310,33 +297,36 @@ where } // This is used in the `key_fn` passed to the lexico_topo_sort fn - let event_to_pl = stream::iter(graph.keys()) - .map(|event_id| { - get_power_level_for_sender(event_id.clone(), fetch_event, parallel_fetches) - .map(move |res| res.map(|pl| (event_id, pl))) + let event_to_pl: HashMap<_, _> = graph + .keys() + .cloned() + .stream() + .broad_filter_map(async |event_id| { + let pl = get_power_level_for_sender(&event_id, fetch_event) + .await + .ok()?; + Some((event_id, pl)) }) - .buffer_unordered(parallel_fetches) - .try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { + .inspect(|(event_id, pl)| { debug!( - event_id = event_id.borrow().as_str(), - power_level = i64::from(pl), + event_id = event_id.as_str(), + power_level = i64::from(*pl), "found the power level of an event's sender", ); - - event_to_pl.insert(event_id.clone(), pl); - future::ok(event_to_pl) }) + .collect() .boxed() - .await?; + .await; - let event_to_pl = &event_to_pl; - let fetcher = |event_id: E::Id| async move { + let fetcher = async |event_id: OwnedEventId| { let pl = *event_to_pl - .get(event_id.borrow()) + .get(&event_id) .ok_or_else(|| Error::NotFound(String::new()))?; + let ev = fetch_event(event_id) .await .ok_or_else(|| Error::NotFound(String::new()))?; + Ok((pl, ev.origin_server_ts())) }; @@ -473,31 +463,27 @@ where /// the eventId at the eventId's generation (we walk backwards to `EventId`s /// most recent previous power level event). async fn get_power_level_for_sender( - event_id: E::Id, + event_id: &EventId, fetch_event: &F, - parallel_fetches: usize, ) -> serde_json::Result where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send, - E::Id: Borrow + Send, { debug!("fetch event ({event_id}) senders power level"); - let event = fetch_event(event_id.clone()).await; + let event = fetch_event(event_id.to_owned()).await; - let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); + let auth_events = event.as_ref().map(Event::auth_events); - let pl = stream::iter(auth_events) - .map(|aid| fetch_event(aid.clone())) - .buffer_unordered(parallel_fetches.min(5)) - .filter_map(future::ready) - .collect::>() - .boxed() - .await + let pl = auth_events .into_iter() - .find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")); + .flatten() + .stream() + .broadn_filter_map(5, |aid| fetch_event(aid.to_owned())) + .ready_find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")) + .await; let content: PowerLevelsContentFields = match pl { | None => return Ok(int!(0)), @@ -525,48 +511,41 @@ where /// For each `events_to_check` event we gather the events needed to auth it from /// the the `fetch_event` closure and verify each event using the /// `event_auth::auth_check` function. 
-async fn iterative_auth_check<'a, E, F, Fut, I>( +async fn iterative_auth_check<'a, E, F, Fut, S>( room_version: &RoomVersion, - events_to_check: I, - unconflicted_state: StateMap, + events_to_check: S, + unconflicted_state: StateMap, fetch_event: &F, - parallel_fetches: usize, -) -> Result> +) -> Result> where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, - E::Id: Borrow + Clone + Eq + Ord + Send + Sync + 'a, - I: Iterator + Debug + Send + 'a, + S: Stream + Send + 'a, E: Event + Clone + Send + Sync, { debug!("starting iterative auth check"); - trace!( - list = ?events_to_check, - "events to check" - ); - let events_to_check: Vec<_> = stream::iter(events_to_check) + let events_to_check: Vec<_> = events_to_check .map(Result::Ok) - .map_ok(|event_id| { - fetch_event(event_id.clone()).map(move |result| { - result.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) - }) + .broad_and_then(async |event_id| { + fetch_event(event_id.to_owned()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) }) - .try_buffer_unordered(parallel_fetches) .try_collect() .boxed() .await?; - let auth_event_ids: HashSet = events_to_check + let auth_event_ids: HashSet = events_to_check .iter() - .flat_map(|event: &E| event.auth_events().map(Clone::clone)) + .flat_map(|event: &E| event.auth_events().map(ToOwned::to_owned)) .collect(); - let auth_events: HashMap = stream::iter(auth_event_ids.into_iter()) - .map(fetch_event) - .buffer_unordered(parallel_fetches) - .filter_map(future::ready) - .map(|auth_event| (auth_event.event_id().clone(), auth_event)) + let auth_events: HashMap = auth_event_ids + .into_iter() + .stream() + .broad_filter_map(fetch_event) + .map(|auth_event| (auth_event.event_id().to_owned(), auth_event)) .collect() .boxed() .await; @@ -574,7 +553,6 @@ where let auth_events = &auth_events; let mut resolved_state = unconflicted_state; for event in &events_to_check { - let event_id = event.event_id(); let state_key = event .state_key() .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?; @@ -588,7 +566,7 @@ where let mut auth_state = StateMap::new(); for aid in event.auth_events() { - if let Some(ev) = auth_events.get(aid.borrow()) { + if let Some(ev) = auth_events.get(aid) { //TODO: synapse checks "rejected_reason" which is most likely related to // soft-failing auth_state.insert( @@ -599,28 +577,26 @@ where ev.clone(), ); } else { - warn!(event_id = aid.borrow().as_str(), "missing auth event"); + warn!(event_id = aid.as_str(), "missing auth event"); } } - stream::iter( - auth_types - .iter() - .filter_map(|key| Some((key, resolved_state.get(key)?))), - ) - .filter_map(|(key, ev_id)| async move { - if let Some(event) = auth_events.get(ev_id.borrow()) { - Some((key, event.clone())) - } else { - Some((key, fetch_event(ev_id.clone()).await?)) - } - }) - .for_each(|(key, event)| { - //TODO: synapse checks "rejected_reason" is None here - auth_state.insert(key.to_owned(), event); - future::ready(()) - }) - .await; + auth_types + .iter() + .stream() + .ready_filter_map(|key| Some((key, resolved_state.get(key)?))) + .filter_map(|(key, ev_id)| async move { + if let Some(event) = auth_events.get(ev_id) { + Some((key, event.clone())) + } else { + Some((key, fetch_event(ev_id.clone()).await?)) + } + }) + .ready_for_each(|(key, event)| { + //TODO: synapse checks "rejected_reason" is None here + auth_state.insert(key.to_owned(), event); + }) + .await; debug!("event to check {:?}", event.event_id()); @@ 
-634,12 +610,25 @@ where future::ready(auth_state.get(&ty.with_state_key(key))) }; - if auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await? { - // add event to resolved state map - resolved_state.insert(event.event_type().with_state_key(state_key), event_id.clone()); - } else { - // synapse passes here on AuthError. We do not add this event to resolved_state. - warn!("event {event_id} failed the authentication check"); + let auth_result = + auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await; + + match auth_result { + | Ok(true) => { + // add event to resolved state map + resolved_state.insert( + event.event_type().with_state_key(state_key), + event.event_id().to_owned(), + ); + }, + | Ok(false) => { + // synapse passes here on AuthError. We do not add this event to resolved_state. + warn!("event {} failed the authentication check", event.event_id()); + }, + | Err(e) => { + debug_error!("event {} failed the authentication check: {e}", event.event_id()); + return Err(e); + }, } } @@ -656,16 +645,14 @@ where /// level as a parent) will be marked as depth 1. depth 1 is "older" than depth /// 0. async fn mainline_sort( - to_sort: &[E::Id], - resolved_power_level: Option, + to_sort: &[OwnedEventId], + resolved_power_level: Option, fetch_event: &F, - parallel_fetches: usize, -) -> Result> +) -> Result> where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Clone + Send + Sync, - E::Id: Borrow + Clone + Send + Sync, { debug!("mainline sort of events"); @@ -682,11 +669,13 @@ where let event = fetch_event(p.clone()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?; + pl = None; for aid in event.auth_events() { - let ev = fetch_event(aid.clone()) + let ev = fetch_event(aid.to_owned()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") { pl = Some(aid.to_owned()); break; @@ -694,36 +683,32 @@ where } } - let mainline_map = mainline + let mainline_map: HashMap<_, _> = mainline .iter() .rev() .enumerate() .map(|(idx, eid)| ((*eid).clone(), idx)) - .collect::>(); + .collect(); - let order_map = stream::iter(to_sort.iter()) - .map(|ev_id| { - fetch_event(ev_id.clone()).map(move |event| event.map(|event| (event, ev_id))) + let order_map: HashMap<_, _> = to_sort + .iter() + .stream() + .broad_filter_map(async |ev_id| { + fetch_event(ev_id.clone()).await.map(|event| (event, ev_id)) }) - .buffer_unordered(parallel_fetches) - .filter_map(future::ready) - .map(|(event, ev_id)| { + .broad_filter_map(|(event, ev_id)| { get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event) - .map_ok(move |depth| (depth, event, ev_id)) + .map_ok(move |depth| (ev_id, (depth, event.origin_server_ts(), ev_id))) .map(Result::ok) }) - .buffer_unordered(parallel_fetches) - .filter_map(future::ready) - .fold(HashMap::new(), |mut order_map, (depth, event, ev_id)| { - order_map.insert(ev_id, (depth, event.origin_server_ts(), ev_id)); - future::ready(order_map) - }) + .collect() .boxed() .await; // Sort the event_ids by their depth, timestamp and EventId // unwrap is OK order map and sort_event_ids are from to_sort (the same Vec) - let mut sort_event_ids = order_map.keys().map(|&k| k.clone()).collect::>(); + let mut sort_event_ids: Vec<_> = order_map.keys().map(|&k| k.clone()).collect(); + sort_event_ids.sort_by_key(|sort_id| &order_map[sort_id]); Ok(sort_event_ids) @@ -733,27 +718,28 @@ where /// that has 
an associated mainline depth. async fn get_mainline_depth( mut event: Option, - mainline_map: &HashMap, + mainline_map: &HashMap, fetch_event: &F, ) -> Result where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send + Sync, - E::Id: Borrow + Send + Sync, { while let Some(sort_ev) = event { - debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); + debug!(event_id = sort_ev.event_id().as_str(), "mainline"); + let id = sort_ev.event_id(); - if let Some(depth) = mainline_map.get(id.borrow()) { + if let Some(depth) = mainline_map.get(id) { return Ok(*depth); } event = None; for aid in sort_ev.auth_events() { - let aev = fetch_event(aid.clone()) + let aev = fetch_event(aid.to_owned()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") { event = Some(aev); break; @@ -765,15 +751,14 @@ where } async fn add_event_and_auth_chain_to_graph( - graph: &mut HashMap>, - event_id: E::Id, - auth_diff: &HashSet, + graph: &mut HashMap>, + event_id: OwnedEventId, + auth_diff: &HashSet, fetch_event: &F, ) where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send + Sync, - E::Id: Borrow + Clone + Send + Sync, { let mut state = vec![event_id]; while let Some(eid) = state.pop() { @@ -783,26 +768,27 @@ async fn add_event_and_auth_chain_to_graph( // Prefer the store to event as the store filters dedups the events for aid in auth_events { - if auth_diff.contains(aid.borrow()) { - if !graph.contains_key(aid.borrow()) { + if auth_diff.contains(aid) { + if !graph.contains_key(aid) { state.push(aid.to_owned()); } - // We just inserted this at the start of the while loop - graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned()); + graph + .get_mut(&eid) + .expect("We just inserted this at the start of the while loop") + .insert(aid.to_owned()); } } } } -async fn is_power_event_id(event_id: &E::Id, fetch: &F) -> bool +async fn is_power_event_id(event_id: &EventId, fetch: &F) -> bool where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send, - E::Id: Borrow + Send + Sync, { - match fetch(event_id.clone()).await.as_ref() { + match fetch(event_id.to_owned()).await.as_ref() { | Some(state) => is_power_event(state), | _ => false, } @@ -858,10 +844,7 @@ where #[cfg(test)] mod tests { - use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - }; + use std::collections::{HashMap, HashSet}; use maplit::{hashmap, hashset}; use rand::seq::SliceRandom; @@ -884,7 +867,7 @@ mod tests { zara, }, }; - use crate::debug; + use crate::{debug, utils::stream::IterStream}; async fn test_event_sort() { use futures::future::ready; @@ -903,22 +886,21 @@ mod tests { let power_events = event_map .values() - .filter(|&pdu| is_power_event(&**pdu)) + .filter(|&pdu| is_power_event(&*pdu)) .map(|pdu| pdu.event_id.clone()) .collect::>(); let fetcher = |id| ready(events.get(&id).cloned()); let sorted_power_events = - super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) + super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher) .await .unwrap(); let resolved_power = super::iterative_auth_check( &RoomVersion::V6, - sorted_power_events.iter(), + sorted_power_events.iter().map(AsRef::as_ref).stream(), HashMap::new(), // unconflicted events &fetcher, - 1, ) .await .expect("iterative auth check failed on resolved events"); @@ -932,7 +914,7 @@ mod tests { 
.get(&(StateEventType::RoomPowerLevels, "".into())) .cloned(); - let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher, 1) + let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher) .await .unwrap(); @@ -1301,7 +1283,7 @@ mod tests { let ev_map = store.0.clone(); let fetcher = |id| ready(ev_map.get(&id).cloned()); - let exists = |id: ::Id| ready(ev_map.get(&*id).is_some()); + let exists = |id: OwnedEventId| ready(ev_map.get(&*id).is_some()); let state_sets = [state_at_bob, state_at_charlie]; let auth_chain: Vec<_> = state_sets @@ -1313,19 +1295,13 @@ mod tests { }) .collect(); - let resolved = match super::resolve( - &RoomVersionId::V2, - &state_sets, - &auth_chain, - &fetcher, - &exists, - 1, - ) - .await - { - | Ok(state) => state, - | Err(e) => panic!("{e}"), - }; + let resolved = + match super::resolve(&RoomVersionId::V2, &state_sets, &auth_chain, &fetcher, &exists) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; assert_eq!(expected, resolved); } @@ -1430,21 +1406,15 @@ mod tests { }) .collect(); - let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); - let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); - let resolved = match super::resolve( - &RoomVersionId::V6, - &state_sets, - &auth_chain, - &fetcher, - &exists, - 1, - ) - .await - { - | Ok(state) => state, - | Err(e) => panic!("{e}"), - }; + let fetcher = |id: OwnedEventId| ready(ev_map.get(&id).cloned()); + let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); + let resolved = + match super::resolve(&RoomVersionId::V6, &state_sets, &auth_chain, &fetcher, &exists) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; debug!( resolved = ?resolved @@ -1487,7 +1457,7 @@ mod tests { } #[allow(non_snake_case)] - fn BAN_STATE_SET() -> HashMap> { + fn BAN_STATE_SET() -> HashMap { vec![ to_pdu_event( "PA", @@ -1532,7 +1502,7 @@ mod tests { } #[allow(non_snake_case)] - fn JOIN_RULE() -> HashMap> { + fn JOIN_RULE() -> HashMap { vec![ to_pdu_event( "JR", diff --git a/src/core/matrix/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs index f2ee4238..c6945f66 100644 --- a/src/core/matrix/state_res/test_utils.rs +++ b/src/core/matrix/state_res/test_utils.rs @@ -1,10 +1,7 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, - sync::{ - Arc, - atomic::{AtomicU64, Ordering::SeqCst}, - }, + sync::atomic::{AtomicU64, Ordering::SeqCst}, }; use futures::future::ready; @@ -36,7 +33,7 @@ use crate::{ static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); pub(crate) async fn do_check( - events: &[Arc], + events: &[PduEvent], edges: Vec>, expected_state_ids: Vec, ) { @@ -85,7 +82,7 @@ pub(crate) async fn do_check( } // event_id -> PduEvent - let mut event_map: HashMap> = HashMap::new(); + let mut event_map: HashMap = HashMap::new(); // event_id -> StateMap let mut state_at_event: HashMap> = HashMap::new(); @@ -136,17 +133,11 @@ pub(crate) async fn do_check( .collect(); let event_map = &event_map; - let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); - let exists = |id: ::Id| ready(event_map.get(&id).is_some()); - let resolved = super::resolve( - &RoomVersionId::V6, - state_sets, - &auth_chain_sets, - &fetch, - &exists, - 1, - ) - .await; + let fetch = |id: OwnedEventId| ready(event_map.get(&id).cloned()); + let exists = |id: OwnedEventId| ready(event_map.get(&id).is_some()); + let resolved = + super::resolve(&RoomVersionId::V6, state_sets, &auth_chain_sets, &fetch, &exists) + .await; match 
resolved { | Ok(state) => state, @@ -194,7 +185,7 @@ pub(crate) async fn do_check( store.0.insert(ev_id.to_owned(), event.clone()); state_at_event.insert(node, state_after); - event_map.insert(event_id.to_owned(), Arc::clone(store.0.get(ev_id).unwrap())); + event_map.insert(event_id.to_owned(), store.0.get(ev_id).unwrap().clone()); } let mut expected_state = StateMap::new(); @@ -235,10 +226,10 @@ pub(crate) async fn do_check( } #[allow(clippy::exhaustive_structs)] -pub(crate) struct TestStore(pub(crate) HashMap>); +pub(crate) struct TestStore(pub(crate) HashMap); -impl TestStore { - pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result> { +impl TestStore { + pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result { self.0 .get(event_id) .cloned() @@ -250,8 +241,8 @@ impl TestStore { pub(crate) fn auth_event_ids( &self, room_id: &RoomId, - event_ids: Vec, - ) -> Result> { + event_ids: Vec, + ) -> Result> { let mut result = HashSet::new(); let mut stack = event_ids; @@ -288,7 +279,7 @@ impl TestStore { &[], ); let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), Arc::clone(&create_event)); + self.0.insert(cre.clone(), create_event.clone()); let alice_mem = to_pdu_event( "IMA", @@ -300,7 +291,7 @@ impl TestStore { &[cre.clone()], ); self.0 - .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); let join_rules = to_pdu_event( "IJR", @@ -399,7 +390,7 @@ pub(crate) fn to_init_pdu_event( ev_type: TimelineEventType, state_key: Option<&str>, content: Box, -) -> Arc { +) -> PduEvent { let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); let id = if id.contains('$') { id.to_owned() @@ -408,7 +399,7 @@ pub(crate) fn to_init_pdu_event( }; let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { + PduEvent { event_id: id.try_into().unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id().to_owned(), @@ -425,7 +416,7 @@ pub(crate) fn to_init_pdu_event( hashes: EventHash::new("".to_owned()), signatures: ServerSignatures::default(), }), - }) + } } pub(crate) fn to_pdu_event( @@ -436,7 +427,7 @@ pub(crate) fn to_pdu_event( content: Box, auth_events: &[S], prev_events: &[S], -) -> Arc +) -> PduEvent where S: AsRef, { @@ -458,7 +449,7 @@ where .collect::>(); let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { + PduEvent { event_id: id.try_into().unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id().to_owned(), @@ -475,12 +466,12 @@ where hashes: EventHash::new("".to_owned()), signatures: ServerSignatures::default(), }), - }) + } } // all graphs start with these input events #[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS() -> HashMap> { +pub(crate) fn INITIAL_EVENTS() -> HashMap { vec![ to_pdu_event::<&EventId>( "CREATE", @@ -562,7 +553,7 @@ pub(crate) fn INITIAL_EVENTS() -> HashMap> { // all graphs start with these input events #[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap> { +pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap { vec![to_pdu_event::<&EventId>( "CREATE", alice(), @@ -587,7 +578,7 @@ pub(crate) fn INITIAL_EDGES() -> Vec { pub(crate) mod event { use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, events::{TimelineEventType, pdu::Pdu}, }; use serde::{Deserialize, Serialize}; @@ -596,9 +587,7 @@ pub(crate) mod event { use crate::Event; impl Event for PduEvent { - type Id = OwnedEventId; - - fn 
event_id(&self) -> &Self::Id { &self.event_id } + fn event_id(&self) -> &EventId { &self.event_id } fn room_id(&self) -> &RoomId { match &self.rest { @@ -655,29 +644,31 @@ pub(crate) mod event { } #[allow(refining_impl_trait)] - fn prev_events(&self) -> Box + Send + '_> { + fn prev_events(&self) -> Box + Send + '_> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + | Pdu::RoomV1Pdu(ev) => + Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)), #[allow(unreachable_patterns)] | _ => unreachable!("new PDU version"), } } #[allow(refining_impl_trait)] - fn auth_events(&self) -> Box + Send + '_> { + fn auth_events(&self) -> Box + Send + '_> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + | Pdu::RoomV1Pdu(ev) => + Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)), #[allow(unreachable_patterns)] | _ => unreachable!("new PDU version"), } } - fn redacts(&self) -> Option<&Self::Id> { + fn redacts(&self) -> Option<&EventId> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), - | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(), #[allow(unreachable_patterns)] | _ => unreachable!("new PDU version"), } diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs index c93c7dbc..24f239ff 100644 --- a/src/core/utils/future/bool_ext.rs +++ b/src/core/utils/future/bool_ext.rs @@ -22,30 +22,6 @@ where Self: Sized + Unpin; } -pub async fn and(args: I) -> impl Future + Send -where - I: Iterator + Send, - F: Future + Send, -{ - type Result = crate::Result<(), ()>; - - let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); - - try_join_all(args).map(|result| result.is_ok()) -} - -pub async fn or(args: I) -> impl Future + Send -where - I: Iterator + Send, - F: Future + Send + Unpin, -{ - type Result = crate::Result<(), ()>; - - let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); - - select_ok(args).map(|result| result.is_ok()) -} - impl BoolExt for Fut where Fut: Future + Send, @@ -80,3 +56,27 @@ where try_select(a, b).map(|result| result.is_ok()) } } + +pub async fn and(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + try_join_all(args).map(|result| result.is_ok()) +} + +pub async fn or(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send + Unpin, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + select_ok(args).map(|result| result.is_ok()) +} diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 4edd0102..d896e66d 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -2,10 +2,12 @@ mod bool_ext; mod ext_ext; mod option_ext; mod option_stream; +mod ready_eq_ext; mod try_ext_ext; pub use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; pub use option_stream::OptionStream; +pub use ready_eq_ext::ReadyEqExt; pub use 
try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/ready_eq_ext.rs b/src/core/utils/future/ready_eq_ext.rs new file mode 100644 index 00000000..1625adae --- /dev/null +++ b/src/core/utils/future/ready_eq_ext.rs @@ -0,0 +1,25 @@ +//! Future extension for Partial Equality against present value + +use futures::{Future, FutureExt}; + +pub trait ReadyEqExt +where + Self: Future + Send + Sized, + T: PartialEq + Send + Sync, +{ + fn eq(self, t: &T) -> impl Future + Send; + + fn ne(self, t: &T) -> impl Future + Send; +} + +impl ReadyEqExt for Fut +where + Fut: Future + Send + Sized, + T: PartialEq + Send + Sync, +{ + #[inline] + fn eq(self, t: &T) -> impl Future + Send { self.map(move |r| r.eq(t)) } + + #[inline] + fn ne(self, t: &T) -> impl Future + Send { self.map(move |r| r.ne(t)) } +} diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index 488f2a13..9316731c 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -10,6 +10,7 @@ use crate::{Err, Error, Result, debug::type_name, err}; /// Checked arithmetic expression. Returns a Result #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! checked { ($($input:tt)+) => { $crate::utils::math::checked_ops!($($input)+) @@ -22,6 +23,7 @@ macro_rules! checked { /// has no realistic expectation for error and no interest in cluttering the /// callsite with result handling from checked!. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! expected { ($msg:literal, $($input:tt)+) => { $crate::checked!($($input)+).expect($msg) @@ -37,6 +39,7 @@ macro_rules! expected { /// regression analysis. #[cfg(not(debug_assertions))] #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! validated { ($($input:tt)+) => { //#[allow(clippy::arithmetic_side_effects)] { @@ -53,6 +56,7 @@ macro_rules! validated { /// the expression is obviously safe. The check is elided in release-mode. #[cfg(debug_assertions)] #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! validated { ($($input:tt)+) => { $crate::expected!($($input)+) } } diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 117fb739..54404e4c 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -28,7 +28,7 @@ pub use self::{ bool::BoolExt, bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}, debug::slice_truncated as debug_slice_truncated, - future::TryExtExt as TryFutureExtExt, + future::{BoolExt as FutureBoolExt, OptionStream, TryExtExt as TryFutureExtExt}, hash::sha256::delimited as calculate_hash, html::Escape as HtmlEscape, json::{deserialize_from_str, to_canonical_object}, @@ -173,7 +173,6 @@ macro_rules! is_equal { /// Functor for |x| *x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! deref_at { ($idx:tt) => { |t| *t.$idx @@ -182,7 +181,6 @@ macro_rules! deref_at { /// Functor for |ref x| x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! ref_at { ($idx:tt) => { |ref t| &t.$idx @@ -191,7 +189,6 @@ macro_rules! ref_at { /// Functor for |&x| x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! val_at { ($idx:tt) => { |&t| t.$idx @@ -200,7 +197,6 @@ macro_rules! val_at { /// Functor for |x| x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! 
at { ($idx:tt) => { |t| t.$idx diff --git a/src/core/utils/stream/expect.rs b/src/core/utils/stream/expect.rs index 3509bb83..ec572714 100644 --- a/src/core/utils/stream/expect.rs +++ b/src/core/utils/stream/expect.rs @@ -10,7 +10,7 @@ pub trait TryExpect<'a, Item> { impl<'a, T, Item> TryExpect<'a, Item> for T where - T: Stream> + TryStream + Send + 'a, + T: Stream> + Send + TryStream + 'a, Item: 'a, { #[inline] diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index dce7d378..be4d1b25 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{Ready, ready}, + future::{FutureExt, Ready, ready}, stream::{ All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile, }, @@ -16,7 +16,7 @@ use futures::{ /// This interface is not necessarily complete; feel free to add as-needed. pub trait ReadyExt where - Self: Stream + Send + Sized, + Self: Stream + Sized, { fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> where @@ -26,6 +26,12 @@ where where F: Fn(Item) -> bool; + fn ready_find<'a, F>(self, f: F) -> impl Future> + Send + where + Self: Send + Unpin + 'a, + F: Fn(&Item) -> bool + Send + 'a, + Item: Send; + fn ready_filter<'a, F>( self, f: F, @@ -93,7 +99,7 @@ where impl ReadyExt for S where - S: Stream + Send + Sized, + S: Stream + Sized, { #[inline] fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> @@ -111,6 +117,19 @@ where self.any(move |t| ready(f(t))) } + #[inline] + fn ready_find<'a, F>(self, f: F) -> impl Future> + Send + where + Self: Send + Unpin + 'a, + F: Fn(&Item) -> bool + Send + 'a, + Item: Send, + { + self.ready_filter(f) + .take(1) + .into_future() + .map(|(curr, _next)| curr) + } + #[inline] fn ready_filter<'a, F>( self, diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index 611c177f..287fa1e1 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -13,8 +13,8 @@ use crate::Result; /// This interface is not necessarily complete; feel free to add as-needed. pub trait TryReadyExt where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { fn ready_and_then( self, @@ -67,8 +67,8 @@ where impl TryReadyExt for S where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { #[inline] fn ready_and_then( diff --git a/src/core/utils/stream/try_tools.rs b/src/core/utils/stream/try_tools.rs index ea3b50fc..417806fc 100644 --- a/src/core/utils/stream/try_tools.rs +++ b/src/core/utils/stream/try_tools.rs @@ -8,8 +8,8 @@ use crate::Result; /// TryStreamTools pub trait TryTools where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { fn try_take( self, @@ -23,8 +23,8 @@ where impl TryTools for S where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { #[inline] fn try_take( diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index d8fa3f95..7d81903d 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -14,6 +14,7 @@ pub const EMPTY: &str = ""; /// returned otherwise the input (i.e. &'static str) is returned. If multiple /// arguments are provided the first is assumed to be a format string. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
format_maybe { ($s:literal $(,)?) => { if $crate::is_format!($s) { std::format!($s).into() } else { $s.into() } @@ -27,6 +28,7 @@ macro_rules! format_maybe { /// Constant expression to decide if a literal is a format string. Note: could /// use some improvement. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! is_format { ($s:literal) => { ::const_str::contains!($s, "{") && ::const_str::contains!($s, "}") diff --git a/src/core/utils/string/between.rs b/src/core/utils/string/between.rs index 05c137b4..8d3b6979 100644 --- a/src/core/utils/string/between.rs +++ b/src/core/utils/string/between.rs @@ -1,12 +1,12 @@ type Delim<'a> = (&'a str, &'a str); -/// Slice a string between a pair of delimeters. +/// Slice a string between a pair of delimiters. pub trait Between<'a> { - /// Extract a string between the delimeters. If the delimeters were not + /// Extract a string between the delimiters. If the delimiters were not /// found None is returned, otherwise the first extraction is returned. fn between(&self, delim: Delim<'_>) -> Option<&'a str>; - /// Extract a string between the delimeters. If the delimeters were not + /// Extract a string between the delimiters. If the delimiters were not /// found the original string is returned; take note of this behavior, /// if an empty slice is desired for this case use the fallible version and /// unwrap to EMPTY. diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index 452b04b2..b71c3437 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -117,7 +117,7 @@ pub fn name_from_path(path: &Path) -> Result { /// Get the (major, minor) of the block device on which Path is mounted. #[allow(clippy::useless_conversion, clippy::unnecessary_fallible_conversions)] -pub fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { +fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { #[cfg(target_family = "unix")] use std::os::unix::fs::MetadataExt; diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 067c6f5f..55d4793f 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -17,19 +17,31 @@ crate-type = [ ] [features] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", -] -jemalloc = [ - "rust-rocksdb/jemalloc", -] io_uring = [ "rust-rocksdb/io-uring", ] +jemalloc = [ + "conduwuit-core/jemalloc", + "rust-rocksdb/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", +] +release_max_log_level = [ + "conduwuit-core/release_max_log_level", + "log/max_level_trace", + "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", +] zstd_compression = [ + "conduwuit-core/zstd_compression", "rust-rocksdb/zstd", ] diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs index bb110630..ac72e6d4 100644 --- a/src/database/engine/backup.rs +++ b/src/database/engine/backup.rs @@ -1,24 +1,16 @@ -use std::fmt::Write; +use std::{ffi::OsString, path::PathBuf}; -use conduwuit::{Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; +use conduwuit::{Err, Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; use super::Engine; -use crate::{or_else, util::map_err}; +use crate::util::map_err; #[implement(Engine)] 
#[tracing::instrument(skip(self))] pub fn backup(&self) -> Result { - let server = &self.ctx.server; - let config = &server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok(()); - } - - let options = - BackupEngineOptions::new(path.expect("valid database backup path")).map_err(map_err)?; - let mut engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err)?; + let mut engine = self.backup_engine()?; + let config = &self.ctx.server.config; if config.database_backups_to_keep > 0 { let flush = !self.is_read_only(); engine @@ -40,34 +32,62 @@ pub fn backup(&self) -> Result { } } + if config.database_backups_to_keep == 0 { + warn!("Configuration item `database_backups_to_keep` is set to 0."); + } + Ok(()) } #[implement(Engine)] -pub fn backup_list(&self) -> Result { - let server = &self.ctx.server; - let config = &server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok("Configure database_backup_path to enable backups, or the path specified is \ - not valid" - .to_owned()); +pub fn backup_list(&self) -> Result + Send> { + let info = self.backup_engine()?.get_backup_info(); + + if info.is_empty() { + return Err!("No backups found."); } - let mut res = String::new(); - let options = - BackupEngineOptions::new(path.expect("valid database backup path")).or_else(or_else)?; - let engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).or_else(or_else)?; - for info in engine.get_backup_info() { - writeln!( - res, + let list = info.into_iter().map(|info| { + format!( "#{} {}: {} bytes, {} files", info.backup_id, rfc2822_from_seconds(info.timestamp), info.size, info.num_files, - )?; + ) + }); + + Ok(list) +} + +#[implement(Engine)] +pub fn backup_count(&self) -> Result { + let info = self.backup_engine()?.get_backup_info(); + + Ok(info.len()) +} + +#[implement(Engine)] +fn backup_engine(&self) -> Result { + let path = self.backup_path()?; + let options = BackupEngineOptions::new(path).map_err(map_err)?; + BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err) +} + +#[implement(Engine)] +fn backup_path(&self) -> Result { + let path = self + .ctx + .server + .config + .database_backup_path + .clone() + .map(PathBuf::into_os_string) + .unwrap_or_default(); + + if path.is_empty() { + return Err!(Config("database_backup_path", "Configure path to enable backups")); } - Ok(res) + Ok(path) } diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 7ceec722..cbbd1012 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -193,7 +193,7 @@ fn get_cache(ctx: &Context, desc: &Descriptor) -> Option { return None; } - // Some cache capacities are overriden by server config in a strange but + // Some cache capacities are overridden by server config in a strange but // legacy-compat way let config = &ctx.server.config; let cap = match desc.name { diff --git a/src/macros/admin.rs b/src/macros/admin.rs index bf1586a0..fe227b43 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -8,7 +8,7 @@ use crate::{Result, utils::camel_to_snake_string}; pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result { let attr: Attribute = parse_quote! 
{ - #[conduwuit_macros::implement(crate::Command, params = "<'_>")] + #[conduwuit_macros::implement(crate::Context, params = "<'_>")] }; item.attrs.push(attr); @@ -19,15 +19,16 @@ pub(super) fn command_dispatch(item: ItemEnum, _args: &[Meta]) -> Result = item.variants.iter().map(dispatch_arm).try_collect()?; let switch = quote! { + #[allow(clippy::large_stack_frames)] //TODO: fixme pub(super) async fn process( command: #name, - context: &crate::Command<'_> + context: &crate::Context<'_> ) -> Result { use #name::*; #[allow(non_snake_case)] - Ok(match command { + match command { #( #arm )* - }) + } } }; @@ -47,8 +48,7 @@ fn dispatch_arm(v: &Variant) -> Result { let arg = field.clone(); quote! { #name { #( #field ),* } => { - let c = Box::pin(context.#handler(#( #arg ),*)).await?; - Box::pin(context.write_str(c.body())).await?; + Box::pin(context.#handler(#( #arg ),*)).await }, } }, @@ -58,15 +58,14 @@ fn dispatch_arm(v: &Variant) -> Result { }; quote! { #name ( #field ) => { - Box::pin(#handler::process(#field, context)).await?; + Box::pin(#handler::process(#field, context)).await } } }, | Fields::Unit => { quote! { #name => { - let c = Box::pin(context.#handler()).await?; - Box::pin(context.write_str(c.body())).await?; + Box::pin(context.#handler()).await }, } }, diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 87ca48c8..0c5e2b6f 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -36,6 +36,7 @@ assets = [ [features] default = [ + "blurhashing", "brotli_compression", "element_hacks", "gzip_compression", @@ -70,6 +71,7 @@ element_hacks = [ ] gzip_compression = [ "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", "conduwuit-router/gzip_compression", "conduwuit-service/gzip_compression", ] @@ -141,6 +143,7 @@ zstd_compression = [ "conduwuit-core/zstd_compression", "conduwuit-database/zstd_compression", "conduwuit-router/zstd_compression", + "conduwuit-service/zstd_compression", ] conduwuit_mods = [ "conduwuit-core/conduwuit_mods", diff --git a/src/main/clap.rs b/src/main/clap.rs index 707a1c76..9b63af19 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -74,17 +74,30 @@ pub(crate) struct Args { /// with the exception of the last bucket, try increasing this value to e.g. /// 50 or 100. Inversely, decrease to 10 etc if the histogram lacks /// resolution. - #[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_INTERVAL", default_value = "25")] + #[arg( + long, + hide(true), + env = "CONTINUWUITY_RUNTIME_HISTOGRAM_INTERVAL", + env = "CONDUWUIT_RUNTIME_HISTOGRAM_INTERVAL", + default_value = "25" + )] pub(crate) worker_histogram_interval: u64, /// Set the histogram bucket count (tokio_unstable). Default is 20. - #[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS", default_value = "20")] + #[arg( + long, + hide(true), + env = "CONTINUWUITY_RUNTIME_HISTOGRAM_BUCKETS", + env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS", + default_value = "20" + )] pub(crate) worker_histogram_buckets: usize, /// Toggles worker affinity feature. 
#[arg(
long,
hide(true),
+ env = "CONTINUWUITY_RUNTIME_WORKER_AFFINITY",
env = "CONDUWUIT_RUNTIME_WORKER_AFFINITY",
action = ArgAction::Set,
num_args = 0..=1,
@@ -99,6 +112,7 @@ pub(crate) struct Args {
#[arg(
long,
hide(true),
+ env = "CONTINUWUITY_RUNTIME_GC_ON_PARK",
env = "CONDUWUIT_RUNTIME_GC_ON_PARK",
action = ArgAction::Set,
num_args = 0..=1,
diff --git a/src/main/main.rs b/src/main/main.rs
index 1a9d3fe4..3416bc68 100644
--- a/src/main/main.rs
+++ b/src/main/main.rs
@@ -73,7 +73,7 @@ async fn async_main(server: &Arc<Server>) -> Result<(), Error> {
.lock()
.await
.take()
- .expect("services initialied"),
+ .expect("services initialized"),
)
.await
{
diff --git a/src/main/restart.rs b/src/main/restart.rs
index b9d1dc94..631c1e21 100644
--- a/src/main/restart.rs
+++ b/src/main/restart.rs
@@ -13,8 +13,8 @@ pub(super) fn restart() -> ! {
//
// We can (and do) prevent that panic by checking the result of current_exe()
// prior to committing to restart, returning an error to the user without any
- // unexpected shutdown. In a nutshell that is the execuse for this unsafety.
- // Nevertheless, we still want a way to override the restart preventation (i.e.
+ // unexpected shutdown. In a nutshell that is the excuse for this unsafety.
+ // Nevertheless, we still want a way to override the restart prevention (i.e.
// admin server restart --force).
let exe = unsafe { utils::sys::current_exe().expect("program path must be available") };
let envs = env::vars();
diff --git a/src/main/runtime.rs b/src/main/runtime.rs
index 1c58ea81..e9029012 100644
--- a/src/main/runtime.rs
+++ b/src/main/runtime.rs
@@ -98,12 +98,7 @@ pub(super) fn shutdown(server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
Level::INFO
};
- debug!(
- timeout = ?SHUTDOWN_TIMEOUT,
- "Waiting for runtime..."
- );
-
- runtime.shutdown_timeout(SHUTDOWN_TIMEOUT);
+ wait_shutdown(server, runtime);
let runtime_metrics = server.server.metrics.runtime_interval().unwrap_or_default();
event!(LEVEL, ?runtime_metrics, "Final runtime metrics");
@@ -111,13 +106,23 @@
#[cfg(not(tokio_unstable))]
#[tracing::instrument(name = "stop", level = "info", skip_all)]
-pub(super) fn shutdown(_server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
+pub(super) fn shutdown(server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
+ wait_shutdown(server, runtime);
+}
+
+fn wait_shutdown(_server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
debug!(
timeout = ?SHUTDOWN_TIMEOUT,
"Waiting for runtime..."
);
runtime.shutdown_timeout(SHUTDOWN_TIMEOUT);
+
+ // Join any jemalloc threads so they don't appear in use at exit.
+ #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))]
+ conduwuit_core::alloc::je::background_thread_enable(false)
+ .log_debug_err()
+ .ok();
}
#[tracing::instrument(
diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml
index 51e15aed..9fcb8d6a 100644
--- a/src/router/Cargo.toml
+++ b/src/router/Cargo.toml
@@ -17,34 +17,78 @@ crate-type = [
]

[features]
+brotli_compression = [
+ "conduwuit-admin/brotli_compression",
+ "conduwuit-api/brotli_compression",
+ "conduwuit-core/brotli_compression",
+ "conduwuit-service/brotli_compression",
+ "tower-http/compression-br",
+]
+direct_tls = [
+ "axum-server/tls-rustls",
+ "dep:rustls",
+ "dep:axum-server-dual-protocol",
+]
+gzip_compression = [
+ "conduwuit-admin/gzip_compression",
+ "conduwuit-api/gzip_compression",
+ "conduwuit-core/gzip_compression",
+ "conduwuit-service/gzip_compression",
+ "tower-http/compression-gzip",
+]
+io_uring = [
+ "conduwuit-admin/io_uring",
+ "conduwuit-api/io_uring",
+ "conduwuit-service/io_uring",
+]
+jemalloc = [
+ "conduwuit-admin/jemalloc",
+ "conduwuit-api/jemalloc",
+ "conduwuit-core/jemalloc",
+ "conduwuit-service/jemalloc",
+]
+jemalloc_conf = [
+ "conduwuit-admin/jemalloc_conf",
+ "conduwuit-api/jemalloc_conf",
+ "conduwuit-core/jemalloc_conf",
+ "conduwuit-service/jemalloc_conf",
+]
+jemalloc_prof = [
+ "conduwuit-admin/jemalloc_prof",
+ "conduwuit-api/jemalloc_prof",
+ "conduwuit-core/jemalloc_prof",
+ "conduwuit-service/jemalloc_prof",
+]
+jemalloc_stats = [
+ "conduwuit-admin/jemalloc_stats",
+ "conduwuit-api/jemalloc_stats",
+ "conduwuit-core/jemalloc_stats",
+ "conduwuit-service/jemalloc_stats",
+]
release_max_log_level = [
+ "conduwuit-admin/release_max_log_level",
+ "conduwuit-api/release_max_log_level",
+ "conduwuit-core/release_max_log_level",
+ "conduwuit-service/release_max_log_level",
"tracing/max_level_trace",
"tracing/release_max_level_info",
"log/max_level_trace",
"log/release_max_level_info",
]
sentry_telemetry = [
+ "conduwuit-core/sentry_telemetry",
"dep:sentry",
"dep:sentry-tracing",
"dep:sentry-tower",
]
-zstd_compression = [
- "tower-http/compression-zstd",
-]
-gzip_compression = [
- "tower-http/compression-gzip",
-]
-brotli_compression = [
- "tower-http/compression-br",
-]
systemd = [
"dep:sd-notify",
]
-
-direct_tls = [
- "axum-server/tls-rustls",
- "dep:rustls",
- "dep:axum-server-dual-protocol",
+zstd_compression = [
+ "conduwuit-api/zstd_compression",
+ "conduwuit-core/zstd_compression",
+ "conduwuit-service/zstd_compression",
+ "tower-http/compression-zstd",
]

[dependencies]
@@ -58,6 +102,7 @@ conduwuit-admin.workspace = true
conduwuit-api.workspace = true
conduwuit-core.workspace = true
conduwuit-service.workspace = true
+conduwuit-web.workspace = true
const-str.workspace = true
futures.workspace = true
http.workspace = true
@@ -69,11 +114,11 @@ ruma.workspace = true
rustls.workspace = true
rustls.optional = true
sentry.optional = true
+sentry.workspace = true
sentry-tower.optional = true
sentry-tower.workspace = true
sentry-tracing.optional = true
sentry-tracing.workspace = true
-sentry.workspace = true
serde_json.workspace = true
tokio.workspace = true
tower.workspace = true
diff --git a/src/router/layers.rs b/src/router/layers.rs
index 6920555d..70f3a660 100644
--- a/src/router/layers.rs
+++ b/src/router/layers.rs
@@ -6,8 +6,7 @@ use axum::{
};
use axum_client_ip::SecureClientIpSource;
use conduwuit::{Result, Server, debug, error};
-use conduwuit_api::router::state::Guard;
-use conduwuit_service::Services;
+use
conduwuit_service::{Services, state::Guard}; use http::{ HeaderValue, Method, StatusCode, header::{self, HeaderName}, diff --git a/src/router/router.rs b/src/router/router.rs index 0f95b924..fdaf9126 100644 --- a/src/router/router.rs +++ b/src/router/router.rs @@ -1,9 +1,8 @@ use std::sync::Arc; -use axum::{Router, response::IntoResponse, routing::get}; +use axum::{Router, response::IntoResponse}; use conduwuit::Error; -use conduwuit_api::router::{state, state::Guard}; -use conduwuit_service::Services; +use conduwuit_service::{Services, state, state::Guard}; use http::{StatusCode, Uri}; use ruma::api::client::error::ErrorKind; @@ -11,7 +10,7 @@ pub(crate) fn build(services: &Arc) -> (Router, Guard) { let router = Router::::new(); let (state, guard) = state::create(services.clone()); let router = conduwuit_api::router::build(router, &services.server) - .route("/", get(it_works)) + .merge(conduwuit_web::build()) .fallback(not_found) .with_state(state); @@ -21,5 +20,3 @@ pub(crate) fn build(services: &Arc) -> (Router, Guard) { async fn not_found(_uri: Uri) -> impl IntoResponse { Error::Request(ErrorKind::Unrecognized, "Not Found".into(), StatusCode::NOT_FOUND) } - -async fn it_works() -> &'static str { "hewwo from conduwuit woof!" } diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index dd46ab53..20b58601 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -31,12 +31,14 @@ pub(super) async fn serve( .install_default() .expect("failed to initialise aws-lc-rs rustls crypto provider"); - debug!("Using direct TLS. Certificate path {certs} and certificate private key path {key}",); info!( "Note: It is strongly recommended that you use a reverse proxy instead of running \ conduwuit directly with TLS." ); - let conf = RustlsConfig::from_pem_file(certs, key).await?; + debug!("Using direct TLS. 
Certificate path {certs} and certificate private key path {key}",); + let conf = RustlsConfig::from_pem_file(certs, key) + .await + .map_err(|e| err!(Config("tls", "Failed to load certificates or key: {e}")))?; let mut join_set = JoinSet::new(); let app = app.into_make_service_with_connect_info::(); diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index caeea318..8b0d1405 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -17,7 +17,12 @@ crate-type = [ ] [features] +blurhashing = [ + "dep:image", + "dep:blurhash", +] brotli_compression = [ + "conduwuit-core/brotli_compression", "reqwest/brotli", ] console = [ @@ -26,25 +31,48 @@ console = [ ] element_hacks = [] gzip_compression = [ + "conduwuit-core/gzip_compression", "reqwest/gzip", ] +io_uring = [ + "conduwuit-database/io_uring", +] +jemalloc = [ + "conduwuit-core/jemalloc", + "conduwuit-database/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", + "conduwuit-database/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", + "conduwuit-database/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", + "conduwuit-database/jemalloc_stats", +] media_thumbnail = [ "dep:image", ] release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", + "conduwuit-core/release_max_log_level", + "conduwuit-database/release_max_log_level", "log/max_level_trace", "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", ] url_preview = [ "dep:image", "dep:webpage", ] zstd_compression = [ + "conduwuit-core/zstd_compression", + "conduwuit-database/zstd_compression", "reqwest/zstd", ] -blurhashing = ["dep:image","dep:blurhash"] [dependencies] async-trait.workspace = true diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 4de37092..157b4d65 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use conduwuit::{Result, pdu::PduBuilder}; +use futures::FutureExt; use ruma::{ RoomId, RoomVersionId, events::room::{ @@ -63,6 +64,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 2. Make server user/bot join @@ -78,6 +80,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 3. Power levels @@ -95,6 +98,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 4.1 Join Rules @@ -107,6 +111,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 4.2 History Visibility @@ -122,6 +127,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 4.3 Guest Access @@ -137,6 +143,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 5. 
Events implied by name and topic @@ -150,6 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; services @@ -157,12 +165,13 @@ pub async fn create_admin_room(services: &Services) -> Result { .timeline .build_and_append_pdu( PduBuilder::state(String::new(), &RoomTopicEventContent { - topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://conduwuit.puppyirl.gay/", services.config.server_name), + topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://continuwuity.org/", services.config.server_name), }), server_user, &room_id, &state_lock, ) + .boxed() .await?; // 6. Room alias @@ -180,6 +189,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; services @@ -197,6 +207,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; Ok(()) diff --git a/src/service/admin/execute.rs b/src/service/admin/execute.rs index 174b28ed..e0d724bd 100644 --- a/src/service/admin/execute.rs +++ b/src/service/admin/execute.rs @@ -25,7 +25,7 @@ pub(super) async fn console_auto_stop(&self) { /// Execute admin commands after startup #[implement(super::Service)] pub(super) async fn startup_execute(&self) -> Result { - // List of comamnds to execute + // List of commands to execute let commands = &self.services.server.config.admin_execute; // Determine if we're running in smoketest-mode which will change some behaviors @@ -64,7 +64,7 @@ pub(super) async fn startup_execute(&self) -> Result { /// Execute admin commands after signal #[implement(super::Service)] pub(super) async fn signal_execute(&self) -> Result { - // List of comamnds to execute + // List of commands to execute let commands = self.services.server.config.admin_signal_execute.clone(); // When true, errors are ignored and execution continues. diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 6780b7ae..2d90ea52 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -126,7 +126,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result { if self.services.server.config.admin_room_notices { let welcome_message = String::from( - "## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. 
There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`",
+ "## Thank you for trying out Continuwuity!\n\nContinuwuity is a hard fork of conduwuit, which is also a hard fork of Conduit, currently in Beta. The Beta status initially was inherited from Conduit, however, over time this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. Continuwuity is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> Source code: https://forgejo.ellis.link/continuwuation/continuwuity\n> Documentation: https://continuwuity.org/\n> Report issues: https://forgejo.ellis.link/continuwuation/continuwuity/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nContinuwuity space: `/join #space:continuwuity.org`\nContinuwuity main room (Ask questions and get notified on updates): `/join #continuwuity:continuwuity.org`\nContinuwuity offtopic room: `/join #offtopic:continuwuity.org`",
);

// Send welcome message
diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs
index b3466711..683f5400 100644
--- a/src/service/admin/mod.rs
+++ b/src/service/admin/mod.rs
@@ -166,7 +166,7 @@ impl Service {
.map_err(|e| err!("Failed to enqueue admin command: {e:?}"))
}

- /// Dispatches a comamnd to the processor on the current task and waits for
+ /// Dispatches a command to the processor on the current task and waits for
/// completion.
pub async fn command_in_place(
&self,
diff --git a/src/service/announcements/mod.rs b/src/service/announcements/mod.rs
new file mode 100644
index 00000000..4df8971b
--- /dev/null
+++ b/src/service/announcements/mod.rs
@@ -0,0 +1,169 @@
+//! # Announcements service
+//!
+//! This service is responsible for checking for announcements and sending them
+//! to the client.
+//!
+//! It is used to send announcements to the admin room and logs.
+//! Announcements are stored in /docs/static/announcements right now.
+//! The highest seen announcement ID is stored in the database. When the
+//! announcement check is run, all announcements with an ID higher than those
+//! seen before are printed to the console and sent to the admin room.
+//!
+//! Old announcements should be deleted to avoid spamming the room on first
+//! install.
+//!
+//! Announcements are displayed as markdown in the admin room, but as plain
+//! text in the console.
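+//!
+//! A minimal sketch of the ID-based de-duplication described above; the
+//! names below are illustrative only, not this service's actual API:
+//!
+//! ```
+//! struct Announcement {
+//!     id: u64,
+//!     message: String,
+//! }
+//!
+//! /// Keep only entries newer than `last_seen` and report the new
+//! /// high-water mark to persist.
+//! fn filter_new(all: Vec<Announcement>, last_seen: u64) -> (Vec<Announcement>, u64) {
+//!     let mut max_id = last_seen;
+//!     let new: Vec<_> = all
+//!         .into_iter()
+//!         .filter(|a| a.id > last_seen)
+//!         .inspect(|a| max_id = max_id.max(a.id))
+//!         .collect();
+//!     (new, max_id)
+//! }
+//! ```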
+ +use std::{sync::Arc, time::Duration}; + +use async_trait::async_trait; +use conduwuit::{Result, Server, debug, info, warn}; +use database::{Deserialized, Map}; +use ruma::events::room::message::RoomMessageEventContent; +use serde::Deserialize; +use tokio::{ + sync::Notify, + time::{MissedTickBehavior, interval}, +}; + +use crate::{Dep, admin, client, globals}; + +pub struct Service { + interval: Duration, + interrupt: Notify, + db: Arc, + services: Services, +} + +struct Services { + admin: Dep, + client: Dep, + globals: Dep, + server: Arc, +} + +#[derive(Debug, Deserialize)] +struct CheckForAnnouncementsResponse { + announcements: Vec, +} + +#[derive(Debug, Deserialize)] +struct CheckForAnnouncementsResponseEntry { + id: u64, + date: Option, + message: String, +} + +const CHECK_FOR_ANNOUNCEMENTS_URL: &str = + "https://continuwuity.org/.well-known/continuwuity/announcements"; +const CHECK_FOR_ANNOUNCEMENTS_INTERVAL: u64 = 7200; // 2 hours +const LAST_CHECK_FOR_ANNOUNCEMENTS_ID: &[u8; 25] = b"last_seen_announcement_id"; +// In conduwuit, this was under b"a" + +#[async_trait] +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + interval: Duration::from_secs(CHECK_FOR_ANNOUNCEMENTS_INTERVAL), + interrupt: Notify::new(), + db: args.db["global"].clone(), + services: Services { + globals: args.depend::("globals"), + admin: args.depend::("admin"), + client: args.depend::("client"), + server: args.server.clone(), + }, + })) + } + + #[tracing::instrument(skip_all, name = "announcements", level = "debug")] + async fn worker(self: Arc) -> Result<()> { + if !self.services.globals.allow_announcements_check() { + debug!("Disabling announcements check"); + return Ok(()); + } + + let mut i = interval(self.interval); + i.set_missed_tick_behavior(MissedTickBehavior::Delay); + i.reset_after(self.interval); + loop { + tokio::select! { + () = self.interrupt.notified() => break, + _ = i.tick() => (), + } + + if let Err(e) = self.check().await { + warn!(%e, "Failed to check for announcements"); + } + } + + Ok(()) + } + + fn interrupt(&self) { self.interrupt.notify_waiters(); } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +impl Service { + #[tracing::instrument(skip_all)] + async fn check(&self) -> Result<()> { + debug_assert!(self.services.server.running(), "server must not be shutting down"); + + let response = self + .services + .client + .default + .get(CHECK_FOR_ANNOUNCEMENTS_URL) + .send() + .await? 
+ .text() + .await?; + + let response = serde_json::from_str::(&response)?; + for announcement in &response.announcements { + if announcement.id > self.last_check_for_announcements_id().await { + self.handle(announcement).await; + self.update_check_for_announcements_id(announcement.id); + } + } + + Ok(()) + } + + #[tracing::instrument(skip_all)] + async fn handle(&self, announcement: &CheckForAnnouncementsResponseEntry) { + if let Some(date) = &announcement.date { + info!("[announcements] {date} {:#}", announcement.message); + } else { + info!("[announcements] {:#}", announcement.message); + } + + self.services + .admin + .send_message(RoomMessageEventContent::text_markdown(format!( + "### New announcement{}\n\n{}", + announcement + .date + .as_ref() + .map_or_else(String::new, |date| format!(" - `{date}`")), + announcement.message + ))) + .await + .ok(); + } + + #[inline] + pub fn update_check_for_announcements_id(&self, id: u64) { + self.db.raw_put(LAST_CHECK_FOR_ANNOUNCEMENTS_ID, id); + } + + pub async fn last_check_for_announcements_id(&self) -> u64 { + self.db + .get(LAST_CHECK_FOR_ANNOUNCEMENTS_ID) + .await + .deserialized() + .unwrap_or(0_u64) + } +} diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 50a60033..7be8a471 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,20 +1,20 @@ mod namespace_regex; mod registration_info; -use std::{collections::BTreeMap, sync::Arc}; +use std::{collections::BTreeMap, iter::IntoIterator, sync::Arc}; use async_trait::async_trait; -use conduwuit::{Result, err, utils::stream::TryIgnore}; +use conduwuit::{Result, err, utils::stream::IterStream}; use database::Map; -use futures::{Future, StreamExt, TryStreamExt}; +use futures::{Future, FutureExt, Stream, TryStreamExt}; use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration}; -use tokio::sync::RwLock; +use tokio::sync::{RwLock, RwLockReadGuard}; pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo}; use crate::{Dep, sending}; pub struct Service { - registration_info: RwLock>, + registration_info: RwLock, services: Services, db: Data, } @@ -27,6 +27,8 @@ struct Data { id_appserviceregistrations: Arc, } +type Registrations = BTreeMap; + #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -41,19 +43,18 @@ impl crate::Service for Service { })) } - async fn worker(self: Arc) -> Result<()> { + async fn worker(self: Arc) -> Result { // Inserting registrations into cache - for appservice in self.iter_db_ids().await? 
{ - self.registration_info.write().await.insert( - appservice.0, - appservice - .1 - .try_into() - .expect("Should be validated on registration"), - ); - } + self.iter_db_ids() + .try_for_each(async |appservice| { + self.registration_info + .write() + .await + .insert(appservice.0, appservice.1.try_into()?); - Ok(()) + Ok(()) + }) + .await } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } @@ -84,7 +85,7 @@ impl Service { /// # Arguments /// /// * `service_name` - the registration ID of the appservice - pub async fn unregister_appservice(&self, appservice_id: &str) -> Result<()> { + pub async fn unregister_appservice(&self, appservice_id: &str) -> Result { // removes the appservice registration info self.registration_info .write() @@ -112,15 +113,6 @@ impl Service { .map(|info| info.registration) } - pub async fn iter_ids(&self) -> Vec { - self.registration_info - .read() - .await - .keys() - .cloned() - .collect() - } - pub async fn find_from_token(&self, token: &str) -> Option { self.read() .await @@ -156,15 +148,22 @@ impl Service { .any(|info| info.rooms.is_exclusive_match(room_id.as_str())) } - pub fn read( - &self, - ) -> impl Future>> - { - self.registration_info.read() + pub fn iter_ids(&self) -> impl Stream + Send { + self.read() + .map(|info| info.keys().cloned().collect::>()) + .map(IntoIterator::into_iter) + .map(IterStream::stream) + .flatten_stream() } - #[inline] - pub async fn all(&self) -> Result> { self.iter_db_ids().await } + pub fn iter_db_ids(&self) -> impl Stream> + Send { + self.db + .id_appserviceregistrations + .keys() + .and_then(move |id: &str| async move { + Ok((id.to_owned(), self.get_db_registration(id).await?)) + }) + } pub async fn get_db_registration(&self, id: &str) -> Result { self.db @@ -175,16 +174,7 @@ impl Service { .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) } - async fn iter_db_ids(&self) -> Result> { - self.db - .id_appserviceregistrations - .keys() - .ignore_err() - .then(|id: String| async move { - let reg = self.get_db_registration(&id).await?; - Ok((id, reg)) - }) - .try_collect() - .await + pub fn read(&self) -> impl Future> + Send { + self.registration_info.read() } } diff --git a/src/service/appservice/namespace_regex.rs b/src/service/appservice/namespace_regex.rs index fe0fd91f..76b754ae 100644 --- a/src/service/appservice/namespace_regex.rs +++ b/src/service/appservice/namespace_regex.rs @@ -26,7 +26,7 @@ impl NamespaceRegex { false } - /// Checks if this namespace has exlusive rights to a namespace + /// Checks if this namespace has exclusive rights to a namespace #[inline] #[must_use] pub fn is_exclusive_match(&self, heystack: &str) -> bool { diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 97314ffb..1d1d1154 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -64,13 +64,7 @@ where return Err!(Config("allow_federation", "Federation is disabled.")); } - if self - .services - .server - .config - .forbidden_remote_server_names - .is_match(dest.host()) - { + if self.services.moderation.is_remote_server_forbidden(dest) { return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs index ce7765ee..15521875 100644 --- a/src/service/federation/mod.rs +++ b/src/service/federation/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use conduwuit::{Result, Server}; -use crate::{Dep, client, resolver, server_keys}; +use 
crate::{Dep, client, moderation, resolver, server_keys}; pub struct Service { services: Services, @@ -15,6 +15,7 @@ struct Services { client: Dep, resolver: Dep, server_keys: Dep, + moderation: Dep, } impl crate::Service for Service { @@ -25,6 +26,7 @@ impl crate::Service for Service { client: args.depend::("client"), resolver: args.depend::("resolver"), server_keys: args.depend::("server_keys"), + moderation: args.depend::("moderation"), }, })) } diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index b43b7c5f..21c09252 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -72,10 +72,4 @@ impl Data { pub fn bump_database_version(&self, new_version: u64) { self.global.raw_put(b"version", new_version); } - - #[inline] - pub fn backup(&self) -> Result { self.db.db.backup() } - - #[inline] - pub fn backup_list(&self) -> Result { self.db.db.backup_list() } } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index a7a9be9d..a23a4c21 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -127,7 +127,9 @@ impl Service { &self.server.config.new_user_displayname_suffix } - pub fn allow_check_for_updates(&self) -> bool { self.server.config.allow_check_for_updates } + pub fn allow_announcements_check(&self) -> bool { + self.server.config.allow_announcements_check + } pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.server.config.trusted_servers } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 5c26efe8..d053ba54 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -22,7 +22,7 @@ use tokio::{ use self::data::{Data, Metadata}; pub use self::thumbnail::Dim; -use crate::{Dep, client, globals, sending}; +use crate::{Dep, client, globals, moderation, sending}; #[derive(Debug)] pub struct FileMeta { @@ -42,6 +42,7 @@ struct Services { client: Dep, globals: Dep, sending: Dep, + moderation: Dep, } /// generated MXC ID (`media-id`) length @@ -64,6 +65,7 @@ impl crate::Service for Service { client: args.depend::("client"), globals: args.depend::("globals"), sending: args.depend::("sending"), + moderation: args.depend::("moderation"), }, })) } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index cdcb429e..f234fa13 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -338,7 +338,7 @@ fn handle_federation_error( return fallback(); } - // Reached for 5xx errors. This is where we don't fallback given the likelyhood + // Reached for 5xx errors. This is where we don't fallback given the likelihood // the other endpoint will also be a 5xx and we're wasting time. 
diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs
index cdcb429e..f234fa13 100644
--- a/src/service/media/remote.rs
+++ b/src/service/media/remote.rs
@@ -338,7 +338,7 @@ fn handle_federation_error(
 		return fallback();
 	}
 
-	// Reached for 5xx errors. This is where we don't fallback given the likelyhood
+	// Reached for 5xx errors. This is where we don't fallback given the likelihood
 	// the other endpoint will also be a 5xx and we're wasting time.
 	error
 }
@@ -356,7 +356,7 @@ pub async fn fetch_remote_thumbnail_legacy(
 	self.check_legacy_freeze()?;
 	self.check_fetch_authorized(&mxc)?;
 
-	let reponse = self
+	let response = self
 		.services
 		.sending
 		.send_federation_request(mxc.server_name, media::get_content_thumbnail::v3::Request {
@@ -373,10 +373,17 @@ pub async fn fetch_remote_thumbnail_legacy(
 		.await?;
 
 	let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?;
-	self.upload_thumbnail(&mxc, None, None, reponse.content_type.as_deref(), &dim, &reponse.file)
-		.await?;
+	self.upload_thumbnail(
+		&mxc,
+		None,
+		None,
+		response.content_type.as_deref(),
+		&dim,
+		&response.file,
+	)
+	.await?;
 
-	Ok(reponse)
+	Ok(response)
 }
 
 #[implement(super::Service)]
@@ -423,16 +430,8 @@ pub async fn fetch_remote_content_legacy(
 fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> {
 	if self
 		.services
-		.server
-		.config
-		.prevent_media_downloads_from
-		.is_match(mxc.server_name.host())
-		|| self
-			.services
-			.server
-			.config
-			.forbidden_remote_server_names
-			.is_match(mxc.server_name.host())
+		.moderation
+		.is_remote_server_media_downloads_forbidden(mxc.server_name)
 	{
 		// we'll lie to the client and say the blocked server's media was not found and
 		// log. the client has no way of telling anyways so this is a security bonus.
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 63a51213..3d7a3aa9 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -1,13 +1,15 @@
-#![type_length_limit = "2048"]
+#![type_length_limit = "8192"]
 #![allow(refining_impl_trait)]
 
 mod manager;
 mod migrations;
 mod service;
 pub mod services;
+pub mod state;
 
 pub mod account_data;
 pub mod admin;
+pub mod announcements;
 pub mod appservice;
 pub mod client;
 pub mod config;
@@ -16,6 +18,7 @@ pub mod federation;
 pub mod globals;
 pub mod key_backups;
 pub mod media;
+pub mod moderation;
 pub mod presence;
 pub mod pusher;
 pub mod resolver;
@@ -25,7 +28,6 @@ pub mod server_keys;
 pub mod sync;
 pub mod transaction_ids;
 pub mod uiaa;
-pub mod updates;
 pub mod users;
 
 extern crate conduwuit_core as conduwuit;
diff --git a/src/service/moderation.rs b/src/service/moderation.rs
new file mode 100644
index 00000000..c3e55a1d
--- /dev/null
+++ b/src/service/moderation.rs
@@ -0,0 +1,93 @@
+use std::sync::Arc;
+
+use conduwuit::{Result, implement};
+use ruma::ServerName;
+
+use crate::{Dep, config};
+
+pub struct Service {
+	services: Services,
+}
+
+struct Services {
+	// pub server: Arc<Server>,
+	pub config: Dep<config::Service>,
+}
+
+impl crate::Service for Service {
+	fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
+		Ok(Arc::new(Self {
+			services: Services {
+				// server: args.server.clone(),
+				config: args.depend::<config::Service>("config"),
+			},
+		}))
+	}
+
+	fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
+}
+
+#[implement(Service)]
+#[must_use]
+pub fn is_remote_server_ignored(&self, server_name: &ServerName) -> bool {
+	// We must never block federating with ourselves
+	if server_name == self.services.config.server_name {
+		return false;
+	}
+
+	self.services
+		.config
+		.ignore_messages_from_server_names
+		.is_match(server_name.host())
+}
+
+#[implement(Service)]
+#[must_use]
+pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool {
+	// We must never block federating with ourselves
+	if server_name == self.services.config.server_name {
+		return false;
+	}
+
+	// Check if server is explicitly allowed
+	if self
+		.services
+		.config
+		.allowed_remote_server_names
+		.is_match(server_name.host())
+	{
+		return false;
+	}
+
+	// Check if server is explicitly forbidden
+	self.services
+		.config
+		.forbidden_remote_server_names
+		.is_match(server_name.host())
+}
+
+#[implement(Service)]
+#[must_use]
+pub fn is_remote_server_room_directory_forbidden(&self, server_name: &ServerName) -> bool {
+	// Forbidden if NOT (allowed is empty OR allowed contains server OR is self)
+	// OR forbidden contains server
+	self.is_remote_server_forbidden(server_name)
+		|| self
+			.services
+			.config
+			.forbidden_remote_room_directory_server_names
+			.is_match(server_name.host())
+}
+
+#[implement(Service)]
+#[must_use]
+pub fn is_remote_server_media_downloads_forbidden(&self, server_name: &ServerName) -> bool {
+	// Forbidden if NOT (allowed is empty OR allowed contains server OR is self)
+	// OR forbidden contains server
+	self.is_remote_server_forbidden(server_name)
+		|| self
+			.services
+			.config
+			.prevent_media_downloads_from
+			.is_match(server_name.host())
+}
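Every remote-server policy check now funnels through this one service, so call sites shrink to a single predicate, as execute.rs above shows. A sketch of the pattern for any new caller (hypothetical guard function; it simply mirrors the federation check):

	use conduwuit::{Err, Result, debug_warn};
	use ruma::ServerName;

	// Hypothetical: bail out before doing any work for a banned server.
	fn ensure_server_allowed(services: &crate::Services, dest: &ServerName) -> Result {
		if services.moderation.is_remote_server_forbidden(dest) {
			return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed."))));
		}

		Ok(())
	}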
diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs
index 0151c4d7..d23ef95a 100644
--- a/src/service/resolver/actual.rs
+++ b/src/service/resolver/actual.rs
@@ -296,7 +296,7 @@ impl super::Service {
 			expire: CachedOverride::default_expire(),
 			overriding: (hostname != untername)
 				.then_some(hostname.into())
-				.inspect(|_| debug_info!("{untername:?} overriden by {hostname:?}")),
+				.inspect(|_| debug_info!("{untername:?} overridden by {hostname:?}")),
 		});
 
 	Ok(())
diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs
index b3a7a71b..cd747e04 100644
--- a/src/service/rooms/event_handler/resolve_state.rs
+++ b/src/service/rooms/event_handler/resolve_state.rs
@@ -8,7 +8,7 @@ use conduwuit::{
 	Error, Result, err, implement,
 	state_res::{self, StateMap},
 	trace,
-	utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt, automatic_width},
+	utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt},
 };
 use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join};
 use ruma::{OwnedEventId, RoomId, RoomVersionId};
@@ -112,14 +112,7 @@ where
 {
 	let event_fetch = |event_id| self.event_fetch(event_id);
 	let event_exists = |event_id| self.event_exists(event_id);
-	state_res::resolve(
-		room_version,
-		state_sets,
-		auth_chain_sets,
-		&event_fetch,
-		&event_exists,
-		automatic_width(),
-	)
-	.map_err(|e| err!(error!("State resolution failed: {e:?}")))
-	.await
+	state_res::resolve(room_version, state_sets, auth_chain_sets, &event_fetch, &event_exists)
+		.map_err(|e| err!(error!("State resolution failed: {e:?}")))
+		.await
 }
diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs
index ea9756ba..53d2b742 100644
--- a/src/service/rooms/spaces/mod.rs
+++ b/src/service/rooms/spaces/mod.rs
@@ -399,7 +399,7 @@ async fn get_room_summary(
 	Ok(summary)
 }
 
-/// With the given identifier, checks if a room is accessable
+/// With the given identifier, checks if a room is accessible
 #[implement(Service)]
 async fn is_accessible_child<'a, I>(
 	&self,
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs
index 947e1c38..4b2f3cb2 100644
--- a/src/service/rooms/timeline/mod.rs
+++ b/src/service/rooms/timeline/mod.rs
@@ -267,15 +267,15 @@ impl Service {
 	///
 	/// Returns pdu id
 	#[tracing::instrument(level = "debug", skip_all)]
-	pub async fn append_pdu<'a, Leafs>(
+	pub async fn append_pdu<'a, Leaves>(
 		&'a self,
 		pdu: &'a PduEvent,
 		mut pdu_json: CanonicalJsonObject,
-		leafs: Leafs,
+		leaves: Leaves,
 		state_lock: &'a RoomMutexGuard,
 	) -> Result<RawPduId>
 	where
-		Leafs: Iterator<Item = OwnedEventId> + Send + 'a,
+		Leaves: Iterator<Item = OwnedEventId> + Send + 'a,
 	{
 		// Coalesce database writes for the remainder of this scope.
 		let _cork = self.db.db.cork_and_flush();
@@ -344,7 +344,7 @@ impl Service {
 
 		self.services
 			.state
-			.set_forward_extremities(&pdu.room_id, leafs, state_lock)
+			.set_forward_extremities(&pdu.room_id, leaves, state_lock)
 			.await;
 
 		let insert_lock = self.mutex_insert.lock(&pdu.room_id).await;
@@ -951,17 +951,17 @@ impl Service {
 	/// Append the incoming event setting the state snapshot to the state from
 	/// the server that sent the event.
 	#[tracing::instrument(level = "debug", skip_all)]
-	pub async fn append_incoming_pdu<'a, Leafs>(
+	pub async fn append_incoming_pdu<'a, Leaves>(
 		&'a self,
 		pdu: &'a PduEvent,
 		pdu_json: CanonicalJsonObject,
-		new_room_leafs: Leafs,
+		new_room_leaves: Leaves,
 		state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
 		soft_fail: bool,
 		state_lock: &'a RoomMutexGuard,
 	) -> Result<Option<RawPduId>>
 	where
-		Leafs: Iterator<Item = OwnedEventId> + Send + 'a,
+		Leaves: Iterator<Item = OwnedEventId> + Send + 'a,
 	{
 		// We append to state before appending the pdu, so we don't have a moment in
 		// time with the pdu without its state. This is okay because append_pdu can't
@@ -978,14 +978,14 @@ impl Service {
 
 		self.services
 			.state
-			.set_forward_extremities(&pdu.room_id, new_room_leafs, state_lock)
+			.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)
 			.await;
 
 		return Ok(None);
 	}
 
 	let pdu_id = self
-		.append_pdu(pdu, pdu_json, new_room_leafs, state_lock)
+		.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)
 		.await?;
 
 	Ok(Some(pdu_id))
diff --git a/src/service/services.rs b/src/service/services.rs
index dc390054..daece245 100644
--- a/src/service/services.rs
+++ b/src/service/services.rs
@@ -10,11 +10,12 @@ use futures::{Stream, StreamExt, TryStreamExt};
 use tokio::sync::Mutex;
 
 use crate::{
-	account_data, admin, appservice, client, config, emergency, federation, globals, key_backups,
+	account_data, admin, announcements, appservice, client, config, emergency, federation,
+	globals, key_backups,
 	manager::Manager,
-	media, presence, pusher, resolver, rooms, sending, server_keys, service,
+	media, moderation, presence, pusher, resolver, rooms, sending, server_keys, service,
 	service::{Args, Map, Service},
-	sync, transaction_ids, uiaa, updates, users,
+	sync, transaction_ids, uiaa, users,
 };
 
 pub struct Services {
@@ -37,8 +38,9 @@ pub struct Services {
 	pub sync: Arc<sync::Service>,
 	pub transaction_ids: Arc<transaction_ids::Service>,
 	pub uiaa: Arc<uiaa::Service>,
-	pub updates: Arc<updates::Service>,
 	pub users: Arc<users::Service>,
+	pub moderation: Arc<moderation::Service>,
+	pub announcements: Arc<announcements::Service>,
 
 	manager: Mutex<Option<Arc<Manager>>>,
 	pub(crate) service: Arc<Map>,
@@ -104,8 +106,9 @@ impl Services {
 			sync: build!(sync::Service),
 			transaction_ids: build!(transaction_ids::Service),
 			uiaa: build!(uiaa::Service),
-			updates: build!(updates::Service),
 			users: build!(users::Service),
+			moderation: build!(moderation::Service),
+			announcements: build!(announcements::Service),
 
 			manager: Mutex::new(None),
 			service,
diff --git a/src/api/router/state.rs b/src/service/state.rs
similarity index 98%
rename from src/api/router/state.rs
rename to src/service/state.rs
index 57eb94ca..c0884a5c 100644
--- a/src/api/router/state.rs
+++ b/src/service/state.rs
@@ -1,6 +1,6 @@
 use std::{ops::Deref, sync::Arc};
 
-use conduwuit_service::Services;
+use crate::Services;
 
 #[derive(Clone, Copy)]
 pub struct State {
diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs
index bf2bc142..b095d2c1 100644
--- a/src/service/sync/mod.rs
+++ b/src/service/sync/mod.rs
@@ -8,7 +8,7 @@ use std::{
 use conduwuit::{Result, Server};
 use database::Map;
 use ruma::{
-	DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId,
+	OwnedDeviceId, OwnedRoomId, OwnedUserId,
 	api::client::sync::sync_events::{
 		self,
 		v4::{ExtensionsConfig, SyncRequestList},
@@ -49,8 +49,8 @@ struct Services {
 struct SlidingSyncCache {
 	lists: BTreeMap<String, SyncRequestList>,
 	subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
-	known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>, /* For every room, the
-	                                                            * roomsince number */
+	// For every room, the roomsince number
+	known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>,
 	extensions: ExtensionsConfig,
 }
@@ -98,79 +98,35 @@ impl crate::Service for Service {
 	fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
 }
 
-/// load params from cache if body doesn't contain it, as long as it's allowed
-/// in some cases we may need to allow an empty list as an actual value
-fn list_or_sticky<T: Clone>(target: &mut Vec<T>, cached: &Vec<T>) {
-	if target.is_empty() {
-		target.clone_from(cached);
-	}
-}
-fn some_or_sticky<T>(target: &mut Option<T>, cached: Option<T>) {
-	if target.is_none() {
-		*target = cached;
-	}
-}
-
 impl Service {
-	pub fn snake_connection_cached(
-		&self,
-		user_id: OwnedUserId,
-		device_id: OwnedDeviceId,
-		conn_id: Option<String>,
-	) -> bool {
-		self.snake_connections
-			.lock()
-			.unwrap()
-			.contains_key(&(user_id, device_id, conn_id))
-	}
-
-	pub fn forget_snake_sync_connection(
-		&self,
-		user_id: OwnedUserId,
-		device_id: OwnedDeviceId,
-		conn_id: Option<String>,
-	) {
+	pub fn snake_connection_cached(&self, key: &SnakeConnectionsKey) -> bool {
 		self.snake_connections
 			.lock()
 			.expect("locked")
-			.remove(&(user_id, device_id, conn_id));
+			.contains_key(key)
 	}
 
-	pub fn remembered(
-		&self,
-		user_id: OwnedUserId,
-		device_id: OwnedDeviceId,
-		conn_id: String,
-	) -> bool {
-		self.connections
-			.lock()
-			.unwrap()
-			.contains_key(&(user_id, device_id, conn_id))
+	pub fn forget_snake_sync_connection(&self, key: &SnakeConnectionsKey) {
+		self.snake_connections.lock().expect("locked").remove(key);
 	}
 
-	pub fn forget_sync_request_connection(
-		&self,
-		user_id: OwnedUserId,
-		device_id: OwnedDeviceId,
-		conn_id: String,
-	) {
-		self.connections
-			.lock()
-			.expect("locked")
-			.remove(&(user_id, device_id, conn_id));
+	pub fn remembered(&self, key: &DbConnectionsKey) -> bool {
+		self.connections.lock().expect("locked").contains_key(key)
+	}
+
+	pub fn forget_sync_request_connection(&self, key: &DbConnectionsKey) {
+		self.connections.lock().expect("locked").remove(key);
 	}
 
 	pub fn update_snake_sync_request_with_cache(
 		&self,
-		user_id: OwnedUserId,
-		device_id: OwnedDeviceId,
+		snake_key: &SnakeConnectionsKey,
 		request: &mut v5::Request,
 	) -> BTreeMap<String, BTreeMap<OwnedRoomId, u64>> {
-		let conn_id = request.conn_id.clone();
 		let mut cache = self.snake_connections.lock().expect("locked");
 		let cached = Arc::clone(
 			cache
-				.entry((user_id, device_id, conn_id))
+				.entry(snake_key.clone())
 				.or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))),
 		);
 		let cached = &mut cached.lock().expect("locked");
@@ -268,25 +224,23 @@ impl Service {
 
 	pub fn update_sync_request_with_cache(
 		&self,
-		user_id: OwnedUserId,
-		device_id: OwnedDeviceId,
+		key: &SnakeConnectionsKey,
 		request: &mut sync_events::v4::Request,
 	) -> BTreeMap<String, BTreeMap<OwnedRoomId, u64>> {
 		let Some(conn_id) = request.conn_id.clone() else {
 			return BTreeMap::new();
 		};
 
+		let key = into_db_key(key.0.clone(), key.1.clone(), conn_id);
 		let mut cache = self.connections.lock().expect("locked");
-		let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with(
-			|| {
-				Arc::new(Mutex::new(SlidingSyncCache {
-					lists: BTreeMap::new(),
-					subscriptions: BTreeMap::new(),
-					known_rooms: BTreeMap::new(),
-					extensions: ExtensionsConfig::default(),
-				}))
-			},
-		));
+		let cached = Arc::clone(cache.entry(key).or_insert_with(|| {
+			Arc::new(Mutex::new(SlidingSyncCache {
+				lists: BTreeMap::new(),
+				subscriptions: BTreeMap::new(),
+				known_rooms: BTreeMap::new(),
+				extensions: ExtensionsConfig::default(),
+			}))
+		}));
 		let cached = &mut cached.lock().expect("locked");
 		drop(cache);
@@ -371,22 +325,18 @@ impl Service {
 
 	pub fn update_sync_subscriptions(
 		&self,
-		user_id: OwnedUserId,
-		device_id: OwnedDeviceId,
-		conn_id: String,
+		key: &DbConnectionsKey,
 		subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
 	) {
 		let mut cache = self.connections.lock().expect("locked");
-		let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with(
-			|| {
-				Arc::new(Mutex::new(SlidingSyncCache {
-					lists: BTreeMap::new(),
-					subscriptions: BTreeMap::new(),
-					known_rooms: BTreeMap::new(),
-					extensions: ExtensionsConfig::default(),
-				}))
-			},
-		));
+		let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| {
+			Arc::new(Mutex::new(SlidingSyncCache {
+				lists: BTreeMap::new(),
+				subscriptions: BTreeMap::new(),
+				known_rooms: BTreeMap::new(),
+				extensions: ExtensionsConfig::default(),
+			}))
+		}));
 		let cached = &mut cached.lock().expect("locked");
 		drop(cache);
@@ -395,90 +345,81 @@ impl Service {
 
 	pub fn update_sync_known_rooms(
 		&self,
-		user_id: &UserId,
-		device_id: &DeviceId,
-		conn_id: String,
+		key: &DbConnectionsKey,
 		list_id: String,
 		new_cached_rooms: BTreeSet<OwnedRoomId>,
 		globalsince: u64,
 	) {
 		let mut cache = self.connections.lock().expect("locked");
-		let cached = Arc::clone(
-			cache
-				.entry((user_id.to_owned(), device_id.to_owned(), conn_id))
-				.or_insert_with(|| {
-					Arc::new(Mutex::new(SlidingSyncCache {
-						lists: BTreeMap::new(),
-						subscriptions: BTreeMap::new(),
-						known_rooms: BTreeMap::new(),
-						extensions: ExtensionsConfig::default(),
-					}))
-				}),
-		);
+		let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| {
+			Arc::new(Mutex::new(SlidingSyncCache {
+				lists: BTreeMap::new(),
+				subscriptions: BTreeMap::new(),
+				known_rooms: BTreeMap::new(),
+				extensions: ExtensionsConfig::default(),
+			}))
+		}));
 		let cached = &mut cached.lock().expect("locked");
 		drop(cache);
 
-		for (roomid, lastsince) in cached
+		for (room_id, lastsince) in cached
 			.known_rooms
 			.entry(list_id.clone())
 			.or_default()
 			.iter_mut()
 		{
-			if !new_cached_rooms.contains(roomid) {
+			if !new_cached_rooms.contains(room_id) {
 				*lastsince = 0;
 			}
 		}
 
 		let list = cached.known_rooms.entry(list_id).or_default();
-		for roomid in new_cached_rooms {
-			list.insert(roomid, globalsince);
+		for room_id in new_cached_rooms {
+			list.insert(room_id, globalsince);
 		}
 	}
 
 	pub fn update_snake_sync_known_rooms(
 		&self,
-		user_id: &UserId,
-		device_id: &DeviceId,
-		conn_id: String,
+		key: &SnakeConnectionsKey,
 		list_id: String,
 		new_cached_rooms: BTreeSet<OwnedRoomId>,
 		globalsince: u64,
 	) {
+		assert!(key.2.is_some(), "Some(conn_id) required for this call");
 		let mut cache = self.snake_connections.lock().expect("locked");
 		let cached = Arc::clone(
 			cache
-				.entry((user_id.to_owned(), device_id.to_owned(), Some(conn_id)))
+				.entry(key.clone())
 				.or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))),
 		);
 		let cached = &mut cached.lock().expect("locked");
 		drop(cache);
 
-		for (roomid, lastsince) in cached
+		for (room_id, lastsince) in cached
 			.known_rooms
 			.entry(list_id.clone())
 			.or_default()
 			.iter_mut()
 		{
-			if !new_cached_rooms.contains(roomid) {
+			if !new_cached_rooms.contains(room_id) {
 				*lastsince = 0;
 			}
 		}
 
 		let list = cached.known_rooms.entry(list_id).or_default();
-		for roomid in new_cached_rooms {
-			list.insert(roomid, globalsince);
+		for room_id in new_cached_rooms {
+			list.insert(room_id, globalsince);
 		}
 	}
 
 	pub fn update_snake_sync_subscriptions(
 		&self,
-		user_id: OwnedUserId,
-		device_id: OwnedDeviceId,
-		conn_id: Option<String>,
+		key: &SnakeConnectionsKey,
 		subscriptions: BTreeMap<OwnedRoomId, v5::request::RoomSubscription>,
 	) {
 		let mut cache = self.snake_connections.lock().expect("locked");
 		let cached = Arc::clone(
 			cache
-				.entry((user_id, device_id, conn_id))
+				.entry(key.clone())
 				.or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))),
 		);
 		let cached = &mut cached.lock().expect("locked");
@@ -487,3 +428,37 @@ impl Service {
 
 		cached.subscriptions = subscriptions;
 	}
 }
+
+#[inline]
+pub fn into_snake_key<U, D, C>(user_id: U, device_id: D, conn_id: C) -> SnakeConnectionsKey
+where
+	U: Into<OwnedUserId>,
+	D: Into<OwnedDeviceId>,
+	C: Into<Option<String>>,
+{
+	(user_id.into(), device_id.into(), conn_id.into())
+}
+
+#[inline]
+pub fn into_db_key<U, D, C>(user_id: U, device_id: D, conn_id: C) -> DbConnectionsKey
+where
+	U: Into<OwnedUserId>,
+	D: Into<OwnedDeviceId>,
+	C: Into<String>,
+{
+	(user_id.into(), device_id.into(), conn_id.into())
+}
+
+/// load params from cache if body doesn't contain it, as long as it's allowed
+/// in some cases we may need to allow an empty list as an actual value
+fn list_or_sticky<T: Clone>(target: &mut Vec<T>, cached: &Vec<T>) {
+	if target.is_empty() {
+		target.clone_from(cached);
+	}
+}
+
+fn some_or_sticky<T>(target: &mut Option<T>, cached: Option<T>) {
+	if target.is_none() {
+		*target = cached;
+	}
+}
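With the key tuples factored out, a handler builds the key once via `into_db_key`/`into_snake_key` and passes it by reference to every cache call. A sketch of a v5 handler fragment (hypothetical variable names; `sender_user`, `sender_device`, `body`, and `services` are assumed to be in scope):

	use conduwuit_service::sync::into_snake_key;

	// Hypothetical: derive the connection key from the authenticated sender.
	let snake_key = into_snake_key(sender_user, sender_device, body.conn_id.clone());
	if !services.sync.snake_connection_cached(&snake_key) {
		// First request on this connection; there is no cached list state yet.
	}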
diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs
deleted file mode 100644
index 28bee65a..00000000
--- a/src/service/updates/mod.rs
+++ /dev/null
@@ -1,142 +0,0 @@
-use std::{sync::Arc, time::Duration};
-
-use async_trait::async_trait;
-use conduwuit::{Result, Server, debug, info, warn};
-use database::{Deserialized, Map};
-use ruma::events::room::message::RoomMessageEventContent;
-use serde::Deserialize;
-use tokio::{
-	sync::Notify,
-	time::{MissedTickBehavior, interval},
-};
-
-use crate::{Dep, admin, client, globals};
-
-pub struct Service {
-	interval: Duration,
-	interrupt: Notify,
-	db: Arc<Map>,
-	services: Services,
-}
-
-struct Services {
-	admin: Dep<admin::Service>,
-	client: Dep<client::Service>,
-	globals: Dep<globals::Service>,
-	server: Arc<Server>,
-}
-
-#[derive(Debug, Deserialize)]
-struct CheckForUpdatesResponse {
-	updates: Vec<CheckForUpdatesResponseEntry>,
-}
-
-#[derive(Debug, Deserialize)]
-struct CheckForUpdatesResponseEntry {
-	id: u64,
-	date: String,
-	message: String,
-}
-
-const CHECK_FOR_UPDATES_URL: &str = "https://pupbrain.dev/check-for-updates/stable";
-const CHECK_FOR_UPDATES_INTERVAL: u64 = 7200; // 2 hours
-const LAST_CHECK_FOR_UPDATES_COUNT: &[u8; 1] = b"u";
-
-#[async_trait]
-impl crate::Service for Service {
-	fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
-		Ok(Arc::new(Self {
-			interval: Duration::from_secs(CHECK_FOR_UPDATES_INTERVAL),
-			interrupt: Notify::new(),
-			db: args.db["global"].clone(),
-			services: Services {
-				globals: args.depend::<globals::Service>("globals"),
-				admin: args.depend::<admin::Service>("admin"),
-				client: args.depend::<client::Service>("client"),
-				server: args.server.clone(),
-			},
-		}))
-	}
-
-	#[tracing::instrument(skip_all, name = "updates", level = "debug")]
-	async fn worker(self: Arc<Self>) -> Result<()> {
-		if !self.services.globals.allow_check_for_updates() {
-			debug!("Disabling update check");
-			return Ok(());
-		}
-
-		let mut i = interval(self.interval);
-		i.set_missed_tick_behavior(MissedTickBehavior::Delay);
-		i.reset_after(self.interval);
-		loop {
-			tokio::select! {
-				() = self.interrupt.notified() => break,
-				_ = i.tick() => (),
-			}
-
-			if let Err(e) = self.check().await {
-				warn!(%e, "Failed to check for updates");
-			}
-		}
-
-		Ok(())
-	}
-
-	fn interrupt(&self) { self.interrupt.notify_waiters(); }
-
-	fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
-}
-
-impl Service {
-	#[tracing::instrument(skip_all)]
-	async fn check(&self) -> Result<()> {
-		debug_assert!(self.services.server.running(), "server must not be shutting down");
-
-		let response = self
-			.services
-			.client
-			.default
-			.get(CHECK_FOR_UPDATES_URL)
-			.send()
-			.await?
-			.text()
-			.await?;
-
-		let response = serde_json::from_str::<CheckForUpdatesResponse>(&response)?;
-		for update in &response.updates {
-			if update.id > self.last_check_for_updates_id().await {
-				self.handle(update).await;
-				self.update_check_for_updates_id(update.id);
-			}
-		}
-
-		Ok(())
-	}
-
-	#[tracing::instrument(skip_all)]
-	async fn handle(&self, update: &CheckForUpdatesResponseEntry) {
-		info!("{} {:#}", update.date, update.message);
-		self.services
-			.admin
-			.send_message(RoomMessageEventContent::text_markdown(format!(
-				"### the following is a message from the conduwuit puppy\n\nit was sent on \
-				 `{}`:\n\n@room: {}",
-				update.date, update.message
-			)))
-			.await
-			.ok();
-	}
-
-	#[inline]
-	pub fn update_check_for_updates_id(&self, id: u64) {
-		self.db.raw_put(LAST_CHECK_FOR_UPDATES_COUNT, id);
-	}
-
-	pub async fn last_check_for_updates_id(&self) -> u64 {
-		self.db
-			.get(LAST_CHECK_FOR_UPDATES_COUNT)
-			.await
-			.deserialized()
-			.unwrap_or(0_u64)
-	}
-}
diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs
index 1eb289fc..701561a8 100644
--- a/src/service/users/mod.rs
+++ b/src/service/users/mod.rs
@@ -577,7 +577,7 @@ impl Service {
 
 			self.db
 				.userid_usersigningkeyid
-				.put(user_id, user_signing_key_key);
+				.raw_put(user_id, user_signing_key_key);
 		}
 
 		if notify {
diff --git a/src/web/Cargo.toml b/src/web/Cargo.toml
new file mode 100644
index 00000000..5c2dbebb
--- /dev/null
+++ b/src/web/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+name = "conduwuit_web"
+categories.workspace = true
+description.workspace = true
+edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+version.workspace = true
+
+[lib]
+path = "mod.rs"
+crate-type = [
+	"rlib",
+#	"dylib",
+]
+
+[features]
+
+[dependencies]
+conduwuit-build-metadata.workspace = true
+conduwuit-service.workspace = true
+
+askama = "0.14.0"
+
+axum.workspace = true
+futures.workspace = true
+tracing.workspace = true
+rand.workspace = true
+thiserror.workspace = true
+
+[lints]
+workspace = true
diff --git a/src/web/css/index.css b/src/web/css/index.css
new file mode 100644
index 00000000..86cb6d8d
--- /dev/null
+++ b/src/web/css/index.css
@@ -0,0 +1,68 @@
+:root {
+	color-scheme: light;
+	--font-stack: sans-serif;
+
+	--background-color: #fff;
+	--text-color: #000;
+
+	--bg: oklch(0.76 0.0854 317.27);
+	--panel-bg: oklch(0.91 0.042 317.27);
+
+	--name-lightness: 0.45;
+
+	@media (prefers-color-scheme: dark) {
+		color-scheme: dark;
+		--text-color: #fff;
+		--bg: oklch(0.15 0.042 317.27);
+		--panel-bg: oklch(0.24 0.03 317.27);
+
+		--name-lightness: 0.8;
+	}
+
+	--c1: oklch(0.44 0.177 353.06);
+	--c2: oklch(0.59 0.158 150.88);
+
+	--normal-font-size: 1rem;
+	--small-font-size: 0.8rem;
+}
+
+body {
+	color: var(--text-color);
+	font-family: var(--font-stack);
+	margin: 0;
+	padding: 0;
+	display: grid;
+	place-items: center;
+	min-height: 100vh;
+}
+
+html {
+	background-color: var(--bg);
+	background-image: linear-gradient(
+		70deg,
+		oklch(from var(--bg) l + 0.2 c h),
+		oklch(from var(--bg) l - 0.2 c h)
+	);
+	font-size: 16px;
+}
+
+.panel {
+	width: min(clamp(24rem, 12rem + 40vw, 48rem), 100vw);
+	border-radius: 15px;
+	background-color: var(--panel-bg);
+	padding-inline: 1.5rem;
+	padding-block: 1rem;
+	box-shadow: 0 0.25em 0.375em hsla(0, 0%, 0%, 0.1);
+}
+
+.project-name {
+	text-decoration: none;
+	background: linear-gradient(
+		130deg,
+		oklch(from var(--c1) var(--name-lightness) c h),
+		oklch(from var(--c2) var(--name-lightness) c h)
+	);
+	background-clip: text;
+	color: transparent;
+	filter: brightness(1.2);
+}
diff --git a/src/web/mod.rs b/src/web/mod.rs
new file mode 100644
index 00000000..9c6a5d83
--- /dev/null
+++ b/src/web/mod.rs
@@ -0,0 +1,73 @@
+use askama::Template;
+use axum::{
+	Router,
+	extract::State,
+	http::{StatusCode, header},
+	response::{Html, IntoResponse, Response},
+	routing::get,
+};
+use conduwuit_build_metadata::{GIT_REMOTE_COMMIT_URL, GIT_REMOTE_WEB_URL, version_tag};
+use conduwuit_service::state;
+
+pub fn build() -> Router<state::State> {
+	let router = Router::<state::State>::new();
+	router.route("/", get(index_handler))
+}
+
+async fn index_handler(
+	State(services): State<state::State>,
+) -> Result<impl IntoResponse, WebError> {
+	#[derive(Debug, Template)]
+	#[template(path = "index.html.j2")]
+	struct Tmpl<'a> {
+		nonce: &'a str,
+		server_name: &'a str,
+	}
+	let nonce = rand::random::<u64>().to_string();
+
+	let template = Tmpl {
+		nonce: &nonce,
+		server_name: services.config.server_name.as_str(),
+	};
+	Ok((
+		[(header::CONTENT_SECURITY_POLICY, format!("default-src 'none' 'nonce-{nonce}';"))],
+		Html(template.render()?),
+	))
+}
+
+#[derive(Debug, thiserror::Error)]
+enum WebError {
+	#[error("Failed to render template: {0}")]
+	Render(#[from] askama::Error),
+}
+
+impl IntoResponse for WebError {
+	fn into_response(self) -> Response {
+		#[derive(Debug, Template)]
+		#[template(path = "error.html.j2")]
+		struct Tmpl<'a> {
+			nonce: &'a str,
+			err: WebError,
+		}
+
+		let nonce = rand::random::<u64>().to_string();
+
+		let status = match &self {
+			| Self::Render(_) => StatusCode::INTERNAL_SERVER_ERROR,
+		};
+		let tmpl = Tmpl { nonce: &nonce, err: self };
+		if let Ok(body) = tmpl.render() {
+			(
+				status,
+				[(
+					header::CONTENT_SECURITY_POLICY,
+					format!("default-src 'none' 'nonce-{nonce}';"),
+				)],
+				Html(body),
+			)
+				.into_response()
+		} else {
+			(status, "Something went wrong").into_response()
+		}
+	}
+}
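`build()` deliberately returns a bare `Router<state::State>` so the binary decides where to mount it. A rough sketch of the downstream wiring (hypothetical; the actual mount point lives outside this diff):

	use axum::Router;
	use conduwuit_service::state::State;

	// Hypothetical: merge the web pages into an existing router, then supply
	// the shared service state to obtain a serveable Router<()>.
	fn with_web_ui(app: Router<State>, state: State) -> Router {
		app.merge(conduwuit_web::build()).with_state(state)
	}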
diff --git a/src/web/templates/_layout.html.j2 b/src/web/templates/_layout.html.j2
new file mode 100644
index 00000000..d298b68c
--- /dev/null
+++ b/src/web/templates/_layout.html.j2
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+    <meta charset="utf-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <title>{% block title %}Continuwuity{% endblock %}</title>
+    <style nonce="{{ nonce }}">
+        …
+    </style>
+</head>
+
+<body>
+    {%~ block content %}{% endblock ~%}
+    {%~ block footer ~%}
+    <footer>
+        <p>
+            Powered by <a href="https://continuwuity.org">Continuwuity</a>
+            {%~ if let Some(version_info) = self::version_tag() ~%}
+            {%~ if let Some(url) = GIT_REMOTE_COMMIT_URL.or(GIT_REMOTE_WEB_URL) ~%}
+            (<a href="{{ url }}">{{ version_info }}</a>)
+            {%~ else ~%}
+            ({{ version_info }})
+            {%~ endif ~%}
+            {%~ endif ~%}
+        </p>
+    </footer>
+    {%~ endblock ~%}
+</body>
+
+</html>
diff --git a/src/web/templates/error.html.j2 b/src/web/templates/error.html.j2
new file mode 100644
index 00000000..e320d0ed
--- /dev/null
+++ b/src/web/templates/error.html.j2
@@ -0,0 +1,20 @@
+{% extends "_layout.html.j2" %}
+
+{%- block title -%}
+Server Error
+{%- endblock -%}
+
+{%- block content -%}
+<h1>
+    {%- match err -%}
+    {% else -%} 500: Internal Server Error
+    {%- endmatch -%}
+</h1>
+
+{%- match err -%}
+    {% when WebError::Render(err) -%}
+    <pre>{{ err }}</pre>
+    {% else -%}
+    <p>An error occurred</p>
+{%- endmatch -%}
+
+{%- endblock -%}
diff --git a/src/web/templates/index.html.j2 b/src/web/templates/index.html.j2
new file mode 100644
index 00000000..7f11cb1c
--- /dev/null
+++ b/src/web/templates/index.html.j2
@@ -0,0 +1,16 @@
+{% extends "_layout.html.j2" %}
+{%- block content -%}
+<main class="panel">
+    <h1>
+        Welcome to Continuwuity!
+    </h1>
+
+    <p>
+        Continuwuity is successfully installed and working.
+    </p>
+
+    <p>
+        To get started, you can:
+    </p>
+    <ul>…</ul>
+</main>
+{%- endblock content -%}
diff --git a/theme/css/chrome.css b/theme/css/chrome.css
index 52b35c2c..d6cc2b32 100644
--- a/theme/css/chrome.css
+++ b/theme/css/chrome.css
@@ -495,7 +495,7 @@ ul#searchresults span.teaser em {
 
 .chapter li {
 	display: flex;
-	color: var(--sidebar-non-existant);
+	color: var(--sidebar-non-existent);
 }
 .chapter li a {
 	display: block;
diff --git a/theme/css/variables.css b/theme/css/variables.css
index e7feed98..ca9fd271 100644
--- a/theme/css/variables.css
+++ b/theme/css/variables.css
@@ -20,7 +20,7 @@
 	--sidebar-bg: #14191f;
 	--sidebar-fg: #c8c9db;
-	--sidebar-non-existant: #5c6773;
+	--sidebar-non-existent: #5c6773;
 	--sidebar-active: #ffb454;
 	--sidebar-spacer: #2d334f;
 
@@ -64,7 +64,7 @@
 	--sidebar-bg: #292c2f;
 	--sidebar-fg: #a1adb8;
-	--sidebar-non-existant: #505254;
+	--sidebar-non-existent: #505254;
 	--sidebar-active: #3473ad;
 	--sidebar-spacer: #393939;
 
@@ -108,7 +108,7 @@
 	--sidebar-bg: #fafafa;
 	--sidebar-fg: #AE518E;
-	--sidebar-non-existant: #aaaaaa;
+	--sidebar-non-existent: #aaaaaa;
 	--sidebar-active: #2F7E86;
 	--sidebar-spacer: #f4f4f4;
 
@@ -152,7 +152,7 @@
 	--sidebar-bg: #282d3f;
 	--sidebar-fg: #fdcbec;
-	--sidebar-non-existant: #505274;
+	--sidebar-non-existent: #505274;
 	--sidebar-active: #5BCEFA;
 	--sidebar-spacer: #2d334f;
 
@@ -196,7 +196,7 @@
 	--sidebar-bg: #3b2e2a;
 	--sidebar-fg: #c8c9db;
-	--sidebar-non-existant: #505254;
+	--sidebar-non-existent: #505254;
 	--sidebar-active: #e69f67;
 	--sidebar-spacer: #45373a;
 
@@ -241,7 +241,7 @@
 	--sidebar-bg: #292c2f;
 	--sidebar-fg: #a1adb8;
-	--sidebar-non-existant: #505254;
+	--sidebar-non-existent: #505254;
 	--sidebar-active: #3473ad;
 	--sidebar-spacer: #393939;