Compare commits: jade/enabl...alpine-pac
71 commits
Commit SHA1s:
b562b8cf92, 80e8900f04, 4158c1cf62, edd5fc6c7e, 6b0288dd4c, 90f1a193e3,
a9a478f077, b2620e6922, 60caa448b0, eb886b6760, 73c991edd0, c698d65a92,
1a5ab33852, 77c4f9ff2f, dcbacb5b78, c203c1fead, cdf105a24e, 859ec56b4f,
45872ede7a, f83238df78, 4c8dfc4c2c, bfd7ab5a22, ee11afb460, 1d840950b3,
f791dc6918, 3eb4ee7af1, 8f21403796, 4fbecca2d3, 0307238bf8, 1d42b88f50,
81f8151aca, 66e8cd8908, 7beff25d3d, 0c302f3137, c7ac2483a9, c68378ffe3,
fbd404fa84, ff93cfdc64, 22e7617362, b7b7d3a9e7, 1c59b41ff1, 2d9bdc0979,
5486dbda24, 41581c9ae8, d3022b4112, 6920814da9, fe7963d306, 84445b8458,
9e62076baa, 0eb9e4f3d2, e71138ab6f, 8e7373c027, 576a783a6f, 21ec255159,
3c5bbd4f05, 4f8fec7e5a, fb3020d8da, ecf20f7ebb, b3e5d2f683, 83126cc667,
eac713a2a9, e8a64bb59d, 05e65936fa, e7c3f78377, d8b56c9c35, 75fb19a5ca,
d98ec6bf46, 1b1198771f, d4561e950b, 298e2af3d7, c5b99fbccd
142 changed files with 3322 additions and 2987 deletions
.forgejo/workflows/build-alpine.yml (new file, 49 lines)
@@ -0,0 +1,49 @@
on:
  - workflow_dispatch
  - push

jobs:
  build:
    runs-on: ubuntu-latest
    container:
      image: alpine:edge

    steps:
      - name: set up dependencies
        run: |
          apk update
          apk upgrade
          apk add nodejs git alpine-sdk
      - uses: actions/checkout@v4
        name: checkout the alpine dir
        with:
          sparse-checkout: "alpine/"

      # - uses: actions/checkout@v4
      #   name: checkout the rest in the alpine dir
      #   with:
      #     path: 'alpine/continuwuity'
      - name: set up user
        run: adduser -DG abuild ci

      - name: set up keys
        run: |
          pwd
          mkdir ~/.abuild
          echo "${{ secrets.abuild_privkey }}" > ~/.abuild/ci@continuwuity.rsa
          echo "${{ secrets.abuild_pubkey }}" > ~/.abuild/ci@continuwuity.rsa.pub
          echo $HOME
          echo 'PACKAGER_PRIVKEY="/root/.abuild/ci@continuwuity.rsa"' > ~/.abuild/abuild.conf
          ls ~/.abuild

      - name: go go gadget abuild
        run: |
          cd alpine
          # modify the APKBUILD to use the current branch instead of the release
          # note that it seems to require the repo to be public (as you'll get
          # a 404 even if the token is provided)
          export ARCHIVE_URL="${{ github.server_url }}/${{ github.repository }}/archive/${{ github.ref_name }}.tar.gz"
          echo $ARCHIVE_URL
          sed -i '/^source=/c\source="'"$ARCHIVE_URL" APKBUILD
          abuild -F checksum
          abuild -Fr
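The tricky part of the final step is the `sed` quoting: the single-quoted `'/^source=/c\source="'` fragment ends just before the double-quoted `"$ARCHIVE_URL"`, splicing the URL into sed's `c\` (change line) command. A minimal sketch of the effect on the APKBUILD (which appears later in this diff), assuming a hypothetical `ref_name` of `main`:

```sh
# source= in the APKBUILD is a multi-line string; c\ replaces only its first
# line, so the opening quote and the initd/confd entries are preserved.
ARCHIVE_URL="https://forgejo.ellis.link/continuwuation/continuwuity/archive/main.tar.gz"  # hypothetical ref
sed -i '/^source=/c\source="'"$ARCHIVE_URL" APKBUILD
# before: source="https://forgejo.ellis.link/continuwuation/continuwuity/archive/v0.5.0-rc.5.tar.gz
# after:  source="https://forgejo.ellis.link/continuwuation/continuwuity/archive/main.tar.gz
```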
(.forgejo/workflows/documentation.yml)
@@ -16,7 +16,7 @@ concurrency:
 jobs:
   docs:
     name: Build and Deploy Documentation
-    runs-on: not-nexy
+    runs-on: ubuntu-latest

     steps:
       - name: Sync repository
@@ -36,9 +36,14 @@ jobs:
       - name: Prepare static files for deployment
         run: |
           mkdir -p ./public/.well-known/matrix
+          mkdir -p ./public/.well-known/continuwuity
+          mkdir -p ./public/schema
           # Copy the Matrix .well-known files
           cp ./docs/static/server ./public/.well-known/matrix/server
           cp ./docs/static/client ./public/.well-known/matrix/client
+          cp ./docs/static/client ./public/.well-known/matrix/support
+          cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements
+          cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json
           # Copy the custom headers file
           cp ./docs/static/_headers ./public/_headers
           echo "Copied .well-known files and _headers to ./public"
@@ -52,17 +57,17 @@ jobs:
         run: npm install --save-dev wrangler@latest

       - name: Deploy to Cloudflare Pages (Production)
-        if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
+        if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
         uses: https://github.com/cloudflare/wrangler-action@v3
         with:
           accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
           apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
-          command: pages deploy ./public --branch=main --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}"
+          command: pages deploy ./public --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"

       - name: Deploy to Cloudflare Pages (Preview)
-        if: ${{ github.event_name != 'push' || github.ref != 'refs/heads/main' }}
+        if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
         uses: https://github.com/cloudflare/wrangler-action@v3
         with:
           accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
           apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
-          command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}"
+          command: pages deploy ./public --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
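For orientation, the prepare step should leave `./public` laid out roughly as below; this tree is inferred from the `mkdir`/`cp` commands above, not captured from a real run:

```text
public/
├── _headers
├── .well-known/
│   ├── matrix/
│   │   ├── server
│   │   ├── client
│   │   └── support
│   └── continuwuity/
│       └── announcements
└── schema/
    └── announcements.schema.json
```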
.forgejo/workflows/element.yml (new file, 127 lines)
@@ -0,0 +1,127 @@
name: Deploy Element Web

on:
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch:

concurrency:
  group: "element-${{ github.ref }}"
  cancel-in-progress: true

jobs:
  build-and-deploy:
    name: Build and Deploy Element Web
    runs-on: ubuntu-latest

    steps:
      - name: Setup Node.js
        uses: https://code.forgejo.org/actions/setup-node@v4
        with:
          node-version: "20"

      - name: Clone, setup, and build Element Web
        run: |
          echo "Cloning Element Web..."
          git clone https://github.com/maunium/element-web
          cd element-web
          git checkout develop
          git pull

          echo "Cloning matrix-js-sdk..."
          git clone https://github.com/matrix-org/matrix-js-sdk.git

          echo "Installing Yarn..."
          npm install -g yarn

          echo "Installing dependencies..."
          yarn install

          echo "Preparing build environment..."
          mkdir -p .home

          echo "Cleaning up specific node_modules paths..."
          rm -rf node_modules/@types/eslint-scope/ matrix-*-sdk/node_modules/@types/eslint-scope || echo "Cleanup paths not found, continuing."

          echo "Getting matrix-js-sdk commit hash..."
          cd matrix-js-sdk
          jsver=$(git rev-parse HEAD)
          jsver=${jsver:0:12}
          cd ..
          echo "matrix-js-sdk version hash: $jsver"

          echo "Getting element-web commit hash..."
          ver=$(git rev-parse HEAD)
          ver=${ver:0:12}
          echo "element-web version hash: $ver"

          chmod +x ./build-sh

          export VERSION="$ver-js-$jsver"
          echo "Building Element Web version: $VERSION"
          ./build-sh

          echo "Checking for build output..."
          ls -la webapp/

      - name: Create config.json
        run: |
          cat <<EOF > ./element-web/webapp/config.json
          {
              "default_server_name": "continuwuity.org",
              "default_server_config": {
                  "m.homeserver": {
                      "base_url": "https://matrix.continuwuity.org"
                  }
              },
              "default_country_code": "GB",
              "default_theme": "dark",
              "mobile_guide_toast": false,
              "show_labs_settings": true,
              "room_directory": [
                  "continuwuity.org",
                  "matrixrooms.info"
              ],
              "settings_defaults": {
                  "UIFeature.urlPreviews": true,
                  "UIFeature.feedback": false,
                  "UIFeature.voip": false,
                  "UIFeature.shareQrCode": false,
                  "UIFeature.shareSocial": false,
                  "UIFeature.locationSharing": false,
                  "enableSyntaxHighlightLanguageDetection": true
              },
              "features": {
                  "feature_pinning": true,
                  "feature_custom_themes": true
              }
          }
          EOF
          echo "Created ./element-web/webapp/config.json"
          cat ./element-web/webapp/config.json

      - name: Upload Artifact
        uses: https://code.forgejo.org/actions/upload-artifact@v3
        with:
          name: element-web
          path: ./element-web/webapp/
          retention-days: 14

      - name: Install Wrangler
        run: npm install --save-dev wrangler@latest

      - name: Deploy to Cloudflare Pages (Production)
        if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
        uses: https://github.com/cloudflare/wrangler-action@v3
        with:
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"

      - name: Deploy to Cloudflare Pages (Preview)
        if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
        uses: https://github.com/cloudflare/wrangler-action@v3
        with:
          accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
          apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
          command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
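Because the workflow writes `config.json` with a heredoc, a malformed edit would only surface at runtime in Element. A quick sanity check with `jq` (assuming `jq` is installed) catches that earlier:

```sh
# jq exits non-zero on malformed JSON, so this works as a guard:
jq empty ./element-web/webapp/config.json && echo "config.json is valid JSON"
# Spot-check the homeserver settings:
jq .default_server_config ./element-web/webapp/config.json
```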
(Release Docker Image workflow)
@@ -1,22 +1,25 @@
 name: Release Docker Image
-concurrency:
+concurrency:
   group: "release-image-${{ github.ref }}"

 on:
   pull_request:
   push:
     paths-ignore:
-      - '.gitlab-ci.yml'
-      - '.gitignore'
-      - 'renovate.json'
-      - 'debian/**'
-      - 'docker/**'
+      - "*.md"
+      - "**/*.md"
+      - ".gitlab-ci.yml"
+      - ".gitignore"
+      - "renovate.json"
+      - "debian/**"
+      - "docker/**"
+      - "docs/**"
   # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:

 env:
-  BUILTIN_REGISTRY: forgejo.ellis.link
-  BUILTIN_REGISTRY_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}"
+  BUILTIN_REGISTRY: forgejo.ellis.link
+  BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}"

 jobs:
   define-variables:
@@ -35,7 +38,7 @@ jobs:
         script: |
           const githubRepo = '${{ github.repository }}'.toLowerCase()
           const repoId = githubRepo.split('/')[1]

           core.setOutput('github_repository', githubRepo)
           const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo
           let images = []
@@ -46,7 +49,7 @@ jobs:
           core.setOutput('images_list', images.join(","))
           const platforms = ['linux/amd64', 'linux/arm64']
           core.setOutput('build_matrix', JSON.stringify({
-            platform: platforms,
+            platform: platforms,
             include: platforms.map(platform => { return {
               platform,
               slug: platform.replace('/', '-')
@@ -63,22 +66,15 @@ jobs:
       attestations: write
       id-token: write
     strategy:
-      matrix: {
-        "include": [
-          {
-            "platform": "linux/amd64",
-            "slug": "linux-amd64"
-          },
-          {
-            "platform": "linux/arm64",
-            "slug": "linux-arm64"
-          }
-        ],
-        "platform": [
-          "linux/amd64",
-          "linux/arm64"
-        ]
-      }
+      matrix:
+        {
+          "include":
+            [
+              { "platform": "linux/amd64", "slug": "linux-amd64" },
+              { "platform": "linux/arm64", "slug": "linux-arm64" },
+            ],
+          "platform": ["linux/amd64", "linux/arm64"],
+        }
     steps:
       - name: Echo strategy
         run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}'
@@ -88,7 +84,13 @@ jobs:
         uses: actions/checkout@v4
         with:
           persist-credentials: false
+
+      - run: |
+          if ! command -v rustup &> /dev/null ; then
+            curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y
+            echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH
+          fi
+      - uses: https://github.com/cargo-bins/cargo-binstall@main
+      - run: cargo binstall timelord-cli@3.0.1
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
       - name: Set up QEMU
@@ -97,9 +99,9 @@ jobs:
       - name: Login to builtin registry
         uses: docker/login-action@v3
         with:
-          registry: ${{ env.BUILTIN_REGISTRY }}
-          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
-          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
+          registry: ${{ env.BUILTIN_REGISTRY }}
+          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
+          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

       # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
       - name: Extract metadata (labels, annotations) for Docker
@@ -122,6 +124,18 @@ jobs:
           echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
       - name: Get Git commit timestamps
         run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
+      - name: Set up timelord
+        uses: actions/cache/restore@v3
+        with:
+          path: /timelord/
+          key: timelord-v0 # Cache is already split per runner
+      - name: Run timelord to set timestamps
+        run: timelord sync --source-dir . --cache-dir /timelord/
+      - name: Save timelord
+        uses: actions/cache/save@v3
+        with:
+          path: /timelord/
+          key: timelord-v0
       - name: Build and push Docker image by digest
         id: build
         uses: docker/build-push-action@v6
@@ -133,8 +147,8 @@ jobs:
           platforms: ${{ matrix.platform }}
           labels: ${{ steps.meta.outputs.labels }}
           annotations: ${{ steps.meta.outputs.annotations }}
-          # cache-from: type=gha
-          # cache-to: type=gha,mode=max
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
           sbom: true
           outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
         env:
@@ -145,7 +159,7 @@ jobs:
         run: |
           mkdir -p /tmp/digests
           digest="${{ steps.build.outputs.digest }}"
-          touch "/tmp/digests/${digest#sha256:}"
+          touch "/tmp/digests/${digest#sha256:}"

       - name: Upload digest
         uses: forgejo/upload-artifact@v4
@@ -154,7 +168,7 @@ jobs:
           path: /tmp/digests/*
           if-no-files-found: error
           retention-days: 1

   merge:
     runs-on: dind
     container: ghcr.io/catthehacker/ubuntu:act-latest
@@ -170,9 +184,9 @@ jobs:
       - name: Login to builtin registry
         uses: docker/login-action@v3
         with:
-          registry: ${{ env.BUILTIN_REGISTRY }}
-          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
-          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
+          registry: ${{ env.BUILTIN_REGISTRY }}
+          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
+          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -185,13 +199,13 @@ jobs:
             type=semver,pattern=v{{version}}
             type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }}
             type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
-            type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) == github.ref && '' || 'branch-' }}
+            type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
             type=ref,event=pr
             type=sha,format=long
           images: ${{needs.define-variables.outputs.images}}
           # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
         env:
-          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
+          DOCKER_METADATA_ANNOTATIONS_LEVELS: index

       - name: Create manifest list and push
         working-directory: /tmp/digests
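The body of the "Create manifest list and push" step is truncated in this view. Since the build jobs push per-platform images by digest (`push-by-digest=true`) and upload those digests as artifacts, a merge job conventionally stitches them into one multi-arch manifest with `docker buildx imagetools`; the sketch below shows that standard pattern, with the image name and tag as placeholders rather than values taken from the workflow:

```sh
# Run from /tmp/digests, where each downloaded artifact file is named after a digest.
IMAGE=forgejo.ellis.link/continuwuation/continuwuity  # placeholder image name
docker buildx imagetools create \
  --tag "$IMAGE:latest" \
  $(printf "$IMAGE@sha256:%s " *)
```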
(CODE_OF_CONDUCT.md)
@@ -1,4 +1,3 @@
-
 # Contributor Covenant Code of Conduct

 ## Our Pledge
@@ -60,8 +59,7 @@ representative at an online or offline event.
 ## Enforcement

 Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported to the community leaders responsible for enforcement over email at
-<strawberry@puppygock.gay> or over Matrix at @strawberry:puppygock.gay.
+reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or email at <tom@tcpip.uk>, <jade@continuwuity.org> and <nex@continuwuity.org> respectively.
 All complaints will be reviewed and investigated promptly and fairly.

 All community leaders are obligated to respect the privacy and security of the
(docs/contributing.md)
@@ -4,7 +4,7 @@ This page is for about contributing to conduwuit. The
 [development](./development.md) page may be of interest for you as well.

 If you would like to work on an [issue][issues] that is not assigned, preferably
-ask in the Matrix room first at [#conduwuit:puppygock.gay][conduwuit-matrix],
+ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix],
 and comment on it.

 ### Linting and Formatting
@@ -23,9 +23,9 @@ suggestion, allow the lint and mention that in a comment.

 ### Running CI tests locally

-conduwuit's CI for tests, linting, formatting, audit, etc use
+continuwuity's CI for tests, linting, formatting, audit, etc use
 [`engage`][engage]. engage can be installed from nixpkgs or `cargo install
-engage`. conduwuit's Nix flake devshell has the nixpkgs engage with `direnv`.
+engage`. continuwuity's Nix flake devshell has the nixpkgs engage with `direnv`.
 Use `engage --help` for more usage details.

 To test, format, lint, etc that CI would do, install engage, allow the `.envrc`
@@ -111,33 +111,28 @@ applies here.

 ### Creating pull requests

-Please try to keep contributions to the GitHub. While the mirrors of conduwuit
-allow for pull/merge requests, there is no guarantee I will see them in a timely
+Please try to keep contributions to the Forgejo Instance. While the mirrors of continuwuity
+allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely
 manner. Additionally, please mark WIP or unfinished or incomplete PRs as drafts.
-This prevents me from having to ping once in a while to double check the status
+This prevents us from having to ping once in a while to double check the status
 of it, especially when the CI completed successfully and everything so it
 *looks* done.

-If you open a pull request on one of the mirrors, it is your responsibility to
-inform me about its existence. In the future I may try to solve this with more
-repo bots in the conduwuit Matrix room. There is no mailing list or email-patch
-support on the sr.ht mirror, but if you'd like to email me a git patch you can
-do so at `strawberry@puppygock.gay`.
-
 Direct all PRs/MRs to the `main` branch.

 By sending a pull request or patch, you are agreeing that your changes are
 allowed to be licenced under the Apache-2.0 licence and all of your conduct is
-in line with the Contributor's Covenant, and conduwuit's Code of Conduct.
+in line with the Contributor's Covenant, and continuwuity's Code of Conduct.

 Contribution by users who violate either of these code of conducts will not have
 their contributions accepted. This includes users who have been banned from
-conduwuit Matrix rooms for Code of Conduct violations.
+continuwuity Matrix rooms for Code of Conduct violations.

-[issues]: https://github.com/girlbossceo/conduwuit/issues
-[conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay
+[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues
+[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org
 [complement]: https://github.com/matrix-org/complement/
-[engage.toml]: https://github.com/girlbossceo/conduwuit/blob/main/engage.toml
+[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml
 [engage]: https://charles.page.computer.surgery/engage/
 [sytest]: https://github.com/matrix-org/sytest/
 [cargo-deb]: https://github.com/kornelski/cargo-deb
@@ -146,4 +141,4 @@ conduwuit Matrix rooms for Code of Conduct violations.
 [cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit
 [direnv]: https://direnv.net/
 [mdbook]: https://rust-lang.github.io/mdBook/
-[documentation.yml]: https://github.com/girlbossceo/conduwuit/blob/main/.github/workflows/documentation.yml
+[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml
Cargo.lock (generated, 39 lines changed)
@@ -725,7 +725,7 @@ dependencies = [

 [[package]]
 name = "conduwuit"
-version = "0.5.0"
+version = "0.5.0-rc.5"
 dependencies = [
  "clap",
  "conduwuit_admin",
@@ -754,7 +754,7 @@ dependencies = [

 [[package]]
 name = "conduwuit_admin"
-version = "0.5.0"
+version = "0.5.0-rc.5"
 dependencies = [
  "clap",
  "conduwuit_api",
@@ -775,7 +775,7 @@ dependencies = [

 [[package]]
 name = "conduwuit_api"
-version = "0.5.0"
+version = "0.5.0-rc.5"
 dependencies = [
  "async-trait",
  "axum",
@@ -784,7 +784,6 @@ dependencies = [
  "base64 0.22.1",
  "bytes",
  "conduwuit_core",
- "conduwuit_database",
  "conduwuit_service",
  "const-str",
  "futures",
@@ -808,7 +807,7 @@ dependencies = [

 [[package]]
 name = "conduwuit_core"
-version = "0.5.0"
+version = "0.5.0-rc.5"
 dependencies = [
  "argon2",
  "arrayvec",
@@ -866,7 +865,7 @@ dependencies = [

 [[package]]
 name = "conduwuit_database"
-version = "0.5.0"
+version = "0.5.0-rc.5"
 dependencies = [
  "async-channel",
  "conduwuit_core",
@@ -884,7 +883,7 @@ dependencies = [

 [[package]]
 name = "conduwuit_macros"
-version = "0.5.0"
+version = "0.5.0-rc.5"
 dependencies = [
  "itertools 0.14.0",
  "proc-macro2",
@@ -894,7 +893,7 @@ dependencies = [

 [[package]]
 name = "conduwuit_router"
-version = "0.5.0"
+version = "0.5.0-rc.5"
 dependencies = [
  "axum",
  "axum-client-ip",
@@ -927,7 +926,7 @@ dependencies = [

 [[package]]
 name = "conduwuit_service"
-version = "0.5.0"
+version = "0.5.0-rc.5"
 dependencies = [
  "async-trait",
  "base64 0.22.1",
@@ -3653,7 +3652,7 @@ dependencies = [
 [[package]]
 name = "ruma"
 version = "0.10.1"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "assign",
  "js_int",
@@ -3673,7 +3672,7 @@ dependencies = [
 [[package]]
 name = "ruma-appservice-api"
 version = "0.10.0"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "js_int",
  "ruma-common",
@@ -3685,7 +3684,7 @@ dependencies = [
 [[package]]
 name = "ruma-client-api"
 version = "0.18.0"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "as_variant",
  "assign",
@@ -3708,7 +3707,7 @@ dependencies = [
 [[package]]
 name = "ruma-common"
 version = "0.13.0"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "as_variant",
  "base64 0.22.1",
@@ -3740,7 +3739,7 @@ dependencies = [
 [[package]]
 name = "ruma-events"
 version = "0.28.1"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "as_variant",
  "indexmap 2.8.0",
@@ -3765,7 +3764,7 @@ dependencies = [
 [[package]]
 name = "ruma-federation-api"
 version = "0.9.0"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "bytes",
  "headers",
@@ -3787,7 +3786,7 @@ dependencies = [
 [[package]]
 name = "ruma-identifiers-validation"
 version = "0.9.5"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "js_int",
  "thiserror 2.0.12",
@@ -3796,7 +3795,7 @@ dependencies = [
 [[package]]
 name = "ruma-identity-service-api"
 version = "0.9.0"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "js_int",
  "ruma-common",
@@ -3806,7 +3805,7 @@ dependencies = [
 [[package]]
 name = "ruma-macros"
 version = "0.13.0"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "cfg-if",
  "proc-macro-crate",
@@ -3821,7 +3820,7 @@ dependencies = [
 [[package]]
 name = "ruma-push-gateway-api"
 version = "0.9.0"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "js_int",
  "ruma-common",
@@ -3833,7 +3832,7 @@ dependencies = [
 [[package]]
 name = "ruma-signatures"
 version = "0.15.0"
-source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4"
+source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 dependencies = [
  "base64 0.22.1",
  "ed25519-dalek",
(Cargo.toml)
@@ -21,7 +21,7 @@ license = "Apache-2.0"
 readme = "README.md"
 repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
 rust-version = "1.86.0"
-version = "0.5.0"
+version = "0.5.0-rc.5"

 [workspace.metadata.crane]
 name = "conduwuit"
@@ -350,7 +350,7 @@ version = "0.1.2"
 [workspace.dependencies.ruma]
 git = "https://forgejo.ellis.link/continuwuation/ruwuma"
 #branch = "conduwuit-changes"
-rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4"
+rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 features = [
  "compat",
  "rand",
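The Cargo.lock churn above is the direct result of this rev bump. After editing `rev` under `[workspace.dependencies.ruma]`, the lockfile is refreshed with a standard cargo invocation (nothing project-specific; a plain `cargo build` also re-resolves after the manifest change):

```sh
# Re-pin the ruma git packages in Cargo.lock to the new revision:
cargo update -p ruma
```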
(README.md)
@@ -46,8 +46,9 @@ Continuwuity aims to:

 ### Can I try it out?

-Not right now. We've still got work to do!
+Check out the [documentation](introduction) for installation instructions.
+
+There are currently no open registration Continuwuity instances available.

 ### What are we working on?
@@ -105,9 +106,10 @@ When incorporating code from other forks:

 #### Contact

-<!-- TODO: contact details -->
+Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [space](https://matrix.to/#/#space:continuwuity.org) to chat with us about the project!

 <!-- ANCHOR_END: footer -->

 [continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity
alpine/APKBUILD (new file, 63 lines)
@@ -0,0 +1,63 @@
# Contributor: magmaus3 <maia@magmaus3.eu.org>
# Maintainer: magmaus3 <maia@magmaus3.eu.org>
pkgname=continuwuity

# abuild doesn't like the format of v0.5.0-rc.5, so i had to change it
# see https://wiki.alpinelinux.org/wiki/Package_policies
pkgver=0.5.0_rc5
pkgrel=0
pkgdesc="a continuwuation of a very cool, featureful fork of conduit"
url="https://continuwuity.org/"
arch="all"
license="Apache-2.0"
depends="liburing"

# cargo version on alpine v3.21 is too old to use the 2024 edition
# i recommend either building everything on edge, or adding
# the edge repo as a tag
makedepends="cargo liburing-dev clang-dev linux-headers"
checkdepends=""
install="$pkgname.pre-install"
subpackages="$pkgname-openrc"
source="https://forgejo.ellis.link/continuwuation/continuwuity/archive/v0.5.0-rc.5.tar.gz
	continuwuity.initd
	continuwuity.confd
	"
builddir="$srcdir/continuwuity"
options="net !check"

prepare() {
	default_prepare
	cd $srcdir/continuwuity

	# add the default database path to the config (commented out)
	cat conduwuit-example.toml \
		| sed '/#database_path/ s:$: "/var/lib/continuwuity":' \
		> "$srcdir"/continuwuity.toml

	cargo fetch --target="$CTARGET" --locked
}

build() {
	cargo build --frozen --release --all-features
}

check() {
	# TODO: make sure the tests work
	#cargo test --frozen
	return
}

package() {
	cd $srcdir
	install -Dm755 continuwuity/target/release/conduwuit "$pkgdir"/usr/bin/continuwuity
	install -Dm644 "$srcdir"/continuwuity.toml -t "$pkgdir"/etc/continuwuity
	install -Dm755 "$srcdir"/continuwuity.initd "$pkgdir"/etc/init.d/continuwuity
	install -Dm644 "$srcdir"/continuwuity.confd "$pkgdir"/etc/conf.d/continuwuity
}

sha512sums="
66f6da5e98b6f7bb8c1082500101d5c87b1b79955c139b44c6ef5123919fb05feb0dffc669a3af1bc8d571ddb9f3576660f08dc10a6b19eab6db9e391175436a  v0.5.0-rc.5.tar.gz
0482674be24740496d70da256d4121c5a5e3b749f2445d2bbe0e8991f1449de052724f8427da21a6f55574bc53eac9ca1e47e5012b4c13049b2b39044734d80d  continuwuity.initd
38e2576278b450d16ba804dd8f4a128f18cd793e6c3ce55aedee1e186905755b31ee23baaa6586b1ab0e25a1f29bf1ea86bfaae4185b0cb1a29203726a199426  continuwuity.confd
"
alpine/README.md (new file, 7 lines)
@@ -0,0 +1,7 @@
# building

1. [set up your build
   environment](https://wiki.alpinelinux.org/wiki/Include:Setup_your_system_and_account_for_building_packages)

2. run `abuild` (or `abuild -K` if you want to keep the source directory to make
   rebuilding faster)
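The APKBUILD comments note that the cargo shipped with Alpine v3.21 is too old for the 2024 edition. If you don't want to set up an edge host, a throwaway edge container is one option; this is a sketch under the assumption that the repository is bind-mounted at `/work`:

```sh
docker run --rm -it -v "$PWD:/work" -w /work/alpine alpine:edge sh -c '
  apk add alpine-sdk                 # brings in abuild and the build tooling
  adduser -DG abuild builder         # abuild must run as a user in the abuild group
  chown -R builder /work/alpine
  su builder -c "abuild-keygen -an && abuild -r"
'
```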
alpine/continuwuity.confd (new file, 3 lines)
@@ -0,0 +1,3 @@
supervisor=supervise-daemon
export CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml
alpine/continuwuity.initd (new file, 19 lines)
@@ -0,0 +1,19 @@
#!/sbin/openrc-run

command="/usr/bin/continuwuity"
command_user="continuwuity:continuwuity"
command_args="--config ${CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml}"
command_background=true
pidfile="/run/$RC_SVCNAME.pid"

output_log="/var/log/continuwuity.log"
error_log="/var/log/continuwuity.log"

depend() {
	need net
}

start_pre() {
	checkpath -d -m 0755 -o "$command_user" /var/lib/continuwuity
	checkpath -f -m 0644 -o "$command_user" "$output_log"
}
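With the OpenRC files above installed by the package, enabling the service uses the usual OpenRC commands (nothing Continuwuity-specific):

```sh
rc-update add continuwuity default   # start at boot
rc-service continuwuity start        # start now, supervised per continuwuity.confd
```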
alpine/continuwuity.pre-install (new file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/sh
addgroup -S continuwuity 2>/dev/null
adduser -S -D -H -h /var/lib/continuwuity -s /sbin/nologin -G continuwuity -g continuwuity continuwuity 2>/dev/null
exit 0
(conduwuit-example.toml)
@@ -113,14 +113,10 @@
 #new_user_displayname_suffix = "🏳️⚧️"

 # If enabled, conduwuit will send a simple GET request periodically to
-# `https://pupbrain.dev/check-for-updates/stable` for any new
-# announcements made. Despite the name, this is not an update check
-# endpoint, it is simply an announcement check endpoint.
+# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new
+# announcements or major updates. This is not an update check endpoint.
 #
-# This is disabled by default as this is rarely used except for security
-# updates or major updates.
-#
-#allow_check_for_updates = false
+#allow_announcements_check = true

 # Set this to any float value to multiply conduwuit's in-memory LRU caches
 # with such as "auth_chain_cache_capacity".
@@ -970,10 +966,10 @@
 #
 #rocksdb_compaction_ioprio_idle = true

-# Disables RocksDB compaction. You should never ever have to set this
-# option to true. If you for some reason find yourself needing to use this
-# option as part of troubleshooting or a bug, please reach out to us in
-# the conduwuit Matrix room with information and details.
+# Enables RocksDB compaction. You should never ever have to set this
+# option to false. If you for some reason find yourself needing to use
+# this option as part of troubleshooting or a bug, please reach out to us
+# in the conduwuit Matrix room with information and details.
 #
 # Disabling compaction will lead to a significantly bloated and
 # explosively large database, gradually poor performance, unnecessarily
@@ -1186,6 +1182,34 @@
 #
 #prune_missing_media = false

+# List of forbidden server names via regex patterns that we will block
+# incoming AND outgoing federation with, and block client room joins /
+# remote user invites.
+#
+# Note that your messages can still make it to forbidden servers through
+# backfilling. Events we receive from forbidden servers via backfill
+# from servers we *do* federate with will be stored in the database.
+#
+# This check is applied on the room ID, room alias, sender server name,
+# sender user's server name, inbound federation X-Matrix origin, and
+# outbound federation handler.
+#
+# You can set this to ["*"] to block all servers by default, and then
+# use `allowed_remote_server_names` to allow only specific servers.
+#
+# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"]
+#
+#forbidden_remote_server_names = []
+
+# List of allowed server names via regex patterns that we will allow,
+# regardless of if they match `forbidden_remote_server_names`.
+#
+# This option has no effect if `forbidden_remote_server_names` is empty.
+#
+# example: ["goodserver\\.tld$", "goodphrase"]
+#
+#allowed_remote_server_names = []
+
 # Vector list of regex patterns of server names that conduwuit will refuse
 # to download remote media from.
 #
@@ -1193,20 +1217,6 @@
 #
 #prevent_media_downloads_from = []

-# List of forbidden server names via regex patterns that we will block
-# incoming AND outgoing federation with, and block client room joins /
-# remote user invites.
-#
-# This check is applied on the room ID, room alias, sender server name,
-# sender user's server name, inbound federation X-Matrix origin, and
-# outbound federation handler.
-#
-# Basically "global" ACLs.
-#
-# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
-#
-#forbidden_remote_server_names = []
-
 # List of forbidden server names via regex patterns that we will block all
 # outgoing federated room directory requests for. Useful for preventing
 # our users from wandering into bad servers or spaces.
@@ -1215,6 +1225,29 @@
 #
 #forbidden_remote_room_directory_server_names = []

+# Vector list of regex patterns of server names that conduwuit will not
+# send messages to the client from.
+#
+# Note that there is no way for clients to receive messages once a server
+# has become unignored without doing a full sync. This is a protocol
+# limitation with the current sync protocols. This means this is somewhat
+# of a nuclear option.
+#
+# example: ["reallybadserver\.tld$", "reallybadphrase",
+# "69dollarfortnitecards"]
+#
+#ignore_messages_from_server_names = []
+
+# Send messages from users that the user has ignored to the client.
+#
+# There is no way for clients to receive messages sent while a user was
+# ignored without doing a full sync. This is a protocol limitation with
+# the current sync protocols. Disabling this option will move
+# responsibility of ignoring messages to the client, which can avoid this
+# limitation.
+#
+#send_messages_from_ignored_users_to_client = false
+
 # Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
 # do not want conduwuit to send outbound requests to. Defaults to
 # RFC1918, unroutable, loopback, multicast, and testnet addresses for
debian/conduwuit.service (vendored, 2 lines changed)
@@ -3,7 +3,7 @@ Description=conduwuit Matrix homeserver
 Wants=network-online.target
 After=network-online.target
 Alias=matrix-conduwuit.service
-Documentation=https://conduwuit.puppyirl.gay/
+Documentation=https://continuwuity.org/

 [Service]
 DynamicUser=yes
(Dockerfile)
@@ -44,15 +44,11 @@ ENV CARGO_SBOM_VERSION=0.9.1
 # renovate: datasource=crate depName=lddtree
 ENV LDDTREE_VERSION=0.3.7

-# renovate: datasource=crate depName=timelord-cli
-ENV TIMELORD_VERSION=3.0.1
-
 # Install unpackaged tools
 RUN <<EOF
     curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
     cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
     cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
-    cargo binstall --no-confirm timelord-cli --version $TIMELORD_VERSION
 EOF

 # Set up xx (cross-compilation scripts)
@@ -134,10 +130,6 @@ RUN xx-cargo --print-target-triple
 # Get source
 COPY . .

-# Timelord sync
-RUN --mount=type=cache,target=/timelord/ \
-    timelord sync --source-dir . --cache-dir /timelord/
-
 # Build the binary
 RUN --mount=type=cache,target=/usr/local/cargo/registry \
     --mount=type=cache,target=/usr/local/cargo/git/db \
(docs/SUMMARY.md)
@@ -19,4 +19,4 @@
 - [Contributing](contributing.md)
 - [Testing](development/testing.md)
 - [Hot Reloading ("Live" Development)](development/hot_reload.md)
-- [conduwuit Community Code of Conduct](conduwuit_coc.md)
+- [Community (and Guidelines)](community.md)
(docs/appservices.md)
@@ -3,8 +3,8 @@
 ## Getting help

 If you run into any problems while setting up an Appservice: ask us in
-[#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay) or
-[open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new).
+[#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or
+[open an issue on Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new).

 ## Set up the appservice - general instructions
@@ -14,7 +14,7 @@ later starting it.

 At some point the appservice guide should ask you to add a registration yaml
 file to the homeserver. In Synapse you would do this by adding the path to the
-homeserver.yaml, but in conduwuit you can do this from within Matrix:
+homeserver.yaml, but in Continuwuity you can do this from within Matrix:

 First, go into the `#admins` room of your homeserver. The first person that
 registered on the homeserver automatically joins it. Then send a message into
@@ -37,9 +37,9 @@ You can confirm it worked by sending a message like this:

 The server bot should answer with `Appservices (1): your-bridge`

-Then you are done. conduwuit will send messages to the appservices and the
+Then you are done. Continuwuity will send messages to the appservices and the
 appservice can send requests to the homeserver. You don't need to restart
-conduwuit, but if it doesn't work, restarting while the appservice is running
+Continuwuity, but if it doesn't work, restarting while the appservice is running
 could help.

 ## Appservice-specific instructions
docs/community.md (new file, 139 lines)
@@ -0,0 +1,139 @@
# Continuwuity Community Guidelines

Welcome to the Continuwuity commuwunity! We're excited to have you here. Continuwuity is a
continuation of the conduwuit homeserver, which in turn is a hard-fork of the Conduit homeserver,
aimed at making Matrix more accessible and inclusive for everyone.

This space is dedicated to fostering a positive, supportive, and welcoming environment for everyone.
These guidelines apply to all Continuwuity spaces, including our Matrix rooms and any other
community channels that reference them. We've written these guidelines to help us all create an
environment where everyone feels safe and respected.

For code and contribution guidelines, please refer to the
[Contributor's Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md).
Below are additional guidelines specific to the Continuwuity community.

## Our Values and Expected Behaviors

We strive to create a community based on mutual respect, collaboration, and inclusivity. We expect
all members to:

1. **Be Respectful and Inclusive**: Treat everyone with respect. We're committed to a community
   where everyone feels safe, regardless of background, identity, or experience. Discrimination,
   harassment, or hate speech won't be tolerated. Remember that each person experiences the world
   differently; share your own perspective and be open to learning about others'.

2. **Be Positive and Constructive**: Engage in discussions constructively and support each other.
   If you feel angry or frustrated, take a break before participating. Approach disagreements with
   the goal of understanding, not winning. Focus on the issue, not the person.

3. **Communicate Clearly and Kindly**: Our community includes neurodivergent individuals and those
   who may not appreciate sarcasm or subtlety. Communicate clearly and kindly. Avoid ambiguity and
   ensure your messages can be easily understood by all. Avoid placing the burden of education on
   marginalized groups; please make an effort to look into your questions before asking others for
   detailed explanations.

4. **Be Open to Improving Inclusivity**: Actively participate in making our community more inclusive.
   Report behaviour that contradicts these guidelines (see Reporting and Enforcement below) and be
   open to constructive feedback aimed at improving our community. Understand that discussing
   negative experiences can be emotionally taxing; focus on the message, not the tone.

5. **Commit to Our Values**: Building an inclusive community requires ongoing effort from everyone.
   Recognise that addressing bias and discrimination is a continuous process that needs commitment
   and action from all members.

## Unacceptable Behaviors

To ensure everyone feels safe and welcome, the following behaviors are considered unacceptable
within the Continuwuity community:

* **Harassment and Discrimination**: Avoid offensive comments related to background, family status,
  gender, gender identity or expression, marital status, sex, sexual orientation, native language,
  age, ability, race and/or ethnicity, caste, national origin, socioeconomic status, religion,
  geographic location, or any other dimension of diversity. Don't deliberately misgender someone or
  question the legitimacy of their gender identity.

* **Violence and Threats**: Do not engage in any form of violence or threats, including inciting
  violence towards anyone or encouraging self-harm. Posting or threatening to post someone else's
  personally identifying information ("doxxing") is also forbidden.

* **Personal Attacks**: Disagreements happen, but they should never turn into personal attacks.
  Don't insult, demean, or belittle others.

* **Unwelcome Attention or Contact**: Avoid unwelcome sexual attention, inappropriate physical
  contact (or simulation thereof), sexualized comments, jokes, or imagery.

* **Disruption**: Do not engage in sustained disruption of discussions, events, or other
  community activities.

* **Bad Faith Actions**: Do not intentionally make false reports or otherwise abuse the reporting
  process.

This is not an exhaustive list. Any behaviour that makes others feel unsafe or unwelcome may be
subject to enforcement action.

## Matrix Community

These Community Guidelines apply to the entire
[Continuwuity Matrix Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, including:

### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org)

This room is for support and discussions about Continuwuity. Ask questions, share insights, and help
each other out while adhering to these guidelines.

We ask that this room remain focused on the Continuwuity software specifically: the team are
typically happy to engage in conversations about related subjects in the off-topic room.

### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org)

For off-topic community conversations about any subject. While this room allows for a wide range of
topics, the same guidelines apply. Please keep discussions respectful and inclusive, and avoid
divisive or stressful subjects like specific country/world politics unless handled with exceptional
care and respect for diverse viewpoints.

General topics, such as world events, are welcome as long as they follow the guidelines. If a member
of the team asks for the conversation to end, please respect their decision.

### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org)

This room is dedicated to discussing active development of Continuwuity, including ongoing issues or
code development. Collaboration here must follow these guidelines, and please consider raising
[an issue](https://forgejo.ellis.link/continuwuation/continuwuity/issues) on the repository to help
track progress.

## Reporting and Enforcement

We take these Community Guidelines seriously to protect our community members. If you witness or
experience unacceptable behaviour, or have any other concerns, please report it.

**How to Report:**

* **Alert Moderators in the Room:** If you feel comfortable doing so, you can address the issue
  publicly in the relevant room by mentioning the moderation bot, `@rock:continuwuity.org`, which
  will immediately alert all available moderators.

* **Direct Message:** If you're not comfortable raising the issue publicly, please send a direct
  message (DM) to one of the room moderators.

Reports will be handled with discretion. We will investigate promptly and thoroughly.

**Enforcement Actions:**

Anyone asked to stop unacceptable behaviour is expected to comply immediately. Failure to do so, or
engaging in prohibited behaviour, may result in enforcement action. Moderators may take actions they
deem appropriate, including but not limited to:

1. **Warning**: A direct message or public warning identifying the violation and requesting
   corrective action.
2. **Temporary Mute**: Temporary restriction from participating in discussions for a specified
   period.
3. **Kick or Ban**: Removal from a room (kick) or the entire community space (ban). Egregious or
   repeated violations may result in an immediate ban. Bans are typically permanent and reviewed
   only in exceptional circumstances.

Retaliation against those who report concerns in good faith will not be tolerated and will be
subject to the same enforcement actions.

Together, let's build and maintain a community where everyone feels valued, safe, and respected.

— The Continuwuity Moderation Team
(docs/conduwuit_coc.md, deleted file)
@@ -1,93 +0,0 @@
# conduwuit Community Code of Conduct

Welcome to the conduwuit community! We’re excited to have you here. conduwuit is
a hard-fork of the Conduit homeserver, aimed at making Matrix more accessible
and inclusive for everyone.

This space is dedicated to fostering a positive, supportive, and inclusive
environment for everyone. This Code of Conduct applies to all conduwuit spaces,
including any further community rooms that reference this CoC. Here are our
guidelines to help maintain the welcoming atmosphere that sets conduwuit apart.

For the general foundational rules, please refer to the [Contributor's
Covenant](https://github.com/girlbossceo/conduwuit/blob/main/CODE_OF_CONDUCT.md).
Below are additional guidelines specific to the conduwuit community.

## Our Values and Guidelines

1. **Respect and Inclusivity**: We are committed to maintaining a community
where everyone feels safe and respected. Discrimination, harassment, or hate
speech of any kind will not be tolerated. Recognise that each community member
experiences the world differently based on their past experiences, background,
and identity. Share your own experiences and be open to learning about others'
diverse perspectives.

2. **Positivity and Constructiveness**: Engage in constructive discussions and
support each other. If you feel angry, negative, or aggressive, take a break
until you can participate in a positive and constructive manner. Process intense
feelings with a friend or in a private setting before engaging in community
conversations to help maintain a supportive and focused environment.

3. **Clarity and Understanding**: Our community includes neurodivergent
individuals and those who may not appreciate sarcasm or subtlety. Communicate
clearly and kindly, avoiding sarcasm and ensuring your messages are easily
understood by all. Additionally, avoid putting the burden of education on
marginalized groups by doing your own research before asking for explanations.

4. **Be Open to Inclusivity**: Actively engage in conversations about making our
community more inclusive. Report discriminatory behavior to the moderators
and be open to constructive feedback that aims to improve our community.
Understand that discussing discrimination and negative experiences can be
emotionally taxing, so focus on the message rather than critiquing the tone
used.

5. **Commit to Inclusivity**: Building an inclusive community requires time,
energy, and resources. Recognise that addressing discrimination and bias is
an ongoing process that necessitates commitment and action from all community
members.

## Matrix Community

This Code of Conduct applies to the entire [conduwuit Matrix
Space](https://matrix.to/#/#conduwuit-space:puppygock.gay) and its rooms,
including:

### [#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay)

This room is for support and discussions about conduwuit. Ask questions, share
insights, and help each other out.

### [#conduwuit-offtopic:girlboss.ceo](https://matrix.to/#/#conduwuit-offtopic:girlboss.ceo)

For off-topic community conversations about any subject. While this room allows
for a wide range of topics, the same CoC applies. Keep discussions respectful
and inclusive, and avoid divisive subjects like country/world politics. General
topics, such as world events, are welcome as long as they follow the CoC.

### [#conduwuit-dev:puppygock.gay](https://matrix.to/#/#conduwuit-dev:puppygock.gay)

This room is dedicated to discussing active development of conduwuit. Posting
requires an elevated power level, which can be requested in one of the other
rooms. Use this space to collaborate and innovate.

## Enforcement

We have a zero-tolerance policy for violations of this Code of Conduct. If
someone’s behavior makes you uncomfortable, please report it to the moderators.
Actions we may take include:

1. **Warning**: A warning given directly in the room or via a private message
from the moderators, identifying the violation and requesting corrective
action.
2. **Temporary Mute**: Temporary restriction from participating in discussions
for a specified period to allow for reflection and cooling off.
3. **Kick or Ban**: Egregious behavior may result in an immediate kick or ban to
protect other community members. Bans are considered permanent and will only
be reversed in exceptional circumstances after proven good behavior.

Please highlight issues directly in rooms when possible, but if you don't feel
comfortable doing that, then please send a DM to one of the moderators directly.

Together, let’s build a community where everyone feels valued and respected.

— The conduwuit Moderation Team
@ -1,10 +1,10 @@
# Configuration

This chapter describes various ways to configure conduwuit.
This chapter describes various ways to configure Continuwuity.

## Basics

conduwuit uses a config file for the majority of the settings, but also supports
Continuwuity uses a config file for the majority of the settings, but also supports
setting individual config options via commandline.

Please refer to the [example config

@ -12,13 +12,13 @@ file](./configuration/examples.md#example-configuration) for all of those
settings.

The config file to use can be specified on the commandline when running
conduwuit by specifying the `-c`, `--config` flag. Alternatively, you can use
Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use
the environment variable `CONDUWUIT_CONFIG` to specify the config file to use.
Conduit's environment variables are supported for backwards compatibility.

## Option commandline flag

conduwuit supports setting individual config options in TOML format from the
Continuwuity supports setting individual config options in TOML format from the
`-O` / `--option` flag. For example, you can set your server name via `-O
server_name=\"example.com\"`.

@ -33,7 +33,7 @@ string. This does not apply to options that take booleans or numbers:

## Execute commandline flag

conduwuit supports running admin commands on startup using the commandline
Continuwuity supports running admin commands on startup using the commandline
argument `--execute`. The most notable use for this is to create an admin user
on first startup.

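Putting these flags together, a minimal sketch of an invocation (the config path is a placeholder, and `users make_user_admin` is the admin command referenced on the troubleshooting page):

```bash
# Illustrative only; adjust paths and values to your deployment.
conduwuit --config /etc/conduwuit/conduwuit.toml \
    -O 'server_name="example.com"' \
    --execute "users make_user_admin alice"
```
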
@ -1,3 +1,3 @@
# Deploying

This chapter describes various ways to deploy conduwuit.
This chapter describes various ways to deploy Continuwuity.

@ -1,15 +1,3 @@
# conduwuit for Arch Linux
# Continuwuity for Arch Linux

Currently conduwuit is only on the Arch User Repository (AUR).

The conduwuit AUR packages are community maintained and are not maintained by
the conduwuit development team, but the AUR package maintainers are in the Matrix
room. Please attempt to verify that your AUR package's PKGBUILD file looks fine
before asking for support.

- [conduwuit](https://aur.archlinux.org/packages/conduwuit) - latest tagged
conduwuit
- [conduwuit-git](https://aur.archlinux.org/packages/conduwuit-git) - latest git
conduwuit from `main` branch
- [conduwuit-bin](https://aur.archlinux.org/packages/conduwuit-bin) - latest
tagged conduwuit static binary
Continuwuity does not have any Arch Linux packages at this time.

@ -1,13 +1,14 @@
# conduwuit - Behind Traefik Reverse Proxy
# Continuwuity - Behind Traefik Reverse Proxy

services:
  homeserver:
    ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image,
    ### then you are ready to go.
    image: girlbossceo/conduwuit:latest
    image: forgejo.ellis.link/continuwuation/continuwuity:latest
    restart: unless-stopped
    volumes:
      - db:/var/lib/conduwuit
      - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
      #- ./conduwuit.toml:/etc/conduwuit.toml
    networks:
      - proxy

@ -35,14 +36,14 @@ services:
      server=your.server.name.example:443
      }
    #cpuset: "0-4" # Uncomment to limit to specific CPU cores
    ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
    ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
      nofile:
        soft: 1048567
        hard: 1048567

  ### Uncomment if you want to use your own Element-Web App.
  ### Note: You need to provide a config.json for Element and you also need a second
  ### Domain or Subdomain for the communication between Element and conduwuit
  ### Domain or Subdomain for the communication between Element and Continuwuity
  ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
  # element-web:
  #   image: vectorim/element-web:latest

@ -1,4 +1,4 @@
# conduwuit - Traefik Reverse Proxy Labels
# Continuwuity - Traefik Reverse Proxy Labels

services:
  homeserver:

@ -6,7 +6,7 @@ services:
      - "traefik.enable=true"
      - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network

      - "traefik.http.routers.to-conduwuit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which conduwuit is hosted
      - "traefik.http.routers.to-conduwuit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Continuwuity is hosted
      - "traefik.http.routers.to-conduwuit.tls=true"
      - "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt"
      - "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker"

@ -16,7 +16,7 @@ services:
      - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
      - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"

      # If you want to have your account on <DOMAIN>, but host conduwuit on a subdomain,
      # If you want to have your account on <DOMAIN>, but host Continuwuity on a subdomain,
      # you can let it only handle the well known file on that domain instead
      #- "traefik.http.routers.to-matrix-wellknown.rule=Host(`<DOMAIN>`) && PathPrefix(`/.well-known/matrix`)"
      #- "traefik.http.routers.to-matrix-wellknown.tls=true"

@ -1,6 +1,6 @@
services:
  caddy:
    # This compose file uses caddy-docker-proxy as the reverse proxy for conduwuit!
    # This compose file uses caddy-docker-proxy as the reverse proxy for Continuwuity!
    # For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy
    image: lucaslorentz/caddy-docker-proxy:ci-alpine
    ports:

@ -20,12 +20,13 @@ services:
      caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}}

  homeserver:
    ### If you already built the conduwuit image with 'docker build' or want to use a registry image,
    ### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
    ### then you are ready to go.
    image: girlbossceo/conduwuit:latest
    image: forgejo.ellis.link/continuwuation/continuwuity:latest
    restart: unless-stopped
    volumes:
      - db:/var/lib/conduwuit
      - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
      #- ./conduwuit.toml:/etc/conduwuit.toml
    environment:
      CONDUWUIT_SERVER_NAME: example.com # EDIT THIS

@ -1,13 +1,14 @@
# conduwuit - Behind Traefik Reverse Proxy
# Continuwuity - Behind Traefik Reverse Proxy

services:
  homeserver:
    ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image,
    ### If you already built the Continuwuity image with 'docker build' or want to use the Docker Hub image,
    ### then you are ready to go.
    image: girlbossceo/conduwuit:latest
    image: forgejo.ellis.link/continuwuation/continuwuity:latest
    restart: unless-stopped
    volumes:
      - db:/var/lib/conduwuit
      - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
      #- ./conduwuit.toml:/etc/conduwuit.toml
    networks:
      - proxy

@ -21,7 +22,7 @@ services:
      CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you want to change it
      CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
      #CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped the config toml above
      ### Uncomment and change values as desired, note that conduwuit has plenty of config options, so you should check out the example config too
      ### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example config too
      # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
      # CONDUWUIT_LOG: info # default is: "warn,state_res=warn"
      # CONDUWUIT_ALLOW_ENCRYPTION: 'true'

@ -43,14 +44,14 @@ services:
      server=your.server.name.example:443
      }
    #cpuset: "0-4" # Uncomment to limit to specific CPU cores
    ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
    ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
      nofile:
        soft: 1048567
        hard: 1048567

  ### Uncomment if you want to use your own Element-Web App.
  ### Note: You need to provide a config.json for Element and you also need a second
  ### Domain or Subdomain for the communication between Element and conduwuit
  ### Domain or Subdomain for the communication between Element and Continuwuity
  ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
  # element-web:
  #   image: vectorim/element-web:latest

@ -1,10 +1,10 @@
# conduwuit
# Continuwuity

services:
  homeserver:
    ### If you already built the conduwuit image with 'docker build' or want to use a registry image,
    ### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
    ### then you are ready to go.
    image: girlbossceo/conduwuit:latest
    image: forgejo.ellis.link/continuwuation/continuwuity:latest
    restart: unless-stopped
    ports:
      - 8448:6167

@ -28,7 +28,7 @@ services:
      #
  ### Uncomment if you want to use your own Element-Web App.
  ### Note: You need to provide a config.json for Element and you also need a second
  ### Domain or Subdomain for the communication between Element and conduwuit
  ### Domain or Subdomain for the communication between Element and Continuwuity
  ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
  # element-web:
  #   image: vectorim/element-web:latest

@ -1,31 +1,20 @@
# conduwuit for Docker
# Continuwuity for Docker

## Docker

To run conduwuit with Docker you can either build the image yourself or pull it
To run Continuwuity with Docker you can either build the image yourself or pull it
from a registry.

### Use a registry

OCI images for conduwuit are available in the registries listed below.
OCI images for Continuwuity are available in the registries listed below.

| Registry        | Image                                                | Size                         | Notes                       |
| --------------- | ---------------------------------------------------- | ---------------------------- | --------------------------- |
| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh]           | ![Image Size][shield-latest] | Stable latest tagged image. |
| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable latest tagged image. |
| Docker Hub      | [docker.io/girlbossceo/conduwuit:latest][dh]         | ![Image Size][shield-latest] | Stable latest tagged image. |
| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh]             | ![Image Size][shield-main]   | Stable main branch.         |
| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:main][gl]   | ![Image Size][shield-main]   | Stable main branch.         |
| Docker Hub      | [docker.io/girlbossceo/conduwuit:main][dh]           | ![Image Size][shield-main]   | Stable main branch.         |
| Registry         | Image                                                       | Notes                |
| ---------------- | ----------------------------------------------------------- | -------------------- |
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:latest][fj] | Latest tagged image. |
| Forgejo Registry | [forgejo.ellis.link/continuwuation/continuwuity:main][fj]   | Main branch image.   |

[dh]: https://hub.docker.com/r/girlbossceo/conduwuit
[gh]: https://github.com/girlbossceo/conduwuit/pkgs/container/conduwuit
[gl]: https://gitlab.com/conduwuit/conduwuit/container_registry/6369729
[shield-latest]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest
[shield-main]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main

OCI image `.tar.gz` files are also hosted directly at <https://pup.systems/~strawberry/conduwuit/>
when uploaded by CI with a commit hash/revision or a tagged release.
[fj]: https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity

Use

@ -52,11 +41,11 @@ or you can use [docker compose](#docker-compose).
The `-d` flag lets the container run in detached mode. You may supply an
optional `conduwuit.toml` config file; the example config can be found
[here](../configuration/examples.md). You can pass in different env vars to
change config values on the fly. You can even configure conduwuit completely by
change config values on the fly. You can even configure Continuwuity completely by
using env vars. For an overview of possible values, please take a look at the
[`docker-compose.yml`](docker-compose.yml) file.

If you just want to test conduwuit for a short time, you can use the `--rm`
If you just want to test Continuwuity for a short time, you can use the `--rm`
flag, which will clean up everything related to your container after you stop
it.

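A runnable sketch of such a test run, assuming the registry image above and the port/volume conventions used in the compose files below:

```bash
# Illustrative only; 6167 is the internal port the compose files expose as 8448.
docker run --rm -d --name continuwuity \
    -p 8448:6167 \
    -v db:/var/lib/conduwuit \
    -e CONDUWUIT_SERVER_NAME="example.com" \
    forgejo.ellis.link/continuwuation/continuwuity:latest
```
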
@ -91,32 +80,32 @@ docker network create caddy
After that, you can rename it so it matches `docker-compose.yml` and spin up the
containers!

Additional info about deploying conduwuit can be found [here](generic.md).
Additional info about deploying Continuwuity can be found [here](generic.md).

### Build

Official conduwuit images are built using Nix's
[`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are
repeatable and reproducible by anyone, keeps the images lightweight, and can be
built offline.
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables multi-platform builds efficiently.

This also ensures portability of our images because `buildLayeredImage` builds
OCI images, not Docker images, and works with other container software.
The resulting images are broadly compatible with Docker and other container runtimes like Podman or containerd.

The OCI images are OS-less with only a very minimal environment of the `tini`
init system, CA certificates, and the conduwuit binary. This does mean there is
not a shell, but in theory you can get a shell by adding the necessary layers
to the layered image. However it's very unlikely you will need a shell for any
real troubleshooting.
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition.

The flake file for the OCI image definition is at [`nix/pkgs/oci-image/default.nix`][oci-image-def].
To build an image locally using Docker Buildx, you can typically run a command like:

To build an OCI image using Nix, the following outputs can be built:
- `nix build -L .#oci-image` (default features, x86_64 glibc)
- `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl)
- `nix build -L .#oci-image-aarch64-linux-musl` (default features, aarch64 musl)
- `nix build -L .#oci-image-x86_64-linux-musl-all-features` (all features, x86_64 musl)
- `nix build -L .#oci-image-aarch64-linux-musl-all-features` (all features, aarch64 musl)
```bash
# Build for the current platform and load into the local Docker daemon
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .

# Example: Build for specific platforms and push to a registry.
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push

# Example: Build binary optimized for the current CPU
# docker buildx build --load --tag continuwuity:latest --build-arg TARGET_CPU=native -f docker/Dockerfile .
```

Refer to the Docker Buildx documentation for more advanced build options.

[dockerfile-path]: ../../docker/Dockerfile

### Run

@ -138,10 +127,10 @@ web. With the two provided files,
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy
to deploy and use conduwuit, with a little caveat. If you already took a look at
to deploy and use Continuwuity, with a little caveat. If you already took a look at
the files, then you should have seen the `well-known` service, and that is the
little caveat. Traefik is simply a proxy and load balancer and is not able to
serve any kind of content, but for conduwuit to federate, we need to either
serve any kind of content, but for Continuwuity to federate, we need to either
expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client`
and `.well-known/matrix/server`.

@ -153,4 +142,3 @@ those two files.
See the [TURN](../turn.md) page.

[nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage
[oci-image-def]: https://github.com/girlbossceo/conduwuit/blob/main/nix/pkgs/oci-image/default.nix

@ -1,5 +1,5 @@
# conduwuit for FreeBSD
# Continuwuity for FreeBSD

conduwuit at the moment does not provide FreeBSD builds or have FreeBSD packaging, however conduwuit does build and work on FreeBSD using the system-provided RocksDB.
Continuwuity at the moment does not provide FreeBSD builds or have FreeBSD packaging, however Continuwuity does build and work on FreeBSD using the system-provided RocksDB.

Contributions for getting conduwuit packaged are welcome.
Contributions for getting Continuwuity packaged are welcome.

@ -2,11 +2,11 @@

> ### Getting help
>
> If you run into any problems while setting up conduwuit, ask us in
> `#conduwuit:puppygock.gay` or [open an issue on
> GitHub](https://github.com/girlbossceo/conduwuit/issues/new).
> If you run into any problems while setting up Continuwuity, ask us in
> `#continuwuity:continuwuity.org` or [open an issue on
> Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new).

## Installing conduwuit
## Installing Continuwuity

### Static prebuilt binary

@ -14,12 +14,10 @@ You may simply download the binary that fits your machine architecture (x86_64
or aarch64). Run `uname -m` to see what you need.

Prebuilt fully static musl binaries can be downloaded from the latest tagged
release [here](https://github.com/girlbossceo/conduwuit/releases/latest) or
release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or
`main` CI branch workflow artifact output. These also include Debian/Ubuntu
packages.

Binaries are also available on my website directly at: <https://pup.systems/~strawberry/conduwuit/>

These can be downloaded directly with curl. `ci-bins` are CI workflow binaries by commit
hash/revision, and `releases` are tagged releases. Sort by descending last
modified for the latest.

@ -37,7 +35,7 @@ for performance.
### Compiling

Alternatively, you may compile the binary yourself. We recommend using
Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most
Nix (or [Lix](https://lix.systems)) to build Continuwuity as this has the most
guaranteed reproducibility and is the easiest way to get a build environment and output
going. This also allows easy cross-compilation.

@ -51,35 +49,35 @@ If wanting to build using standard Rust toolchains, make sure you install:
- `liburing-dev` on the compiling machine, and `liburing` on the target host
- LLVM and libclang for RocksDB

You can build conduwuit using `cargo build --release --all-features`
You can build Continuwuity using `cargo build --release --all-features`

## Adding a conduwuit user
## Adding a Continuwuity user

While conduwuit can run as any user, it is better to use dedicated users for
While Continuwuity can run as any user, it is better to use dedicated users for
different services. This also allows you to make sure that the file permissions
are correctly set up.

In Debian, you can use this command to create a conduwuit user:
In Debian, you can use this command to create a Continuwuity user:

```bash
sudo adduser --system conduwuit --group --disabled-login --no-create-home
sudo adduser --system continuwuity --group --disabled-login --no-create-home
```

For distros without `adduser` (or where it's a symlink to `useradd`):

```bash
sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit
sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity
```

## Forwarding ports in the firewall or the router

Matrix's default federation port is port 8448, and clients must be using port 443.
If you would like to use only port 443, or a different port, you will need to set up
delegation. conduwuit has config options for doing delegation, or you can configure
delegation. Continuwuity has config options for doing delegation, or you can configure
your reverse proxy to manually serve the necessary JSON files to do delegation
(see the `[global.well_known]` config section).

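For illustration, minimal sketches of those two delegation files (the domains are placeholders; the keys come from the Matrix specification rather than from Continuwuity itself):

`/.well-known/matrix/server`:

```json
{ "m.server": "matrix.example.com:443" }
```

`/.well-known/matrix/client`:

```json
{ "m.homeserver": { "base_url": "https://matrix.example.com" } }
```
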
If conduwuit runs behind a router or in a container and has a different public
If Continuwuity runs behind a router or in a container and has a different public
IP address than the host system, these public ports need to be forwarded directly
or indirectly to the port mentioned in the config.

@ -94,9 +92,9 @@ on the network level, consider something like NextDNS or Pi-Hole.

## Setting up a systemd service

Two example systemd units for conduwuit can be found
Two example systemd units for Continuwuity can be found
[on the configuration page](../configuration/examples.md#debian-systemd-unit-file).
You may need to change the `ExecStart=` path to where you placed the conduwuit
You may need to change the `ExecStart=` path to where you placed the Continuwuity
binary if it is not `/usr/bin/conduwuit`.

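As an illustration, the relevant excerpt of such a unit might look like the following (a sketch only; see the configuration page for the complete, supported units):

```ini
# Hypothetical excerpt; adjust User=, paths, and the config location.
[Service]
User=continuwuity
ExecStart=/usr/bin/conduwuit
Environment=CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml
```
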
On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros

@ -114,9 +112,9 @@ and entering the following:
ReadWritePaths=/path/to/custom/database/path
```

## Creating the conduwuit configuration file
## Creating the Continuwuity configuration file

Now we need to create conduwuit's config file in
Now we need to create Continuwuity's config file in
`/etc/conduwuit/conduwuit.toml`. The example config can be found at
[conduwuit-example.toml](../configuration/examples.md).

@ -127,7 +125,7 @@ RocksDB is the only supported database backend.

## Setting the correct file permissions

If you are using a dedicated user for conduwuit, you will need to allow it to
If you are using a dedicated user for Continuwuity, you will need to allow it to
read the config. To do that you can run this:

```bash

@ -139,7 +137,7 @@ If you use the default database path you also need to run this:

```bash
sudo mkdir -p /var/lib/conduwuit/
sudo chown -R conduwuit:conduwuit /var/lib/conduwuit/
sudo chown -R continuwuity:continuwuity /var/lib/conduwuit/
sudo chmod 700 /var/lib/conduwuit/
```

@ -174,13 +172,13 @@ As we would prefer our users to use Caddy, we will not provide configuration files

You will need to reverse proxy everything under the following routes:
- `/_matrix/` - core Matrix C-S and S-S APIs
- `/_conduwuit/` - ad-hoc conduwuit routes such as `/local_user_count` and
- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and
`/server_version`

You can optionally reverse proxy the following individual routes:
- `/.well-known/matrix/client` and `/.well-known/matrix/server` if using
conduwuit to perform delegation (see the `[global.well_known]` config section)
- `/.well-known/matrix/support` if using conduwuit to send the homeserver admin
Continuwuity to perform delegation (see the `[global.well_known]` config section)
- `/.well-known/matrix/support` if using Continuwuity to send the homeserver admin
contact and support page (formerly known as MSC1929)
- `/` if you would like to see `hewwo from conduwuit woof!` at the root

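As a sketch, a minimal Caddyfile covering the required routes might look like this (the domain and port are assumptions based on the defaults used in these docs):

```
# Hypothetical Caddyfile; Caddy provisions TLS automatically.
your.server.name.example {
    reverse_proxy /_matrix/* 127.0.0.1:6167
    reverse_proxy /_conduwuit/* 127.0.0.1:6167
}
```
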
@ -200,7 +198,7 @@ header, making federation non-functional. If a workaround is found, feel free to

If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy, and we discourage using it if you can avoid it).

If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so:
If using Nginx, you need to give Continuwuity the request URI using `$request_uri`, or like so:
- `proxy_pass http://127.0.0.1:6167$request_uri;`
- `proxy_pass http://127.0.0.1:6167;`

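For illustration, a minimal Nginx location sketch under those assumptions (the surrounding `server` block is omitted; the body size limit is covered next):

```nginx
# Hypothetical snippet; 6167 matches the default port used above.
location /_matrix/ {
    proxy_pass http://127.0.0.1:6167$request_uri;
}
```
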
@ -209,7 +207,7 @@ Nginx users need to increase `client_max_body_size` (default is 1M) to match

## You're done

Now you can start conduwuit with:
Now you can start Continuwuity with:

```bash
sudo systemctl start conduwuit
```

@ -1,8 +1,9 @@
# conduwuit for Kubernetes
# Continuwuity for Kubernetes

conduwuit doesn't support horizontal scalability or distributed loading
Continuwuity doesn't support horizontal scalability or distributed loading
natively, however a community-maintained Helm Chart is available here to run
conduwuit on Kubernetes: <https://gitlab.cronce.io/charts/conduwuit>

Should changes need to be made, please reach out to the maintainer in our
Matrix room as this is not maintained/controlled by the conduwuit maintainers.
This should be compatible with Continuwuity, but you will need to change the image reference.

Should changes need to be made, please reach out to the maintainer as this is not maintained/controlled by the Continuwuity maintainers.

@ -1,66 +1,33 @@
# conduwuit for NixOS
# Continuwuity for NixOS

conduwuit can be acquired by Nix (or [Lix][lix]) from various places:
Continuwuity can be acquired by Nix (or [Lix][lix]) from various places:

* The `flake.nix` at the root of the repo
* The `default.nix` at the root of the repo
* From conduwuit's binary cache

A community-maintained NixOS package is available at [`conduwuit`](https://search.nixos.org/packages?channel=unstable&show=conduwuit&from=0&size=50&sort=relevance&type=packages&query=conduwuit)

### Binary cache

A binary cache for conduwuit that the CI/CD publishes to is available at the
following places (both are the same, just different names):

```
https://attic.kennel.juneis.dog/conduit
conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk=

https://attic.kennel.juneis.dog/conduwuit
conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE=
```

The binary caches were recreated some months ago due to attic issues. The old public
keys were:

```
conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg=
conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw=
```

If needed, we have a binary cache on Cachix, but it is limited to 5GB:

```
https://conduwuit.cachix.org
conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
```

If specifying a Git remote URL in your flake, you can use any remotes that
are specified on the README (the mirrors), such as GitHub: `github:girlbossceo/conduwuit`
* From Continuwuity's binary cache

### NixOS module

The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions
welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure
conduwuit.
Continuwuity.

### Conduit NixOS Config Module and SQLite

Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend.
Conduwuit dropped SQLite support in favor of exclusively supporting the much faster RocksDB.
Continuwuity dropped SQLite support in favor of exclusively supporting the much faster RocksDB.
Make sure that you are using the RocksDB backend before migrating!

There is a [tool to migrate a Conduit SQLite database to
RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/).

If you want to run the latest code, you should get conduwuit from the `flake.nix`
If you want to run the latest code, you should get Continuwuity from the `flake.nix`
or `default.nix` and set [`services.matrix-conduit.package`][package]
appropriately to use conduwuit instead of Conduit.
appropriately to use Continuwuity instead of Conduit.

### UNIX sockets

Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module
Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module
a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX
socket option does not exist in Conduit, and the module forcibly sets the `address` and
`port` config options.

@ -84,13 +51,13 @@ disallows the namespace from accessing or creating UNIX sockets and has to be enabled:
systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ];
```

Even though those workarounds are feasible, a conduwuit NixOS configuration module, developed and
Even though those workarounds are feasible, a Continuwuity NixOS configuration module, developed and
published by the community, would be appreciated.

### jemalloc and hardened profile

conduwuit uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix]
due to them using `scudo` by default. You must either disable/hide `scudo` from conduwuit, or
Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix]
due to them using `scudo` by default. You must either disable/hide `scudo` from Continuwuity, or
disable jemalloc like so:

```nix

@ -4,9 +4,9 @@ Information about developing the project. If you are only interested in using
it, you can safely ignore this page. If you plan on contributing, see the
[contributor's guide](./contributing.md).

## conduwuit project layout
## Continuwuity project layout

conduwuit uses a collection of sub-crates, packages, or workspace members
Continuwuity uses a collection of sub-crates, packages, or workspace members
that indicate what each general area of code is for. All of the workspace
members are under `src/`. The workspace definition is at the top level / root
`Cargo.toml`.

@ -14,11 +14,11 @@ members are under `src/`. The workspace definition is at the top level / root
The crate names are generally self-explanatory:
- `admin` is the admin room
- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc
- `core` is core conduwuit functionality like config loading, error definitions,
- `core` is core Continuwuity functionality like config loading, error definitions,
global utilities, logging infrastructure, etc
- `database` is RocksDB methods, helpers, RocksDB config, and general database definitions,
utilities, or functions
- `macros` are conduwuit Rust [macros][macros] like general helper macros, logging
- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging
and error handling macros, and [syn][syn] and [procedural macros][proc-macro]
used for admin room commands and others
- `main` is the "primary" sub-crate. This is where the `main()` function lives,

@ -35,7 +35,7 @@ if you truly find yourself needing to, we recommend reaching out to us in
the Matrix room for discussions about it beforehand.

The primary inspiration for this design was a part of hot-reloadable development,
to support "conduwuit as a library" where specific parts can simply be swapped out.
to support "Continuwuity as a library" where specific parts can simply be swapped out.
There is evidence Conduit wanted to go this route too, as `axum` is technically an
optional feature in Conduit and can be compiled without the binary or axum library
for handling inbound web requests; but it was never completed or worked.

@ -68,10 +68,10 @@ do this if Rust supported workspace-level features to begin with.

## List of forked dependencies

During conduwuit development, we have had to fork
During Continuwuity development, we have had to fork
some dependencies to support our use-cases in some areas. This ranges from
things said upstream project won't accept for any reason, faster-paced
development (unresponsive or slow upstream), conduwuit-specific usecases, or
development (unresponsive or slow upstream), Continuwuity-specific usecases, or
lack of time to upstream some things.

- [ruma/ruma][1]: <https://github.com/girlbossceo/ruwuma> - various performance

@ -84,7 +84,7 @@ builds seem to be broken on upstream, fixes some broken/suspicious code in
places, additional safety measures, and support redzones for Valgrind
- [zyansheep/rustyline-async][4]:
<https://github.com/girlbossceo/rustyline-async> - tab completion callback and
`CTRL+\` signal quit event for conduwuit console CLI
`CTRL+\` signal quit event for Continuwuity console CLI
- [rust-rocksdb/rust-rocksdb][5]:
<https://github.com/girlbossceo/rust-rocksdb-zaidoon1> - [`@zaidoon1`][8]'s fork
has quicker updates, more up to date dependencies, etc. Our fork fixes musl build

@ -97,7 +97,7 @@ alongside other logging/metrics things
## Debugging with `tokio-console`

[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a
`tokio-console`-enabled build of conduwuit, enable the `tokio_console` feature,
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature,
disable the default `release_max_log_level` feature, and set the `--cfg
tokio_unstable` flag to enable experimental tokio APIs. A build might look like
this:

@ -109,7 +109,7 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
--features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
```

You will also need to enable the `tokio_console` config option in conduwuit when
You will also need to enable the `tokio_console` config option in Continuwuity when
starting it. This is because tokio-console causes a gradual memory leak/increased
usage if left enabled.

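A sketch of enabling that option in the config file (assuming it lives under the `[global]` section like the other options; verify against the example config):

```toml
[global]
# Only enable while actively using tokio-console, to avoid the memory growth noted above.
tokio_console = true
```
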
@ -5,7 +5,7 @@ guaranteed to work at this time.

### Summary

When developing in debug-builds with the nightly toolchain, conduwuit is modular
When developing in debug-builds with the nightly toolchain, Continuwuity is modular
using dynamic libraries and various parts of the application are hot-reloadable
while the server is running: http api handlers, admin commands, services,
database, etc. These are all split up into individual workspace crates as seen

@ -42,7 +42,7 @@ library, macOS, and likely other host architectures are not supported (if other
architectures work, feel free to let us know and/or make a PR updating this).
This should work on GNU ld and lld (rust-lld) and gcc/clang, however if you
happen to have linker issues it's recommended to try using the `mold` or `gold`
linkers, and please let us know in the [conduwuit Matrix room][7] the linker
linkers, and please let us know in the [Continuwuity Matrix room][7] the linker
error and what linker solved this issue so we can figure out a solution. Ideally
there should be minimal friction to using this, and in the future a build script
(`build.rs`) may be suitable for making this easier to use if the capabilities

@ -52,13 +52,13 @@ allow us.

As of 19 May 2024, the instructions for using this are:

0. Have patience. Don't hesitate to join the [conduwuit Matrix room][7] to
0. Have patience. Don't hesitate to join the [Continuwuity Matrix room][7] to
receive help using this. As indicated by the various rustflags used and some
of the interesting issues linked at the bottom, this is definitely not something
the Rust ecosystem or toolchain is used to doing.

1. Install the nightly toolchain using rustup. You may need to use `rustup
override set nightly` in your local conduwuit directory, or use `cargo
override set nightly` in your local Continuwuity directory, or use `cargo
+nightly` for all actions.

2. Uncomment `cargo-features` at the top level / root Cargo.toml

@ -85,14 +85,14 @@ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.rustup/toolchains/nightly-x86_64-unknown
Cargo should only rebuild what was changed / what's necessary, so it should
not be rebuilding all the crates.

9. In your conduwuit server terminal, hit/send `CTRL+C` signal. This will tell
conduwuit to find which libraries need to be reloaded, and reloads them as
9. In your Continuwuity server terminal, hit/send `CTRL+C` signal. This will tell
Continuwuity to find which libraries need to be reloaded, and reloads them as
necessary.

10. If there were no errors, it will tell you it successfully reloaded `#`
modules, and your changes should now be visible. Repeat 7 - 9 as needed.

To shut down conduwuit in this setup, hit/send `CTRL+\`. Normal builds still
To shut down Continuwuity in this setup, hit/send `CTRL+\`. Normal builds still
shut down with `CTRL+C` as usual.

Steps 1 - 5 are the initial first-time steps for using this. To remove the hot

@ -101,7 +101,7 @@ reload setup, revert/comment all the Cargo.toml changes.

As mentioned in the requirements section, if you happen to have some linker
issues, try using the `-fuse-ld=` rustflag and specify mold or gold in all the
`rustflags` definitions in the top level Cargo.toml, and please let us know in
the [conduwuit Matrix room][7] the problem. mold can typically be installed
the [Continuwuity Matrix room][7] the problem. mold can typically be installed
through your distro, and gold is provided by the binutils package.

It's possible a helper script can be made to do all of this, or most preferably

@ -136,7 +136,7 @@ acyclic graph. The primary rule is simple and illustrated in the figure below:
**no crate is allowed to call a function or use a variable from a crate below
it.**

![Weak Linking](assets/libraries.png)

When a symbol is referenced between crates they become bound: **crates cannot be

@ -147,7 +147,7 @@ by using an `RTLD_LOCAL` binding for just one link between the main executable
and the first crate, freeing the executable from all modules as no global
binding ever occurs between them.

![Module Loading](assets/reload_order.png)

Proper resource management is essential for reliable reloading to occur. This is

@ -196,5 +196,5 @@ The initial implementation PR is available [here][1].
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049
[5]: https://github.com/rust-lang/cargo/issues/12746
[6]: https://crates.io/crates/hot-lib-reloader/
[7]: https://matrix.to/#/#conduwuit:puppygock.gay
[7]: https://matrix.to/#/#continuwuity:continuwuity.org
[8]: https://crates.io/crates/libloading

@ -1,4 +1,4 @@
# conduwuit
# Continuwuity

{{#include ../README.md:catchphrase}}

@ -8,7 +8,7 @@

- [Deployment options](deploying.md)

If you want to connect an appservice to conduwuit, take a look at the
If you want to connect an appservice to Continuwuity, take a look at the
[appservices documentation](appservices.md).

#### How can I contribute?

@ -1,14 +1,14 @@
# Maintaining your conduwuit setup
# Maintaining your Continuwuity setup

## Moderation

conduwuit has moderation through admin room commands. "Binary commands" (medium
Continuwuity has moderation through admin room commands. "Binary commands" (medium
priority) and an admin API (low priority) are planned. Some moderation-related
config options are available in the example config, such as "global ACLs" and
blocking media requests to certain servers. See the example config for the
moderation config options under the "Moderation / Privacy / Security" section.

conduwuit has moderation admin commands for:
Continuwuity has moderation admin commands for:

- managing room aliases (`!admin rooms alias`)
- managing room directory (`!admin rooms directory`)

@ -36,7 +36,7 @@ each object being newline delimited. An example of doing this is:

## Database (RocksDB)

Generally there is very little you need to do. [Compaction][rocksdb-compaction]
is run automatically based on various defined thresholds tuned for conduwuit to
is run automatically based on various defined thresholds tuned for Continuwuity to
be high performance with the least I/O amplification or overhead. Manually
running compaction is not recommended, nor is compaction via a timer, due to
creating unnecessary I/O amplification. RocksDB is built with io_uring support

@ -50,7 +50,7 @@ Some RocksDB settings can be adjusted such as the compression method chosen. See
the RocksDB section in the [example config](configuration/examples.md).

btrfs users have reported that database compression does not need to be disabled
on conduwuit as the filesystem already does not attempt to compress. This can be
on Continuwuity as the filesystem already does not attempt to compress. This can be
validated by using `filefrag -v` on a `.SST` file in your database, and ensuring
the `physical_offset` matches (no filesystem compression). It is very important
to ensure no additional filesystem compression takes place as this can render

@ -70,7 +70,7 @@ they're server logs or database logs, however they are critical RocksDB files
related to WAL tracking.

The only safe files that can be deleted are the `LOG` files (all caps). These
are the real RocksDB telemetry/log files, however conduwuit is already
are the real RocksDB telemetry/log files, however Continuwuity is already
configured to only store up to 3 RocksDB `LOG` files, as they are generally
useless for average users unless troubleshooting something low-level. If you
would like to store nearly none at all, see the `rocksdb_max_log_files`

@ -88,7 +88,7 @@ still be joined together.

To restore a backup from an online RocksDB backup:

- shut down conduwuit
- shut down Continuwuity
- create a new directory for merging together the data
- in the online backup created, copy all `.sst` files in
`$DATABASE_BACKUP_PATH/shared_checksum` to your new directory

@ -99,9 +99,9 @@ To restore a backup from an online RocksDB backup:
if you have multiple) to your new directory
- set your `database_path` config option to your new directory, or replace your
old one with the new one you crafted
- start up conduwuit again and it should open as normal
- start up Continuwuity again and it should open as normal

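A sketch of those steps as shell commands (the paths are placeholders, and the elided copies follow the list above):

```bash
# Illustrative only; adjust paths and service name to your deployment.
systemctl stop conduwuit
mkdir -p /var/lib/conduwuit-restore
cp "$DATABASE_BACKUP_PATH"/shared_checksum/*.sst /var/lib/conduwuit-restore/
# ...copy the remaining backup files as described in the list above...
# point database_path at /var/lib/conduwuit-restore, then:
systemctl start conduwuit
```
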
If you'd like to do an offline backup, shut down conduwuit and copy your
If you'd like to do an offline backup, shut down Continuwuity and copy your
`database_path` directory elsewhere. This can be restored with no modifications
needed.

@ -110,7 +110,7 @@ directory.

## Media

Media still needs various work, however conduwuit implements media deletion via:
Media still needs various work, however Continuwuity implements media deletion via:

- MXC URI or Event ID (unencrypted and attempts to find the MXC URI in the
event)

@ -118,17 +118,17 @@ event)
- Delete remote media in the past `N` seconds/minutes via filesystem metadata on
the file created time (`btime`) or file modified time (`mtime`)

See the `!admin media` command for further information. All media in conduwuit
See the `!admin media` command for further information. All media in Continuwuity
is stored at `$DATABASE_DIR/media`. This will be configurable soon.

If you are finding yourself needing extensive granular control over media, we
recommend looking into [Matrix Media
Repo](https://github.com/t2bot/matrix-media-repo). conduwuit intends to
Repo](https://github.com/t2bot/matrix-media-repo). Continuwuity intends to
implement various utilities for media, but MMR is dedicated to extensive media
management.

Built-in S3 support is also planned, but for now using an "S3 filesystem" on
`media/` works. conduwuit also sends a `Cache-Control` header of 1 year and
`media/` works. Continuwuity also sends a `Cache-Control` header of 1 year and
immutable for all media requests (download and thumbnail) to reduce unnecessary
media requests from browsers, reduce bandwidth usage, and reduce load.

3 docs/static/_headers vendored
@ -1,3 +1,6 @@
/.well-known/matrix/*
  Access-Control-Allow-Origin: *
  Content-Type: application/json
/.well-known/continuwuity/*
  Access-Control-Allow-Origin: *
  Content-Type: application/json

9 docs/static/announcements.json vendored Normal file
@ -0,0 +1,9 @@
{
  "$schema": "https://continuwuity.org/schema/announcements.schema.json",
  "announcements": [
    {
      "id": 1,
      "message": "Welcome to Continuwuity! Important announcements about the project will appear here."
    }
  ]
}

31 docs/static/announcements.schema.json vendored Normal file
@ -0,0 +1,31 @@
{
  "$schema": "http://json-schema.org/draft-04/schema#",
  "$id": "https://continuwuity.org/schema/announcements.schema.json",
  "type": "object",
  "properties": {
    "announcements": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "id": {
            "type": "integer"
          },
          "message": {
            "type": "string"
          },
          "date": {
            "type": "string"
          }
        },
        "required": [
          "id",
          "message"
        ]
      }
    }
  },
  "required": [
    "announcements"
  ]
}

24 docs/static/support vendored Normal file
@ -0,0 +1,24 @@
{
  "contacts": [
    {
      "email_address": "security@continuwuity.org",
      "role": "m.role.security"
    },
    {
      "matrix_id": "@tom:continuwuity.org",
      "email_address": "tom@tcpip.uk",
      "role": "m.role.admin"
    },
    {
      "matrix_id": "@jade:continuwuity.org",
      "email_address": "jade@continuwuity.org",
      "role": "m.role.admin"
    },
    {
      "matrix_id": "@nex:continuwuity.org",
      "email_address": "nex@continuwuity.org",
      "role": "m.role.admin"
    }
  ],
  "support_page": "https://continuwuity.org/introduction#contact"
}

@ -1,47 +1,48 @@
|
|||
# Troubleshooting conduwuit
|
||||
# Troubleshooting Continuwuity
|
||||
|
||||
> ## Docker users ⚠️
|
||||
> **Docker users ⚠️**
|
||||
>
|
||||
> Docker is extremely UX unfriendly. Because of this, a ton of issues or support
|
||||
> is actually Docker support, not conduwuit support. We also cannot document the
|
||||
> ever-growing list of Docker issues here.
|
||||
>
|
||||
> If you intend on asking for support and you are using Docker, **PLEASE**
|
||||
> triple validate your issues are **NOT** because you have a misconfiguration in
|
||||
> your Docker setup.
|
||||
>
|
||||
> If there are things like Compose file issues or Dockerhub image issues, those
|
||||
> can still be mentioned as long as they're something we can fix.
|
||||
> Docker can be difficult to use and debug. It's common for Docker
|
||||
> misconfigurations to cause issues, particularly with networking and permissions.
|
||||
> Please check that your issues are not due to problems with your Docker setup.
|
||||
|
||||
## conduwuit and Matrix issues
|
||||
## Continuwuity and Matrix issues
|
||||
|
||||
#### Lost access to admin room
|
||||
### Lost access to admin room
|
||||
|
||||
You can reinvite yourself to the admin room through the following methods:
|
||||
- Use the `--execute "users make_user_admin <username>"` conduwuit binary
|
||||
|
||||
- Use the `--execute "users make_user_admin <username>"` Continuwuity binary
|
||||
argument once to invite yourslf to the admin room on startup
|
||||
- Use the conduwuit console/CLI to run the `users make_user_admin` command
|
||||
- Use the Continuwuity console/CLI to run the `users make_user_admin` command
|
||||
- Or specify the `emergency_password` config option to allow you to temporarily
|
||||
log into the server account (`@conduit`) from a web client

## General potential issues

#### Potential DNS issues when using Docker
### Potential DNS issues when using Docker

Docker has issues with its default DNS setup that may cause DNS to not be
properly functional when running conduwuit, resulting in federation issues. The
symptoms of this have shown in excessively long room joins (30+ minutes) from
very long DNS timeouts, log entries of "mismatching responding nameservers",
Docker's DNS setup for containers in a non-default network intercepts queries to
enable resolving of container hostnames to IP addresses. However, due to
performance issues with Docker's built-in resolver, this can cause DNS queries
to take a long time to resolve, resulting in federation issues.

This is particularly common with Docker Compose, as custom networks are easily
created and configured.

Symptoms of this include excessively long room joins (30+ minutes) from very
long DNS timeouts, log entries of "mismatching responding nameservers",
and/or partial or non-functional inbound/outbound federation.

This is **not** a conduwuit issue, and is purely a Docker issue. It is not
sustainable for heavy DNS activity which is normal for Matrix federation. The
workarounds for this are:
- Use DNS over TCP via the config option `query_over_tcp_only = true`
- Don't use Docker's default DNS setup and instead allow the container to use
and communicate with your host's DNS servers (host's `/etc/resolv.conf`)
This is not a bug in continuwuity. Docker's default DNS resolver is not suitable
for heavy DNS activity, which is normal for federated protocols like Matrix.

#### DNS No connections available error message
Workarounds:

- Use DNS over TCP via the config option `query_over_tcp_only = true` (see the sketch after this list)
- Bypass Docker's default DNS setup and instead allow the container to use and communicate with your host's DNS servers. Typically, this can be done by mounting the host's `/etc/resolv.conf`.
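As a concrete sketch of the first workaround, in the Continuwuity TOML config:

```toml
# Force all outgoing DNS queries over TCP, sidestepping Docker's
# problematic UDP resolver path.
query_over_tcp_only = true
```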

### DNS No connections available error message

If you receive large amounts of spurious "DNS No connections
available" error logs, this is due to your DNS server (servers from `/etc/resolv.conf`)

@@ -64,7 +65,7 @@ very computationally expensive, and is extremely susceptible to denial of
service, especially on Matrix. Many servers also strangely have broken DNSSEC
setups and will result in non-functional federation.

conduwuit cannot provide a "works-for-everyone" Unbound DNS setup guide, but
Continuwuity cannot provide a "works-for-everyone" Unbound DNS setup guide, but
the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch]
may be of interest. Disabling DNSSEC on Unbound is done by commenting out the
trust-anchors config options and removing the `validator` module.

@@ -75,9 +76,9 @@ high load, and we have identified its DNS caching to not be very effective.
dnsmasq can possibly work, but it does **not** support TCP fallback which can be
problematic when receiving large DNS responses such as from large SRV records.
If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback`
in conduwuit config.
in Continuwuity config.

Raising `dns_cache_entries` in conduwuit config from the default can also assist
Raising `dns_cache_entries` in Continuwuity config from the default can also assist
in DNS caching, but a full-fledged external caching resolver is better and more
reliable.
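A minimal sketch of both options in the TOML config. The cache size here is an arbitrary illustrative value, not a tested recommendation:

```toml
# Required if you insist on dnsmasq, since it lacks TCP fallback.
dns_tcp_fallback = false

# Illustrative value only; raise from the default for better hit rates.
dns_cache_entries = 32768
```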

@@ -91,13 +92,13 @@ reliability at a slight performance cost due to TCP overhead.

## RocksDB / database issues

#### Database corruption
### Database corruption

If your database is corrupted *and* is failing to start (e.g. checksum
mismatch), it may be recoverable but careful steps must be taken, and there is
no guarantee that it is recoverable.

The first thing that can be done is launching conduwuit with the
The first thing that can be done is launching Continuwuity with the
`rocksdb_repair` config option set to true. This will tell RocksDB to attempt to
repair itself at launch. If this does not work, disable the option and continue
reading.
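A sketch of what that looks like in the TOML config:

```toml
# Ask RocksDB to attempt a self-repair at launch; set this back to false
# (or remove it) once the database opens cleanly again.
rocksdb_repair = true
```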

@@ -109,7 +110,7 @@ RocksDB has the following recovery modes:
- `PointInTime`
- `SkipAnyCorruptedRecord`

By default, conduwuit uses `TolerateCorruptedTailRecords` as generally these may
By default, Continuwuity uses `TolerateCorruptedTailRecords` as generally these may
be due to bad federation and we can re-fetch the correct data over federation.
The RocksDB default is `PointInTime` which will attempt to restore a "snapshot"
of the data when it was last known to be good. This data can be either a few

@@ -126,12 +127,12 @@ if `PointInTime` does not work as a last ditch effort.

With this in mind:

- First start conduwuit with the `PointInTime` recovery method. See the [example
- First start Continuwuity with the `PointInTime` recovery method. See the [example
config](configuration/examples.md) for how to do this using
`rocksdb_recovery_mode`, as sketched below
- If your database successfully opens, clients are recommended to clear their
client cache to account for the rollback
- Leave your conduwuit running in `PointInTime` for at least 30-60 minutes so
- Leave your Continuwuity running in `PointInTime` for at least 30-60 minutes so
that as much corruption as possible is restored
- If all goes well, you should be able to switch back to using
`TolerateCorruptedTailRecords` and you have successfully recovered your database
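A sketch of that first step. The exact value format for `rocksdb_recovery_mode` is documented in the example config; the integer encoding below is an assumption based on it, with 2 selecting `PointInTime`:

```toml
# Assumed encoding (check the example config to confirm):
# 0 = AbsoluteConsistency, 1 = TolerateCorruptedTailRecords (default),
# 2 = PointInTime, 3 = SkipAnyCorruptedRecord.
rocksdb_recovery_mode = 2
```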

@@ -142,16 +143,16 @@ Note that users should not really be debugging things. If you find yourself
debugging and find the issue, please let us know and/or tell us how we can fix it.
Various debug commands can be found in `!admin debug`.

#### Debug/Trace log level
### Debug/Trace log level

conduwuit builds without debug or trace log levels at compile time by default
Continuwuity builds without debug or trace log levels at compile time by default
for substantial performance gains in CPU usage and improved compile times. If
you need to access debug/trace log levels, you will need to build without the
`release_max_log_level` feature or use our provided static debug binaries.

#### Changing log level dynamically
### Changing log level dynamically

conduwuit supports changing the tracing log environment filter on-the-fly using
Continuwuity supports changing the tracing log environment filter on-the-fly using
the admin command `!admin debug change-log-level <log env filter>`. This accepts
a string **without quotes** in the same format as the `log` config option.

@@ -166,9 +167,9 @@ load, simply pass the `--reset` flag.

`!admin debug change-log-level --reset`

#### Pinging servers
### Pinging servers

conduwuit can ping other servers using `!admin debug ping <server>`. This takes
Continuwuity can ping other servers using `!admin debug ping <server>`. This takes
a server name, goes through the server discovery process, and queries
`/_matrix/federation/v1/version`. Errors are output.

@@ -177,15 +178,15 @@ server performance on either side as that endpoint is completely unauthenticated
and simply fetches a string on a static JSON endpoint. It is very low cost in
both bandwidth and computation.

#### Allocator memory stats
### Allocator memory stats

When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you
can see conduwuit's high-level allocator stats by using
can see Continuwuity's high-level allocator stats by using
`!admin server memory-usage` at the bottom.

If you are a developer, you can also view the raw jemalloc statistics with
`!admin debug memory-stats`. Please note that this output is extremely large
which may only be visible in the conduwuit console CLI due to PDU size limits,
which may only be visible in the Continuwuity console CLI due to PDU size limits,
and is not easy for non-developers to understand.

[unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html

@@ -1,6 +1,6 @@
# Setting up TURN/STUN

In order to make or receive calls, a TURN server is required. conduwuit suggests
In order to make or receive calls, a TURN server is required. Continuwuity suggests
using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also
available as a Docker image.

@@ -17,9 +17,9 @@ realm=<your server domain>
A common way to generate a suitable alphanumeric secret key is by using `pwgen
-s 64 1`.

These same values need to be set in conduwuit. See the [example
These same values need to be set in Continuwuity. See the [example
config](configuration/examples.md) in the TURN section for configuring these and
restart conduwuit after.
restart Continuwuity after.

`turn_secret` or a path to `turn_secret_file` must have a value of your
coturn `static-auth-secret`, or use `turn_username` and `turn_password`
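For illustration, a minimal sketch of the corresponding options in the Continuwuity TOML config. The hostnames and the secret are placeholders:

```toml
# Placeholder values; turn_secret must match coturn's static-auth-secret.
turn_uris = ["turn:turn.example.com?transport=udp", "turn:turn.example.com?transport=tcp"]
turn_secret = "<your coturn static-auth-secret>"
```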

@@ -34,7 +34,7 @@ If you are using TURN over TLS, you can replace `turn:` with `turns:` in the
TURN over TLS. This is highly recommended.

If you need unauthenticated access to the TURN URIs, or some clients are
having trouble, you can enable `turn_guest_access` in conduwuit which disables
having trouble, you can enable `turn_guest_access` in Continuwuity which disables
authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer`

### Run

@@ -17,12 +17,61 @@ crate-type = [
]

[features]
brotli_compression = [
	"conduwuit-api/brotli_compression",
	"conduwuit-core/brotli_compression",
	"conduwuit-service/brotli_compression",
]
gzip_compression = [
	"conduwuit-api/gzip_compression",
	"conduwuit-core/gzip_compression",
	"conduwuit-service/gzip_compression",
]
io_uring = [
	"conduwuit-api/io_uring",
	"conduwuit-database/io_uring",
	"conduwuit-service/io_uring",
]
jemalloc = [
	"conduwuit-api/jemalloc",
	"conduwuit-core/jemalloc",
	"conduwuit-database/jemalloc",
	"conduwuit-service/jemalloc",
]
jemalloc_conf = [
	"conduwuit-api/jemalloc_conf",
	"conduwuit-core/jemalloc_conf",
	"conduwuit-database/jemalloc_conf",
	"conduwuit-service/jemalloc_conf",
]
jemalloc_prof = [
	"conduwuit-api/jemalloc_prof",
	"conduwuit-core/jemalloc_prof",
	"conduwuit-database/jemalloc_prof",
	"conduwuit-service/jemalloc_prof",
]
jemalloc_stats = [
	"conduwuit-api/jemalloc_stats",
	"conduwuit-core/jemalloc_stats",
	"conduwuit-database/jemalloc_stats",
	"conduwuit-service/jemalloc_stats",
]
release_max_log_level = [
	"conduwuit-api/release_max_log_level",
	"conduwuit-core/release_max_log_level",
	"conduwuit-database/release_max_log_level",
	"conduwuit-service/release_max_log_level",
	"tracing/max_level_trace",
	"tracing/release_max_level_info",
	"log/max_level_trace",
	"log/release_max_level_info",
]
zstd_compression = [
	"conduwuit-api/zstd_compression",
	"conduwuit-core/zstd_compression",
	"conduwuit-database/zstd_compression",
	"conduwuit-service/zstd_compression",
]

[dependencies]
clap.workspace = true
@@ -2,7 +2,7 @@ use clap::Parser;
use conduwuit::Result;

use crate::{
	appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command,
	appservice, appservice::AppserviceCommand, check, check::CheckCommand, context::Context,
	debug, debug::DebugCommand, federation, federation::FederationCommand, media,
	media::MediaCommand, query, query::QueryCommand, room, room::RoomCommand, server,
	server::ServerCommand, user, user::UserCommand,

@@ -49,20 +49,18 @@ pub(super) enum AdminCommand {
}

#[tracing::instrument(skip_all, name = "command")]
pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result {
pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Result {
	use AdminCommand::*;

	match command {
		| Appservices(command) => appservice::process(command, context).await?,
		| Media(command) => media::process(command, context).await?,
		| Users(command) => user::process(command, context).await?,
		| Rooms(command) => room::process(command, context).await?,
		| Federation(command) => federation::process(command, context).await?,
		| Server(command) => server::process(command, context).await?,
		| Debug(command) => debug::process(command, context).await?,
		| Query(command) => query::process(command, context).await?,
		| Check(command) => check::process(command, context).await?,
		| Appservices(command) => appservice::process(command, context).await,
		| Media(command) => media::process(command, context).await,
		| Users(command) => user::process(command, context).await,
		| Rooms(command) => room::process(command, context).await,
		| Federation(command) => federation::process(command, context).await,
		| Server(command) => server::process(command, context).await,
		| Debug(command) => debug::process(command, context).await,
		| Query(command) => query::process(command, context).await,
		| Check(command) => check::process(command, context).await,
	}

	Ok(())
}

@@ -1,84 +1,80 @@
use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent};
use conduwuit::{Err, Result, checked};
use futures::{FutureExt, StreamExt, TryFutureExt};

use crate::{Result, admin_command};
use crate::admin_command;

#[admin_command]
pub(super) async fn register(&self) -> Result<RoomMessageEventContent> {
	if self.body.len() < 2
		|| !self.body[0].trim().starts_with("```")
		|| self.body.last().unwrap_or(&"").trim() != "```"
pub(super) async fn register(&self) -> Result {
	let body = &self.body;
	let body_len = self.body.len();
	if body_len < 2
		|| !body[0].trim().starts_with("```")
		|| body.last().unwrap_or(&"").trim() != "```"
	{
		return Ok(RoomMessageEventContent::text_plain(
			"Expected code block in command body. Add --help for details.",
		));
		return Err!("Expected code block in command body. Add --help for details.");
	}

	let appservice_config_body = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
	let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config_body);
	let range = 1..checked!(body_len - 1)?;
	let appservice_config_body = body[range].join("\n");
	let parsed_config = serde_yaml::from_str(&appservice_config_body);
	match parsed_config {
		| Err(e) => return Err!("Could not parse appservice config as YAML: {e}"),
		| Ok(registration) => match self
			.services
			.appservice
			.register_appservice(&registration, &appservice_config_body)
			.await
			.map(|()| registration.id)
		{
			| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
				"Appservice registered with ID: {}",
				registration.id
			))),
			| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
				"Failed to register appservice: {e}"
			))),
			| Err(e) => return Err!("Failed to register appservice: {e}"),
			| Ok(id) => write!(self, "Appservice registered with ID: {id}"),
		},
		| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
			"Could not parse appservice config as YAML: {e}"
		))),
	}
	.await
}

#[admin_command]
pub(super) async fn unregister(
	&self,
	appservice_identifier: String,
) -> Result<RoomMessageEventContent> {
pub(super) async fn unregister(&self, appservice_identifier: String) -> Result {
	match self
		.services
		.appservice
		.unregister_appservice(&appservice_identifier)
		.await
	{
		| Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")),
		| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
			"Failed to unregister appservice: {e}"
		))),
		| Err(e) => return Err!("Failed to unregister appservice: {e}"),
		| Ok(()) => write!(self, "Appservice unregistered."),
	}
	.await
}

#[admin_command]
pub(super) async fn show_appservice_config(
	&self,
	appservice_identifier: String,
) -> Result<RoomMessageEventContent> {
pub(super) async fn show_appservice_config(&self, appservice_identifier: String) -> Result {
	match self
		.services
		.appservice
		.get_registration(&appservice_identifier)
		.await
	{
		| None => return Err!("Appservice does not exist."),
		| Some(config) => {
			let config_str = serde_yaml::to_string(&config)
				.expect("config should've been validated on register");
			let output =
				format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",);
			Ok(RoomMessageEventContent::notice_markdown(output))
			let config_str = serde_yaml::to_string(&config)?;
			write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```")
		},
		| None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")),
	}
	.await
}

#[admin_command]
pub(super) async fn list_registered(&self) -> Result<RoomMessageEventContent> {
	let appservices = self.services.appservice.iter_ids().await;
	let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", "));
	Ok(RoomMessageEventContent::text_plain(output))
pub(super) async fn list_registered(&self) -> Result {
	self.services
		.appservice
		.iter_ids()
		.collect()
		.map(Ok)
		.and_then(|appservices: Vec<_>| {
			let len = appservices.len();
			let list = appservices.join(", ");
			write!(self, "Appservices ({len}): {list}")
		})
		.await
}
@@ -1,15 +1,14 @@
use conduwuit::Result;
use conduwuit_macros::implement;
use futures::StreamExt;
use ruma::events::room::message::RoomMessageEventContent;

use crate::Command;
use crate::Context;

/// Uses the iterator in `src/database/key_value/users.rs` to iterate over
/// every user in our database (remote and local). Reports total count, any
/// errors if there were any, etc
#[implement(Command, params = "<'_>")]
pub(super) async fn check_all_users(&self) -> Result<RoomMessageEventContent> {
#[implement(Context, params = "<'_>")]
pub(super) async fn check_all_users(&self) -> Result {
	let timer = tokio::time::Instant::now();
	let users = self.services.users.iter().collect::<Vec<_>>().await;
	let query_time = timer.elapsed();

@@ -18,11 +17,10 @@ pub(super) async fn check_all_users(&self) -> Result<RoomMessageEventContent> {
	let err_count = users.iter().filter(|_user| false).count();
	let ok_count = users.iter().filter(|_user| true).count();

	let message = format!(
	self.write_str(&format!(
		"Database query completed in {query_time:?}:\n\n```\nTotal entries: \
		 {total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \
		 {ok_count:?}\n```"
	);

	Ok(RoomMessageEventContent::notice_markdown(message))
	))
	.await
}

@@ -3,13 +3,13 @@ use std::{fmt, time::SystemTime};
use conduwuit::Result;
use conduwuit_service::Services;
use futures::{
	Future, FutureExt,
	Future, FutureExt, TryFutureExt,
	io::{AsyncWriteExt, BufWriter},
	lock::Mutex,
};
use ruma::EventId;

pub(crate) struct Command<'a> {
pub(crate) struct Context<'a> {
	pub(crate) services: &'a Services,
	pub(crate) body: &'a [&'a str],
	pub(crate) timer: SystemTime,

@@ -17,14 +17,14 @@ pub(crate) struct Command<'a> {
	pub(crate) output: Mutex<BufWriter<Vec<u8>>>,
}

impl Command<'_> {
impl Context<'_> {
	pub(crate) fn write_fmt(
		&self,
		arguments: fmt::Arguments<'_>,
	) -> impl Future<Output = Result> + Send + '_ + use<'_> {
		let buf = format!("{arguments}");
		self.output.lock().then(|mut output| async move {
			output.write_all(buf.as_bytes()).await.map_err(Into::into)
		self.output.lock().then(async move |mut output| {
			output.write_all(buf.as_bytes()).map_err(Into::into).await
		})
	}

@@ -32,8 +32,8 @@ impl Command<'_> {
		&'a self,
		s: &'a str,
	) -> impl Future<Output = Result> + Send + 'a {
		self.output.lock().then(move |mut output| async move {
			output.write_all(s.as_bytes()).await.map_err(Into::into)
		self.output.lock().then(async move |mut output| {
			output.write_all(s.as_bytes()).map_err(Into::into).await
		})
	}
}
@@ -6,7 +6,7 @@ use std::{
};

use conduwuit::{
	Error, Result, debug_error, err, info,
	Err, Result, debug_error, err, info,
	matrix::pdu::{PduEvent, PduId, RawPduId},
	trace, utils,
	utils::{

@@ -17,10 +17,9 @@ use conduwuit::{
};
use futures::{FutureExt, StreamExt, TryStreamExt};
use ruma::{
	CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId,
	ServerName,
	api::{client::error::ErrorKind, federation::event::get_room_state},
	events::room::message::RoomMessageEventContent,
	CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
	OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
	api::federation::event::get_room_state,
};
use service::rooms::{
	short::{ShortEventId, ShortRoomId},

@@ -31,28 +30,24 @@ use tracing_subscriber::EnvFilter;
use crate::admin_command;

#[admin_command]
pub(super) async fn echo(&self, message: Vec<String>) -> Result<RoomMessageEventContent> {
pub(super) async fn echo(&self, message: Vec<String>) -> Result {
	let message = message.join(" ");

	Ok(RoomMessageEventContent::notice_plain(message))
	self.write_str(&message).await
}

#[admin_command]
pub(super) async fn get_auth_chain(
	&self,
	event_id: Box<EventId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result {
	let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else {
		return Ok(RoomMessageEventContent::notice_plain("Event not found."));
		return Err!("Event not found.");
	};

	let room_id_str = event
		.get("room_id")
		.and_then(|val| val.as_str())
		.ok_or_else(|| Error::bad_database("Invalid event in database"))?;
		.and_then(CanonicalJsonValue::as_str)
		.ok_or_else(|| err!(Database("Invalid event in database")))?;

	let room_id = <&RoomId>::try_from(room_id_str)
		.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
		.map_err(|_| err!(Database("Invalid room id field in event in database")))?;

	let start = Instant::now();
	let count = self

@@ -65,51 +60,39 @@ pub(super) async fn get_auth_chain(
		.await;

	let elapsed = start.elapsed();
	Ok(RoomMessageEventContent::text_plain(format!(
		"Loaded auth chain with length {count} in {elapsed:?}"
	)))
	let out = format!("Loaded auth chain with length {count} in {elapsed:?}");

	self.write_str(&out).await
}

#[admin_command]
pub(super) async fn parse_pdu(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn parse_pdu(&self) -> Result {
	if self.body.len() < 2
		|| !self.body[0].trim().starts_with("```")
		|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
	{
		return Ok(RoomMessageEventContent::text_plain(
			"Expected code block in command body. Add --help for details.",
		));
		return Err!("Expected code block in command body. Add --help for details.");
	}

	let string = self.body[1..self.body.len().saturating_sub(1)].join("\n");
	match serde_json::from_str(&string) {
		| Err(e) => return Err!("Invalid json in command body: {e}"),
		| Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) {
			| Err(e) => return Err!("Could not parse PDU JSON: {e:?}"),
			| Ok(hash) => {
				let event_id = OwnedEventId::parse(format!("${hash}"));

				match serde_json::from_value::<PduEvent>(
					serde_json::to_value(value).expect("value is json"),
				) {
					| Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!(
						"EventId: {event_id:?}\n{pdu:#?}"
					))),
					| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
						"EventId: {event_id:?}\nCould not parse event: {e}"
					))),
				match serde_json::from_value::<PduEvent>(serde_json::to_value(value)?) {
					| Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"),
					| Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"),
				}
			},
			| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
				"Could not parse PDU JSON: {e:?}"
			))),
		},
		| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
			"Invalid json in command body: {e}"
		))),
	}
	.await
}

#[admin_command]
pub(super) async fn get_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
	let mut outlier = false;
	let mut pdu_json = self
		.services

@@ -124,21 +107,18 @@ pub(super) async fn get_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessage
	}

	match pdu_json {
		| Err(_) => return Err!("PDU not found locally."),
		| Ok(json) => {
			let json_text =
				serde_json::to_string_pretty(&json).expect("canonical json is valid json");
			Ok(RoomMessageEventContent::notice_markdown(format!(
				"{}\n```json\n{}\n```",
				if outlier {
					"Outlier (Rejected / Soft Failed) PDU found in our database"
				} else {
					"PDU found in our database"
				},
				json_text
			)))
			let text = serde_json::to_string_pretty(&json)?;
			let msg = if outlier {
				"Outlier (Rejected / Soft Failed) PDU found in our database"
			} else {
				"PDU found in our database"
			};
			write!(self, "{msg}\n```json\n{text}\n```",)
		},
		| Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
	}
	.await
}

#[admin_command]

@@ -146,7 +126,7 @@ pub(super) async fn get_short_pdu(
	&self,
	shortroomid: ShortRoomId,
	shorteventid: ShortEventId,
) -> Result<RoomMessageEventContent> {
) -> Result {
	let pdu_id: RawPduId = PduId {
		shortroomid,
		shorteventid: shorteventid.into(),

@@ -161,41 +141,33 @@ pub(super) async fn get_short_pdu(
		.await;

	match pdu_json {
		| Err(_) => return Err!("PDU not found locally."),
		| Ok(json) => {
			let json_text =
				serde_json::to_string_pretty(&json).expect("canonical json is valid json");
			Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",)))
			let json_text = serde_json::to_string_pretty(&json)?;
			write!(self, "```json\n{json_text}\n```")
		},
		| Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
	}
	.await
}

#[admin_command]
pub(super) async fn get_remote_pdu_list(
	&self,
	server: Box<ServerName>,
	force: bool,
) -> Result<RoomMessageEventContent> {
pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: bool) -> Result {
	if !self.services.server.config.allow_federation {
		return Ok(RoomMessageEventContent::text_plain(
			"Federation is disabled on this homeserver.",
		));
		return Err!("Federation is disabled on this homeserver.",);
	}

	if server == self.services.globals.server_name() {
		return Ok(RoomMessageEventContent::text_plain(
		return Err!(
			"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
			 fetching local PDUs from the database.",
		));
		);
	}

	if self.body.len() < 2
		|| !self.body[0].trim().starts_with("```")
		|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
	{
		return Ok(RoomMessageEventContent::text_plain(
			"Expected code block in command body. Add --help for details.",
		));
		return Err!("Expected code block in command body. Add --help for details.",);
	}

	let list = self

@@ -209,18 +181,19 @@ pub(super) async fn get_remote_pdu_list(
	let mut failed_count: usize = 0;
	let mut success_count: usize = 0;

	for pdu in list {
	for event_id in list {
		if force {
			match self.get_remote_pdu(Box::from(pdu), server.clone()).await {
			match self
				.get_remote_pdu(event_id.to_owned(), server.clone())
				.await
			{
				| Err(e) => {
					failed_count = failed_count.saturating_add(1);
					self.services
						.admin
						.send_message(RoomMessageEventContent::text_plain(format!(
							"Failed to get remote PDU, ignoring error: {e}"
						)))
						.await
						.ok();
						.send_text(&format!("Failed to get remote PDU, ignoring error: {e}"))
						.await;

					warn!("Failed to get remote PDU, ignoring error: {e}");
				},
				| _ => {

@@ -228,44 +201,48 @@ pub(super) async fn get_remote_pdu_list(
				},
			}
		} else {
			self.get_remote_pdu(Box::from(pdu), server.clone()).await?;
			self.get_remote_pdu(event_id.to_owned(), server.clone())
				.await?;
			success_count = success_count.saturating_add(1);
		}
	}

	Ok(RoomMessageEventContent::text_plain(format!(
		"Fetched {success_count} remote PDUs successfully with {failed_count} failures"
	)))
	let out =
		format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures");

	self.write_str(&out).await
}

#[admin_command]
pub(super) async fn get_remote_pdu(
	&self,
	event_id: Box<EventId>,
	server: Box<ServerName>,
) -> Result<RoomMessageEventContent> {
	event_id: OwnedEventId,
	server: OwnedServerName,
) -> Result {
	if !self.services.server.config.allow_federation {
		return Ok(RoomMessageEventContent::text_plain(
			"Federation is disabled on this homeserver.",
		));
		return Err!("Federation is disabled on this homeserver.");
	}

	if server == self.services.globals.server_name() {
		return Ok(RoomMessageEventContent::text_plain(
		return Err!(
			"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
			 fetching local PDUs.",
		));
		);
	}

	match self
		.services
		.sending
		.send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request {
			event_id: event_id.clone().into(),
			event_id: event_id.clone(),
			include_unredacted_content: None,
		})
		.await
	{
		| Err(e) =>
			return Err!(
				"Remote server did not have PDU or failed sending request to remote server: {e}"
			),
		| Ok(response) => {
			let json: CanonicalJsonObject =
				serde_json::from_str(response.pdu.get()).map_err(|e| {

@@ -273,10 +250,9 @@ pub(super) async fn get_remote_pdu(
						"Requested event ID {event_id} from server but failed to convert from \
						 RawValue to CanonicalJsonObject (malformed event/response?): {e}"
					);
					Error::BadRequest(
						ErrorKind::Unknown,
						"Received response from server but failed to parse PDU",
					)
					err!(Request(Unknown(
						"Received response from server but failed to parse PDU"
					)))
				})?;

			trace!("Attempting to parse PDU: {:?}", &response.pdu);

@@ -286,6 +262,7 @@ pub(super) async fn get_remote_pdu(
				.rooms
				.event_handler
				.parse_incoming_pdu(&response.pdu)
				.boxed()
				.await;

			let (event_id, value, room_id) = match parsed_result {

@@ -293,9 +270,7 @@ pub(super) async fn get_remote_pdu(
				| Err(e) => {
					warn!("Failed to parse PDU: {e}");
					info!("Full PDU: {:?}", &response.pdu);
					return Ok(RoomMessageEventContent::text_plain(format!(
						"Failed to parse PDU remote server {server} sent us: {e}"
					)));
					return Err!("Failed to parse PDU remote server {server} sent us: {e}");
				},
			};

@@ -307,30 +282,18 @@ pub(super) async fn get_remote_pdu(
				.rooms
				.timeline
				.backfill_pdu(&server, response.pdu)
				.boxed()
				.await?;

			let json_text =
				serde_json::to_string_pretty(&json).expect("canonical json is valid json");

			Ok(RoomMessageEventContent::notice_markdown(format!(
				"{}\n```json\n{}\n```",
				"Got PDU from specified server and handled as backfilled PDU successfully. \
				 Event body:",
				json_text
			)))
			let text = serde_json::to_string_pretty(&json)?;
			let msg = "Got PDU from specified server and handled as backfilled";
			write!(self, "{msg}. Event body:\n```json\n{text}\n```")
		},
		| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
			"Remote server did not have PDU or failed sending request to remote server: {e}"
		))),
	}
	.await
}

#[admin_command]
pub(super) async fn get_room_state(
	&self,
	room: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
	let room_id = self.services.rooms.alias.resolve(&room).await?;
	let room_state: Vec<_> = self
		.services

@@ -342,28 +305,24 @@ pub(super) async fn get_room_state(
		.await?;

	if room_state.is_empty() {
		return Ok(RoomMessageEventContent::text_plain(
			"Unable to find room state in our database (vector is empty)",
		));
		return Err!("Unable to find room state in our database (vector is empty)",);
	}

	let json = serde_json::to_string_pretty(&room_state).map_err(|e| {
		warn!("Failed converting room state vector in our database to pretty JSON: {e}");
		Error::bad_database(
		err!(Database(
			"Failed to convert room state events to pretty JSON, possible invalid room state \
			 events in our database",
		)
			 events in our database {e}",
		))
	})?;

	Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json}\n```")))
	let out = format!("```json\n{json}\n```");
	self.write_str(&out).await
}

#[admin_command]
pub(super) async fn ping(&self, server: Box<ServerName>) -> Result<RoomMessageEventContent> {
pub(super) async fn ping(&self, server: OwnedServerName) -> Result {
	if server == self.services.globals.server_name() {
		return Ok(RoomMessageEventContent::text_plain(
			"Not allowed to send federation requests to ourselves.",
		));
		return Err!("Not allowed to send federation requests to ourselves.");
	}

	let timer = tokio::time::Instant::now();

@@ -377,35 +336,27 @@ pub(super) async fn ping(&self, server: Box<ServerName>) -> Result<RoomMessageEv
	)
	.await
	{
		| Err(e) => {
			return Err!("Failed sending federation request to specified server:\n\n{e}");
		},
		| Ok(response) => {
			let ping_time = timer.elapsed();

			let json_text_res = serde_json::to_string_pretty(&response.server);

			if let Ok(json) = json_text_res {
				return Ok(RoomMessageEventContent::notice_markdown(format!(
					"Got response which took {ping_time:?} time:\n```json\n{json}\n```"
				)));
			}
			let out = if let Ok(json) = json_text_res {
				format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```")
			} else {
				format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}")
			};

			Ok(RoomMessageEventContent::text_plain(format!(
				"Got non-JSON response which took {ping_time:?} time:\n{response:?}"
			)))
		},
		| Err(e) => {
			warn!(
				"Failed sending federation request to specified server from ping debug command: \
				 {e}"
			);
			Ok(RoomMessageEventContent::text_plain(format!(
				"Failed sending federation request to specified server:\n\n{e}",
			)))
			write!(self, "{out}")
		},
	}
	.await
}

#[admin_command]
pub(super) async fn force_device_list_updates(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn force_device_list_updates(&self) -> Result {
	// Force E2EE device list updates for all users
	self.services
		.users

@@ -413,27 +364,17 @@ pub(super) async fn force_device_list_updates(&self) -> Result<RoomMessageEventC
		.for_each(|user_id| self.services.users.mark_device_key_update(user_id))
		.await;

	Ok(RoomMessageEventContent::text_plain(
		"Marked all devices for all users as having new keys to update",
	))
	write!(self, "Marked all devices for all users as having new keys to update").await
}

#[admin_command]
pub(super) async fn change_log_level(
	&self,
	filter: Option<String>,
	reset: bool,
) -> Result<RoomMessageEventContent> {
pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool) -> Result {
	let handles = &["console"];

	if reset {
		let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) {
			| Ok(s) => s,
			| Err(e) => {
				return Ok(RoomMessageEventContent::text_plain(format!(
					"Log level from config appears to be invalid now: {e}"
				)));
			},
			| Err(e) => return Err!("Log level from config appears to be invalid now: {e}"),
		};

		match self

@@ -443,16 +384,12 @@ pub(super) async fn change_log_level(
			.reload
			.reload(&old_filter_layer, Some(handles))
		{
			| Err(e) =>
				return Err!("Failed to modify and reload the global tracing log level: {e}"),
			| Ok(()) => {
				return Ok(RoomMessageEventContent::text_plain(format!(
					"Successfully changed log level back to config value {}",
					self.services.server.config.log
				)));
			},
			| Err(e) => {
				return Ok(RoomMessageEventContent::text_plain(format!(
					"Failed to modify and reload the global tracing log level: {e}"
				)));
				let value = &self.services.server.config.log;
				let out = format!("Successfully changed log level back to config value {value}");
				return self.write_str(&out).await;
			},
		}
	}

@@ -460,11 +397,7 @@ pub(super) async fn change_log_level(
	if let Some(filter) = filter {
		let new_filter_layer = match EnvFilter::try_new(filter) {
			| Ok(s) => s,
			| Err(e) => {
				return Ok(RoomMessageEventContent::text_plain(format!(
					"Invalid log level filter specified: {e}"
				)));
			},
			| Err(e) => return Err!("Invalid log level filter specified: {e}"),
		};

		match self

@@ -474,90 +407,75 @@ pub(super) async fn change_log_level(
			.reload
			.reload(&new_filter_layer, Some(handles))
		{
			| Ok(()) => {
				return Ok(RoomMessageEventContent::text_plain("Successfully changed log level"));
			},
			| Err(e) => {
				return Ok(RoomMessageEventContent::text_plain(format!(
					"Failed to modify and reload the global tracing log level: {e}"
				)));
			},
			| Ok(()) => return self.write_str("Successfully changed log level").await,
			| Err(e) =>
				return Err!("Failed to modify and reload the global tracing log level: {e}"),
		}
	}

	Ok(RoomMessageEventContent::text_plain("No log level was specified."))
	Err!("No log level was specified.")
}

#[admin_command]
pub(super) async fn sign_json(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn sign_json(&self) -> Result {
	if self.body.len() < 2
		|| !self.body[0].trim().starts_with("```")
		|| self.body.last().unwrap_or(&"").trim() != "```"
	{
		return Ok(RoomMessageEventContent::text_plain(
			"Expected code block in command body. Add --help for details.",
		));
		return Err!("Expected code block in command body. Add --help for details.");
	}

	let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
	match serde_json::from_str(&string) {
		| Err(e) => return Err!("Invalid json: {e}"),
		| Ok(mut value) => {
			self.services
				.server_keys
				.sign_json(&mut value)
				.expect("our request json is what ruma expects");
			let json_text =
				serde_json::to_string_pretty(&value).expect("canonical json is valid json");
			Ok(RoomMessageEventContent::text_plain(json_text))
			self.services.server_keys.sign_json(&mut value)?;
			let json_text = serde_json::to_string_pretty(&value)?;
			write!(self, "{json_text}")
		},
		| Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
	}
	.await
}

#[admin_command]
pub(super) async fn verify_json(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn verify_json(&self) -> Result {
	if self.body.len() < 2
		|| !self.body[0].trim().starts_with("```")
		|| self.body.last().unwrap_or(&"").trim() != "```"
	{
		return Ok(RoomMessageEventContent::text_plain(
			"Expected code block in command body. Add --help for details.",
		));
		return Err!("Expected code block in command body. Add --help for details.");
	}

	let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
	match serde_json::from_str::<CanonicalJsonObject>(&string) {
		| Err(e) => return Err!("Invalid json: {e}"),
		| Ok(value) => match self.services.server_keys.verify_json(&value, None).await {
			| Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")),
			| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
				"Signature verification failed: {e}"
			))),
			| Err(e) => return Err!("Signature verification failed: {e}"),
			| Ok(()) => write!(self, "Signature correct"),
		},
		| Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
	}
	.await
}

#[admin_command]
pub(super) async fn verify_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result {
	use ruma::signatures::Verified;

	let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?;

	event.remove("event_id");
	let msg = match self.services.server_keys.verify_event(&event, None).await {
		| Ok(ruma::signatures::Verified::Signatures) =>
			"signatures OK, but content hash failed (redaction).",
		| Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.",
		| Err(e) => return Err(e),
		| Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).",
		| Ok(Verified::All) => "signatures and hashes OK.",
	};

	Ok(RoomMessageEventContent::notice_plain(msg))
	self.write_str(msg).await
}

#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn first_pdu_in_room(
	&self,
	room_id: Box<RoomId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
	if !self
		.services
		.rooms

@@ -565,9 +483,7 @@ pub(super) async fn first_pdu_in_room(
		.server_in_room(&self.services.server.name, &room_id)
		.await
	{
		return Ok(RoomMessageEventContent::text_plain(
			"We are not participating in the room / we don't know about the room ID.",
		));
		return Err!("We are not participating in the room / we don't know about the room ID.",);
	}

	let first_pdu = self

@@ -576,17 +492,15 @@ pub(super) async fn first_pdu_in_room(
		.timeline
		.first_pdu_in_room(&room_id)
		.await
		.map_err(|_| Error::bad_database("Failed to find the first PDU in database"))?;
		.map_err(|_| err!(Database("Failed to find the first PDU in database")))?;

	Ok(RoomMessageEventContent::text_plain(format!("{first_pdu:?}")))
	let out = format!("{first_pdu:?}");
	self.write_str(&out).await
}

#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn latest_pdu_in_room(
	&self,
	room_id: Box<RoomId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
	if !self
		.services
		.rooms

@@ -594,9 +508,7 @@ pub(super) async fn latest_pdu_in_room(
		.server_in_room(&self.services.server.name, &room_id)
		.await
	{
		return Ok(RoomMessageEventContent::text_plain(
			"We are not participating in the room / we don't know about the room ID.",
		));
		return Err!("We are not participating in the room / we don't know about the room ID.");
	}

	let latest_pdu = self

@@ -605,18 +517,19 @@ pub(super) async fn latest_pdu_in_room(
		.timeline
		.latest_pdu_in_room(&room_id)
		.await
		.map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?;
		.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?;

	Ok(RoomMessageEventContent::text_plain(format!("{latest_pdu:?}")))
	let out = format!("{latest_pdu:?}");
	self.write_str(&out).await
}

#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn force_set_room_state_from_server(
	&self,
	room_id: Box<RoomId>,
	server_name: Box<ServerName>,
) -> Result<RoomMessageEventContent> {
	room_id: OwnedRoomId,
	server_name: OwnedServerName,
) -> Result {
	if !self
		.services
		.rooms

@@ -624,9 +537,7 @@ pub(super) async fn force_set_room_state_from_server(
		.server_in_room(&self.services.server.name, &room_id)
		.await
	{
		return Ok(RoomMessageEventContent::text_plain(
			"We are not participating in the room / we don't know about the room ID.",
		));
		return Err!("We are not participating in the room / we don't know about the room ID.");
	}

	let first_pdu = self

@@ -635,7 +546,7 @@ pub(super) async fn force_set_room_state_from_server(
		.timeline
		.latest_pdu_in_room(&room_id)
		.await
		.map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?;
		.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?;

	let room_version = self.services.rooms.state.get_room_version(&room_id).await?;

@@ -645,10 +556,9 @@ pub(super) async fn force_set_room_state_from_server(
		.services
		.sending
		.send_federation_request(&server_name, get_room_state::v1::Request {
			room_id: room_id.clone().into(),
			room_id: room_id.clone(),
			event_id: first_pdu.event_id.clone(),
		})
		.boxed()
		.await?;

	for pdu in remote_state_response.pdus.clone() {

@@ -657,7 +567,6 @@ pub(super) async fn force_set_room_state_from_server(
			.rooms
			.event_handler
			.parse_incoming_pdu(&pdu)
			.boxed()
			.await
		{
			| Ok(t) => t,

@@ -721,7 +630,6 @@ pub(super) async fn force_set_room_state_from_server(
		.rooms
		.event_handler
		.resolve_state(&room_id, &room_version, state)
		.boxed()
		.await?;

	info!("Forcing new room state");

@@ -737,6 +645,7 @@ pub(super) async fn force_set_room_state_from_server(
		.await?;

	let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await;

	self.services
		.rooms
		.state

@@ -753,21 +662,18 @@ pub(super) async fn force_set_room_state_from_server(
		.update_joined_count(&room_id)
		.await;

	drop(state_lock);

	Ok(RoomMessageEventContent::text_plain(
		"Successfully forced the room state from the requested remote server.",
	))
	self.write_str("Successfully forced the room state from the requested remote server.")
		.await
}

#[admin_command]
pub(super) async fn get_signing_keys(
	&self,
	server_name: Option<Box<ServerName>>,
	notary: Option<Box<ServerName>>,
	server_name: Option<OwnedServerName>,
	notary: Option<OwnedServerName>,
	query: bool,
) -> Result<RoomMessageEventContent> {
	let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into());
) -> Result {
	let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone());

	if let Some(notary) = notary {
		let signing_keys = self

@@ -776,9 +682,8 @@ pub(super) async fn get_signing_keys(
			.notary_request(&notary, &server_name)
			.await?;

		return Ok(RoomMessageEventContent::notice_markdown(format!(
			"```rs\n{signing_keys:#?}\n```"
		)));
		let out = format!("```rs\n{signing_keys:#?}\n```");
		return self.write_str(&out).await;
	}

	let signing_keys = if query {

@@ -793,17 +698,13 @@ pub(super) async fn get_signing_keys(
		.await?
	};

	Ok(RoomMessageEventContent::notice_markdown(format!(
		"```rs\n{signing_keys:#?}\n```"
	)))
	let out = format!("```rs\n{signing_keys:#?}\n```");
	self.write_str(&out).await
}

#[admin_command]
pub(super) async fn get_verify_keys(
	&self,
	server_name: Option<Box<ServerName>>,
) -> Result<RoomMessageEventContent> {
	let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into());
pub(super) async fn get_verify_keys(&self, server_name: Option<OwnedServerName>) -> Result {
	let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone());

	let keys = self
		.services

@@ -818,26 +719,24 @@ pub(super) async fn get_verify_keys(
		writeln!(out, "| {key_id} | {key:?} |")?;
	}

	Ok(RoomMessageEventContent::notice_markdown(out))
	self.write_str(&out).await
}

#[admin_command]
pub(super) async fn resolve_true_destination(
	&self,
	server_name: Box<ServerName>,
	server_name: OwnedServerName,
	no_cache: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
	if !self.services.server.config.allow_federation {
		return Ok(RoomMessageEventContent::text_plain(
			"Federation is disabled on this homeserver.",
		));
		return Err!("Federation is disabled on this homeserver.",);
	}

	if server_name == self.services.server.name {
		return Ok(RoomMessageEventContent::text_plain(
		return Err!(
			"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
			 fetching local PDUs.",
		));
		);
	}

	let actual = self

@@ -846,13 +745,12 @@ pub(super) async fn resolve_true_destination(
		.resolve_actual_dest(&server_name, !no_cache)
		.await?;

	let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host,);

	Ok(RoomMessageEventContent::text_markdown(msg))
	let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host);
	self.write_str(&msg).await
}

#[admin_command]
pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result<RoomMessageEventContent> {
pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result {
	const OPTS: &str = "abcdefghijklmnopqrstuvwxyz";

	let opts: String = OPTS

@@ -871,13 +769,12 @@ pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result<RoomMess
	self.write_str("```\n").await?;
	self.write_str(&stats).await?;
	self.write_str("\n```").await?;

	Ok(RoomMessageEventContent::text_plain(""))
	Ok(())
}

#[cfg(tokio_unstable)]
#[admin_command]
pub(super) async fn runtime_metrics(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn runtime_metrics(&self) -> Result {
	let out = self.services.server.metrics.runtime_metrics().map_or_else(
		|| "Runtime metrics are not available.".to_owned(),
		|metrics| {

@@ -890,51 +787,51 @@ pub(super) async fn runtime_metrics(&self) -> Result<RoomMessageEventContent> {
		},
	);

	Ok(RoomMessageEventContent::text_markdown(out))
	self.write_str(&out).await
}

#[cfg(not(tokio_unstable))]
#[admin_command]
pub(super) async fn runtime_metrics(&self) -> Result<RoomMessageEventContent> {
	Ok(RoomMessageEventContent::text_markdown(
		"Runtime metrics require building with `tokio_unstable`.",
	))
pub(super) async fn runtime_metrics(&self) -> Result {
	self.write_str("Runtime metrics require building with `tokio_unstable`.")
		.await
}

#[cfg(tokio_unstable)]
#[admin_command]
pub(super) async fn runtime_interval(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn runtime_interval(&self) -> Result {
	let out = self.services.server.metrics.runtime_interval().map_or_else(
		|| "Runtime metrics are not available.".to_owned(),
		|metrics| format!("```rs\n{metrics:#?}\n```"),
	);

	Ok(RoomMessageEventContent::text_markdown(out))
	self.write_str(&out).await
}

#[cfg(not(tokio_unstable))]
#[admin_command]
pub(super) async fn runtime_interval(&self) -> Result<RoomMessageEventContent> {
	Ok(RoomMessageEventContent::text_markdown(
		"Runtime metrics require building with `tokio_unstable`.",
	))
pub(super) async fn runtime_interval(&self) -> Result {
	self.write_str("Runtime metrics require building with `tokio_unstable`.")
		.await
}

#[admin_command]
pub(super) async fn time(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn time(&self) -> Result {
	let now = SystemTime::now();
	Ok(RoomMessageEventContent::text_markdown(utils::time::format(now, "%+")))
	let now = utils::time::format(now, "%+");

	self.write_str(&now).await
}

#[admin_command]
pub(super) async fn list_dependencies(&self, names: bool) -> Result<RoomMessageEventContent> {
pub(super) async fn list_dependencies(&self, names: bool) -> Result {
	if names {
		let out = info::cargo::dependencies_names().join(" ");
		return Ok(RoomMessageEventContent::notice_markdown(out));
		return self.write_str(&out).await;
	}

	let deps = info::cargo::dependencies();
	let mut out = String::new();
	let deps = info::cargo::dependencies();
	writeln!(out, "| name | version | features |")?;
	writeln!(out, "| ---- | ------- | -------- |")?;
	for (name, dep) in deps {

@@ -945,10 +842,11 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result<RoomMessageE
		} else {
			String::new()
		};

		writeln!(out, "| {name} | {version} | {feats} |")?;
	}

	Ok(RoomMessageEventContent::notice_markdown(out))
	self.write_str(&out).await
}

#[admin_command]

@@ -956,7 +854,7 @@ pub(super) async fn database_stats(
	&self,
	property: Option<String>,
	map: Option<String>,
) -> Result<RoomMessageEventContent> {
) -> Result {
	let map_name = map.as_ref().map_or(EMPTY, String::as_str);
	let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned());
	self.services

@@ -968,17 +866,11 @@ pub(super) async fn database_stats(
			let res = map.property(&property).expect("invalid property");
			writeln!(self, "##### {name}:\n```\n{}\n```", res.trim())
		})
		.await?;

	Ok(RoomMessageEventContent::notice_plain(""))
		.await
}

#[admin_command]
pub(super) async fn database_files(
	&self,
	map: Option<String>,
	level: Option<i32>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn database_files(&self, map: Option<String>, level: Option<i32>) -> Result {
	let mut files: Vec<_> = self.services.db.db.file_list().collect::<Result<_>>()?;

	files.sort_by_key(|f| f.name.clone());

@@ -1005,16 +897,12 @@ pub(super) async fn database_files(
				file.column_family_name,
			)
		})
		.await?;

	Ok(RoomMessageEventContent::notice_plain(""))
		.await
}

#[admin_command]
pub(super) async fn trim_memory(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn trim_memory(&self) -> Result {
	conduwuit::alloc::trim(None)?;

	writeln!(self, "done").await?;

	Ok(RoomMessageEventContent::notice_plain(""))
	writeln!(self, "done").await
}
|
||||
|
|
|
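The pattern in the hunks above repeats through the rest of this changeset: every `#[admin_command]` handler drops its `Result<RoomMessageEventContent>` return type for a bare `Result` and streams its text through `self.write_str(...)` instead of constructing a reply event. Below is a minimal standalone sketch of that shape; `Context` and `write_str` here are toy stand-ins (synchronous, buffering into a `String`), not the real crate types, whose `write_str` is async and feeds the admin room.

```rust
// Toy sketch of the new output flow: handlers return a bare `Result`
// and push text into the command context rather than building a
// RoomMessageEventContent value to hand back.
use std::cell::RefCell;

type Result<T = ()> = std::result::Result<T, String>;

struct Context {
    output: RefCell<String>, // stand-in for the real async output sink
}

impl Context {
    fn write_str(&self, s: &str) -> Result {
        self.output.borrow_mut().push_str(s);
        Ok(())
    }
}

// before: async fn time(&self) -> Result<RoomMessageEventContent>
// after:  async fn time(&self) -> Result
fn time(ctx: &Context) -> Result {
    let now = format!("{:?}", std::time::SystemTime::now());
    ctx.write_str(&now)
}

fn main() -> Result {
    let ctx = Context { output: RefCell::default() };
    time(&ctx)?;
    println!("{}", ctx.output.borrow());
    Ok(())
}
```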
@@ -3,7 +3,7 @@ pub(crate) mod tester;

 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::{EventId, OwnedRoomOrAliasId, RoomId, ServerName};
+use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName};
 use service::rooms::short::{ShortEventId, ShortRoomId};

 use self::tester::TesterCommand;
@@ -20,7 +20,7 @@ pub(super) enum DebugCommand {
     /// - Get the auth_chain of a PDU
     GetAuthChain {
         /// An event ID (the $ character followed by the base64 reference hash)
-        event_id: Box<EventId>,
+        event_id: OwnedEventId,
     },

     /// - Parse and print a PDU from a JSON
@@ -35,7 +35,7 @@ pub(super) enum DebugCommand {
     /// - Retrieve and print a PDU by EventID from the conduwuit database
     GetPdu {
         /// An event ID (a $ followed by the base64 reference hash)
-        event_id: Box<EventId>,
+        event_id: OwnedEventId,
     },

     /// - Retrieve and print a PDU by PduId from the conduwuit database
@@ -52,11 +52,11 @@ pub(super) enum DebugCommand {
     /// (following normal event auth rules, handles it as an incoming PDU).
     GetRemotePdu {
         /// An event ID (a $ followed by the base64 reference hash)
-        event_id: Box<EventId>,
+        event_id: OwnedEventId,

         /// Argument for us to attempt to fetch the event from the
         /// specified remote server.
-        server: Box<ServerName>,
+        server: OwnedServerName,
     },

     /// - Same as `get-remote-pdu` but accepts a codeblock newline delimited
@@ -64,7 +64,7 @@ pub(super) enum DebugCommand {
     GetRemotePduList {
         /// Argument for us to attempt to fetch all the events from the
         /// specified remote server.
-        server: Box<ServerName>,
+        server: OwnedServerName,

         /// If set, ignores errors, else stops at the first error/failure.
         #[arg(short, long)]
@@ -88,10 +88,10 @@ pub(super) enum DebugCommand {

     /// - Get and display signing keys from local cache or remote server.
     GetSigningKeys {
-        server_name: Option<Box<ServerName>>,
+        server_name: Option<OwnedServerName>,

         #[arg(long)]
-        notary: Option<Box<ServerName>>,
+        notary: Option<OwnedServerName>,

         #[arg(short, long)]
         query: bool,
@@ -99,14 +99,14 @@ pub(super) enum DebugCommand {

     /// - Get and display signing keys from local cache or remote server.
     GetVerifyKeys {
-        server_name: Option<Box<ServerName>>,
+        server_name: Option<OwnedServerName>,
     },

     /// - Sends a federation request to the remote server's
     /// `/_matrix/federation/v1/version` endpoint and measures the latency it
     /// took for the server to respond
     Ping {
-        server: Box<ServerName>,
+        server: OwnedServerName,
     },

     /// - Forces device lists for all local and remote users to be updated (as
@@ -141,21 +141,21 @@ pub(super) enum DebugCommand {
     ///
     /// This re-verifies a PDU existing in the database found by ID.
     VerifyPdu {
-        event_id: Box<EventId>,
+        event_id: OwnedEventId,
     },

     /// - Prints the very first PDU in the specified room (typically
     /// m.room.create)
     FirstPduInRoom {
         /// The room ID
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     /// - Prints the latest ("last") PDU in the specified room (typically a
     /// message)
     LatestPduInRoom {
         /// The room ID
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     /// - Forcefully replaces the room state of our local copy of the specified
@@ -174,9 +174,9 @@ pub(super) enum DebugCommand {
     /// `/_matrix/federation/v1/state/{roomId}`.
     ForceSetRoomStateFromServer {
         /// The impacted room ID
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
         /// The server we will use to query the room state for
-        server_name: Box<ServerName>,
+        server_name: OwnedServerName,
     },

     /// - Runs a server name through conduwuit's true destination resolution
@@ -184,7 +184,7 @@ pub(super) enum DebugCommand {
     ///
     /// Useful for debugging well-known issues
     ResolveTrueDestination {
-        server_name: Box<ServerName>,
+        server_name: OwnedServerName,

         #[arg(short, long)]
         no_cache: bool,

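The `Box<EventId>` to `OwnedEventId` migration in these enums works because clap's derive only needs each field's type to implement `FromStr`; ruma's owned ID types do, so there is no need to box the unsized borrowed forms. A toy sketch of the mechanism, assuming clap 4 with the `derive` feature; `OwnedServerName` here is a hypothetical stand-in with a trivial check, not the ruma type:

```rust
// clap fills any argument whose type implements FromStr (with an error
// convertible to Box<dyn Error + Send + Sync>), which is why the real
// enums can take OwnedEventId / OwnedServerName fields directly.
use std::str::FromStr;

#[derive(Clone, Debug)]
struct OwnedServerName(String); // stand-in for ruma::OwnedServerName

impl FromStr for OwnedServerName {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // toy validation only; the real type parses per the Matrix grammar
        s.contains('.')
            .then(|| Self(s.to_owned()))
            .ok_or_else(|| format!("{s:?} is not a server name"))
    }
}

#[derive(clap::Parser)]
struct Ping {
    server: OwnedServerName,
}

fn main() {
    use clap::Parser;
    let ping = Ping::parse_from(["ping", "matrix.org"]);
    println!("{:?}", ping.server);
}
```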
@@ -1,7 +1,6 @@
-use conduwuit::Err;
-use ruma::events::room::message::RoomMessageEventContent;
+use conduwuit::{Err, Result};

-use crate::{Result, admin_command, admin_command_dispatch};
+use crate::{admin_command, admin_command_dispatch};

 #[admin_command_dispatch]
 #[derive(Debug, clap::Subcommand)]
@@ -14,14 +13,14 @@ pub(crate) enum TesterCommand {

 #[rustfmt::skip]
 #[admin_command]
-async fn panic(&self) -> Result<RoomMessageEventContent> {
+async fn panic(&self) -> Result {

     panic!("panicked")
 }

 #[rustfmt::skip]
 #[admin_command]
-async fn failure(&self) -> Result<RoomMessageEventContent> {
+async fn failure(&self) -> Result {

     Err!("failed")
 }
@@ -29,20 +28,20 @@ async fn failure(&self) -> Result<RoomMessageEventContent> {
 #[inline(never)]
 #[rustfmt::skip]
 #[admin_command]
-async fn tester(&self) -> Result<RoomMessageEventContent> {
+async fn tester(&self) -> Result {

-    Ok(RoomMessageEventContent::notice_plain("legacy"))
+    self.write_str("Ok").await
 }

 #[inline(never)]
 #[rustfmt::skip]
 #[admin_command]
-async fn timer(&self) -> Result<RoomMessageEventContent> {
+async fn timer(&self) -> Result {
     let started = std::time::Instant::now();
     timed(self.body);

     let elapsed = started.elapsed();
-    Ok(RoomMessageEventContent::notice_plain(format!("completed in {elapsed:#?}")))
+    self.write_str(&format!("completed in {elapsed:#?}")).await
 }

 #[inline(never)]

@@ -1,49 +1,48 @@
 use std::fmt::Write;

-use conduwuit::Result;
+use conduwuit::{Err, Result};
 use futures::StreamExt;
-use ruma::{
-    OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent,
-};
+use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};

 use crate::{admin_command, get_room_info};

 #[admin_command]
-pub(super) async fn disable_room(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
+pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result {
     self.services.rooms.metadata.disable_room(&room_id, true);
-    Ok(RoomMessageEventContent::text_plain("Room disabled."))
+    self.write_str("Room disabled.").await
 }

 #[admin_command]
-pub(super) async fn enable_room(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
+pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result {
     self.services.rooms.metadata.disable_room(&room_id, false);
-    Ok(RoomMessageEventContent::text_plain("Room enabled."))
+    self.write_str("Room enabled.").await
 }

 #[admin_command]
-pub(super) async fn incoming_federation(&self) -> Result<RoomMessageEventContent> {
-    let map = self
-        .services
-        .rooms
-        .event_handler
-        .federation_handletime
-        .read()
-        .expect("locked");
-    let mut msg = format!("Handling {} incoming pdus:\n", map.len());
-
-    for (r, (e, i)) in map.iter() {
-        let elapsed = i.elapsed();
-        writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
-    }
-
-    Ok(RoomMessageEventContent::text_plain(&msg))
+pub(super) async fn incoming_federation(&self) -> Result {
+    let msg = {
+        let map = self
+            .services
+            .rooms
+            .event_handler
+            .federation_handletime
+            .read()
+            .expect("locked");
+
+        let mut msg = format!("Handling {} incoming pdus:\n", map.len());
+        for (r, (e, i)) in map.iter() {
+            let elapsed = i.elapsed();
+            writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
+        }
+
+        msg
+    };
+
+    self.write_str(&msg).await
 }

 #[admin_command]
-pub(super) async fn fetch_support_well_known(
-    &self,
-    server_name: Box<ServerName>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName) -> Result {
     let response = self
         .services
         .client
@@ -55,54 +54,44 @@ pub(super) async fn fetch_support_well_known(
     let text = response.text().await?;

     if text.is_empty() {
-        return Ok(RoomMessageEventContent::text_plain("Response text/body is empty."));
+        return Err!("Response text/body is empty.");
     }

     if text.len() > 1500 {
-        return Ok(RoomMessageEventContent::text_plain(
+        return Err!(
             "Response text/body is over 1500 characters, assuming no support well-known.",
-        ));
+        );
     }

     let json: serde_json::Value = match serde_json::from_str(&text) {
         | Ok(json) => json,
         | Err(_) => {
-            return Ok(RoomMessageEventContent::text_plain(
-                "Response text/body is not valid JSON.",
-            ));
+            return Err!("Response text/body is not valid JSON.",);
         },
     };

     let pretty_json: String = match serde_json::to_string_pretty(&json) {
         | Ok(json) => json,
         | Err(_) => {
-            return Ok(RoomMessageEventContent::text_plain(
-                "Response text/body is not valid JSON.",
-            ));
+            return Err!("Response text/body is not valid JSON.",);
         },
     };

-    Ok(RoomMessageEventContent::notice_markdown(format!(
-        "Got JSON response:\n\n```json\n{pretty_json}\n```"
-    )))
+    self.write_str(&format!("Got JSON response:\n\n```json\n{pretty_json}\n```"))
+        .await
 }

 #[admin_command]
-pub(super) async fn remote_user_in_rooms(
-    &self,
-    user_id: Box<UserId>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result {
     if user_id.server_name() == self.services.server.name {
-        return Ok(RoomMessageEventContent::text_plain(
+        return Err!(
             "User belongs to our server, please use `list-joined-rooms` user admin command \
             instead.",
-        ));
+        );
     }

     if !self.services.users.exists(&user_id).await {
-        return Ok(RoomMessageEventContent::text_plain(
-            "Remote user does not exist in our database.",
-        ));
+        return Err!("Remote user does not exist in our database.",);
     }

     let mut rooms: Vec<(OwnedRoomId, u64, String)> = self
@@ -115,21 +104,19 @@ pub(super) async fn remote_user_in_rooms(
         .await;

     if rooms.is_empty() {
-        return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
+        return Err!("User is not in any rooms.");
     }

     rooms.sort_by_key(|r| r.1);
     rooms.reverse();

-    let output = format!(
-        "Rooms {user_id} shares with us ({}):\n```\n{}\n```",
-        rooms.len(),
-        rooms
-            .iter()
-            .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}"))
-            .collect::<Vec<_>>()
-            .join("\n")
-    );
+    let num = rooms.len();
+    let body = rooms
+        .iter()
+        .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}"))
+        .collect::<Vec<_>>()
+        .join("\n");

-    Ok(RoomMessageEventContent::text_markdown(output))
+    self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",))
+        .await
 }

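Note the second half of that refactor: user-facing failures no longer come back as a *successful* `Ok(RoomMessageEventContent::text_plain(...))` reply carrying an error string; they return through conduwuity's `Err!` macro as real `Err` values. A toy stand-in for the macro (the real one builds a `conduwuit::Error` and accepts format arguments):

```rust
// Toy version of the `Err!` pattern: the macro expands to an `Err`
// holding a formatted message, so `return err!("...")` both formats
// the reply and aborts the handler through the normal Result path.
#[derive(Debug)]
struct Error(String);

type Result<T = ()> = std::result::Result<T, Error>;

macro_rules! err {
    ($($fmt:tt)+) => {
        Err(Error(format!($($fmt)+)))
    };
}

fn remote_user_in_rooms(is_local: bool, exists: bool) -> Result {
    if is_local {
        return err!("User belongs to our server, use `list-joined-rooms` instead.");
    }
    if !exists {
        return err!("Remote user does not exist in our database.");
    }
    Ok(())
}

fn main() {
    println!("{:?}", remote_user_in_rooms(false, false));
}
```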
@@ -2,7 +2,7 @@ mod commands;

 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::{RoomId, ServerName, UserId};
+use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};

 use crate::admin_command_dispatch;
@@ -14,12 +14,12 @@ pub(super) enum FederationCommand {

     /// - Disables incoming federation handling for a room.
     DisableRoom {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     /// - Enables incoming federation handling for a room again.
     EnableRoom {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     /// - Fetch `/.well-known/matrix/support` from the specified server
@@ -32,11 +32,11 @@ pub(super) enum FederationCommand {
     /// moderation, and security inquiries. This command provides a way to
     /// easily fetch that information.
     FetchSupportWellKnown {
-        server_name: Box<ServerName>,
+        server_name: OwnedServerName,
     },

     /// - Lists all the rooms we share/track with the specified *remote* user
     RemoteUserInRooms {
-        user_id: Box<UserId>,
+        user_id: OwnedUserId,
     },
 }

@@ -1,26 +1,22 @@
 use std::time::Duration;

 use conduwuit::{
-    Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago,
+    Err, Result, debug, debug_info, debug_warn, error, info, trace,
+    utils::time::parse_timepoint_ago, warn,
 };
 use conduwuit_service::media::Dim;
-use ruma::{
-    EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName,
-    events::room::message::RoomMessageEventContent,
-};
+use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};

 use crate::{admin_command, utils::parse_local_user_id};

 #[admin_command]
 pub(super) async fn delete(
     &self,
-    mxc: Option<Box<MxcUri>>,
-    event_id: Option<Box<EventId>>,
-) -> Result<RoomMessageEventContent> {
+    mxc: Option<OwnedMxcUri>,
+    event_id: Option<OwnedEventId>,
+) -> Result {
     if event_id.is_some() && mxc.is_some() {
-        return Ok(RoomMessageEventContent::text_plain(
-            "Please specify either an MXC or an event ID, not both.",
-        ));
+        return Err!("Please specify either an MXC or an event ID, not both.",);
     }

     if let Some(mxc) = mxc {
@@ -30,9 +26,7 @@ pub(super) async fn delete(
             .delete(&mxc.as_str().try_into()?)
             .await?;

-        return Ok(RoomMessageEventContent::text_plain(
-            "Deleted the MXC from our database and on our filesystem.",
-        ));
+        return Err!("Deleted the MXC from our database and on our filesystem.",);
     }

     if let Some(event_id) = event_id {
@@ -113,41 +107,36 @@ pub(super) async fn delete(
                         let final_url = url.to_string().replace('"', "");
                         mxc_urls.push(final_url);
                     } else {
-                        info!(
+                        warn!(
                             "Found a URL in the event ID {event_id} but did not \
                             start with mxc://, ignoring"
                         );
                     }
                 } else {
-                    info!("No \"url\" key in \"file\" key.");
+                    error!("No \"url\" key in \"file\" key.");
                 }
             }
         }
     } else {
-        return Ok(RoomMessageEventContent::text_plain(
+        return Err!(
             "Event ID does not have a \"content\" key or failed parsing the \
             event ID JSON.",
-        ));
+        );
     }
 } else {
-    return Ok(RoomMessageEventContent::text_plain(
+    return Err!(
         "Event ID does not have a \"content\" key, this is not a message or an \
         event type that contains media.",
-    ));
+    );
 }
 },
 | _ => {
-    return Ok(RoomMessageEventContent::text_plain(
-        "Event ID does not exist or is not known to us.",
-    ));
+    return Err!("Event ID does not exist or is not known to us.",);
 },
 }

 if mxc_urls.is_empty() {
     info!("Parsed event ID {event_id} but did not contain any MXC URLs.");
-    return Ok(RoomMessageEventContent::text_plain(
-        "Parsed event ID but found no MXC URLs.",
-    ));
+    return Err!("Parsed event ID but found no MXC URLs.",);
 }

 let mut mxc_deletion_count: usize = 0;
@@ -170,27 +159,27 @@ pub(super) async fn delete(
         }
     }

-    return Ok(RoomMessageEventContent::text_plain(format!(
-        "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from \
-        event ID {event_id}."
-    )));
+    return self
+        .write_str(&format!(
+            "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem \
+            from event ID {event_id}."
+        ))
+        .await;
 }

-Ok(RoomMessageEventContent::text_plain(
+Err!(
     "Please specify either an MXC using --mxc or an event ID using --event-id of the \
-    message containing an image. See --help for details.",
-))
+    message containing an image. See --help for details."
+)
 }

 #[admin_command]
-pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> {
+pub(super) async fn delete_list(&self) -> Result {
     if self.body.len() < 2
         || !self.body[0].trim().starts_with("```")
         || self.body.last().unwrap_or(&"").trim() != "```"
     {
-        return Ok(RoomMessageEventContent::text_plain(
-            "Expected code block in command body. Add --help for details.",
-        ));
+        return Err!("Expected code block in command body. Add --help for details.",);
     }

     let mut failed_parsed_mxcs: usize = 0;
@@ -204,7 +193,6 @@ pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> {
             .try_into()
             .inspect_err(|e| {
                 debug_warn!("Failed to parse user-provided MXC URI: {e}");
-
                 failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1);
             })
             .ok()
@@ -227,10 +215,11 @@ pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> {
         }
     }

-    Ok(RoomMessageEventContent::text_plain(format!(
+    self.write_str(&format!(
         "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \
         and the filesystem. {failed_parsed_mxcs} MXCs failed to be parsed from the database.",
-    )))
+    ))
+    .await
 }

 #[admin_command]
@@ -240,11 +229,9 @@ pub(super) async fn delete_past_remote_media(
     before: bool,
     after: bool,
     yes_i_want_to_delete_local_media: bool,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
     if before && after {
-        return Ok(RoomMessageEventContent::text_plain(
-            "Please only pick one argument, --before or --after.",
-        ));
+        return Err!("Please only pick one argument, --before or --after.",);
     }
     assert!(!(before && after), "--before and --after should not be specified together");
@@ -260,35 +247,28 @@ pub(super) async fn delete_past_remote_media(
         )
         .await?;

-    Ok(RoomMessageEventContent::text_plain(format!(
-        "Deleted {deleted_count} total files.",
-    )))
+    self.write_str(&format!("Deleted {deleted_count} total files.",))
+        .await
 }

 #[admin_command]
-pub(super) async fn delete_all_from_user(
-    &self,
-    username: String,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn delete_all_from_user(&self, username: String) -> Result {
     let user_id = parse_local_user_id(self.services, &username)?;

     let deleted_count = self.services.media.delete_from_user(&user_id).await?;

-    Ok(RoomMessageEventContent::text_plain(format!(
-        "Deleted {deleted_count} total files.",
-    )))
+    self.write_str(&format!("Deleted {deleted_count} total files.",))
+        .await
 }

 #[admin_command]
 pub(super) async fn delete_all_from_server(
     &self,
-    server_name: Box<ServerName>,
+    server_name: OwnedServerName,
     yes_i_want_to_delete_local_media: bool,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
     if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media {
-        return Ok(RoomMessageEventContent::text_plain(
-            "This command only works for remote media by default.",
-        ));
+        return Err!("This command only works for remote media by default.",);
     }

     let Ok(all_mxcs) = self
@@ -298,9 +278,7 @@ pub(super) async fn delete_all_from_server(
         .await
         .inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}"))
     else {
-        return Ok(RoomMessageEventContent::text_plain(
-            "Failed to get MXC URIs from our database",
-        ));
+        return Err!("Failed to get MXC URIs from our database",);
     };

     let mut deleted_count: usize = 0;
@@ -336,17 +314,16 @@ pub(super) async fn delete_all_from_server(
         }
     }

-    Ok(RoomMessageEventContent::text_plain(format!(
-        "Deleted {deleted_count} total files.",
-    )))
+    self.write_str(&format!("Deleted {deleted_count} total files.",))
+        .await
 }

 #[admin_command]
-pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result<RoomMessageEventContent> {
+pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result {
     let mxc: Mxc<'_> = mxc.as_str().try_into()?;
     let metadata = self.services.media.get_metadata(&mxc).await;

-    Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```")))
+    self.write_str(&format!("```\n{metadata:#?}\n```")).await
 }

 #[admin_command]
@@ -355,7 +332,7 @@ pub(super) async fn get_remote_file(
     mxc: OwnedMxcUri,
     server: Option<OwnedServerName>,
     timeout: u32,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
     let mxc: Mxc<'_> = mxc.as_str().try_into()?;
     let timeout = Duration::from_millis(timeout.into());
     let mut result = self
@@ -368,8 +345,8 @@ pub(super) async fn get_remote_file(
     let len = result.content.as_ref().expect("content").len();
     result.content.as_mut().expect("content").clear();

-    let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```");
-    Ok(RoomMessageEventContent::notice_markdown(out))
+    self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
+        .await
 }

 #[admin_command]
@@ -380,7 +357,7 @@ pub(super) async fn get_remote_thumbnail(
     timeout: u32,
     width: u32,
     height: u32,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
     let mxc: Mxc<'_> = mxc.as_str().try_into()?;
     let timeout = Duration::from_millis(timeout.into());
     let dim = Dim::new(width, height, None);
@@ -394,6 +371,6 @@ pub(super) async fn get_remote_thumbnail(
     let len = result.content.as_ref().expect("content").len();
     result.content.as_mut().expect("content").clear();

-    let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```");
-    Ok(RoomMessageEventContent::notice_markdown(out))
+    self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
+        .await
 }

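A detail worth calling out in the media handlers: the clap-parsed `OwnedMxcUri` is converted into the borrowed `Mxc<'_>` view via `mxc.as_str().try_into()?` before reaching the media service, so validation happens once at the boundary. A toy version of that owned-to-borrowed step; `Mxc` below is a hypothetical stand-in struct, not the real ruma type:

```rust
// Toy owned -> borrowed-and-validated conversion, mirroring
// `let mxc: Mxc<'_> = mxc.as_str().try_into()?;` in the diff above.
struct Mxc<'a> {
    server_name: &'a str,
    media_id: &'a str,
}

impl<'a> TryFrom<&'a str> for Mxc<'a> {
    type Error = String;

    fn try_from(s: &'a str) -> Result<Self, Self::Error> {
        let rest = s
            .strip_prefix("mxc://")
            .ok_or_else(|| format!("{s:?} is not an mxc:// URI"))?;
        let (server_name, media_id) = rest
            .split_once('/')
            .ok_or_else(|| format!("{s:?} is missing a media ID"))?;
        Ok(Self { server_name, media_id })
    }
}

fn main() -> Result<(), String> {
    let owned = String::from("mxc://example.org/abc123"); // like OwnedMxcUri
    let mxc: Mxc<'_> = owned.as_str().try_into()?;
    println!("{} / {}", mxc.server_name, mxc.media_id);
    Ok(())
}
```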
@@ -3,7 +3,7 @@ mod commands;

 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::{EventId, MxcUri, OwnedMxcUri, OwnedServerName, ServerName};
+use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName};

 use crate::admin_command_dispatch;
@@ -15,12 +15,12 @@ pub(super) enum MediaCommand {
     Delete {
         /// The MXC URL to delete
         #[arg(long)]
-        mxc: Option<Box<MxcUri>>,
+        mxc: Option<OwnedMxcUri>,

         /// - The message event ID which contains the media and thumbnail MXC
         /// URLs
         #[arg(long)]
-        event_id: Option<Box<EventId>>,
+        event_id: Option<OwnedEventId>,
     },

     /// - Deletes a codeblock list of MXC URLs from our database and on the
@@ -57,7 +57,7 @@ pub(super) enum MediaCommand {
     /// - Deletes all remote media from the specified remote server. This will
     /// always ignore errors by default.
     DeleteAllFromServer {
-        server_name: Box<ServerName>,
+        server_name: OwnedServerName,

         /// Long argument to delete local media
         #[arg(long)]

@@ -4,7 +4,7 @@
 #![allow(clippy::too_many_arguments)]

 pub(crate) mod admin;
-pub(crate) mod command;
+pub(crate) mod context;
 pub(crate) mod processor;
 mod tests;
 pub(crate) mod utils;
@@ -23,13 +23,9 @@ extern crate conduwuit_api as api;
 extern crate conduwuit_core as conduwuit;
 extern crate conduwuit_service as service;

 pub(crate) use conduwuit::Result;
 pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch};

-pub(crate) use crate::{
-    command::Command,
-    utils::{escape_html, get_room_info},
-};
+pub(crate) use crate::{context::Context, utils::get_room_info};

 pub(crate) const PAGE_SIZE: usize = 100;

@@ -33,7 +33,7 @@ use service::{
 use tracing::Level;
 use tracing_subscriber::{EnvFilter, filter::LevelFilter};

-use crate::{Command, admin, admin::AdminCommand};
+use crate::{admin, admin::AdminCommand, context::Context};

 #[must_use]
 pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) }
@@ -58,7 +58,7 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
     | Ok(parsed) => parsed,
 };

-let context = Command {
+let context = Context {
     services: &services,
     body: &body,
     timer: SystemTime::now(),
@@ -103,7 +103,7 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {

 /// Parse and process a message from the admin room
 async fn process(
-    context: &Command<'_>,
+    context: &Context<'_>,
     command: AdminCommand,
     args: &[String],
 ) -> (Result, String) {
@@ -132,7 +132,7 @@ async fn process(
     (result, output)
 }

-fn capture_create(context: &Command<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
+fn capture_create(context: &Context<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
     let env_config = &context.services.server.config.admin_log_capture;
     let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| {
         warn!("admin_log_capture filter invalid: {e:?}");

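The processor change is a pure rename plus module move (`command::Command` becomes `context::Context`); the construction site keeps the same fields, and every `process`/`capture_create` signature just swaps the type name. A toy sketch of the shape that gets threaded through the handlers, with stand-in types rather than the real `Arc<Services>`:

```rust
// Sketch of the command context being built once per admin command and
// passed by reference, exactly as `Command` was before the rename.
use std::time::SystemTime;

struct Services; // stand-in for the real service container

struct Context<'a> {
    services: &'a Services,
    body: &'a [&'a str],
    timer: SystemTime,
}

fn process(context: &Context<'_>) {
    let _ = (context.services, context.body); // handlers read both
    println!("started at {:?}", context.timer);
}

fn main() {
    let services = Services;
    let body = ["!admin", "server", "uptime"];
    let context = Context { services: &services, body: &body, timer: SystemTime::now() };
    process(&context);
}
```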
@@ -1,7 +1,7 @@
 use clap::Subcommand;
 use conduwuit::Result;
 use futures::StreamExt;
-use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent};
+use ruma::{OwnedRoomId, OwnedUserId};

 use crate::{admin_command, admin_command_dispatch};
@@ -12,31 +12,31 @@ pub(crate) enum AccountDataCommand {
     /// - Returns all changes to the account data that happened after `since`.
     ChangesSince {
         /// Full user ID
-        user_id: Box<UserId>,
+        user_id: OwnedUserId,
         /// UNIX timestamp since (u64)
         since: u64,
         /// Optional room ID of the account data
-        room_id: Option<Box<RoomId>>,
+        room_id: Option<OwnedRoomId>,
     },

     /// - Searches the account data for a specific kind.
     AccountDataGet {
         /// Full user ID
-        user_id: Box<UserId>,
+        user_id: OwnedUserId,
         /// Account data event type
         kind: String,
         /// Optional room ID of the account data
-        room_id: Option<Box<RoomId>>,
+        room_id: Option<OwnedRoomId>,
     },
 }

 #[admin_command]
 async fn changes_since(
     &self,
-    user_id: Box<UserId>,
+    user_id: OwnedUserId,
     since: u64,
-    room_id: Option<Box<RoomId>>,
-) -> Result<RoomMessageEventContent> {
+    room_id: Option<OwnedRoomId>,
+) -> Result {
     let timer = tokio::time::Instant::now();
     let results: Vec<_> = self
         .services
@@ -46,18 +46,17 @@ async fn changes_since(
         .await;
     let query_time = timer.elapsed();

-    Ok(RoomMessageEventContent::notice_markdown(format!(
-        "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-    )))
+    self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
+        .await
 }

 #[admin_command]
 async fn account_data_get(
     &self,
-    user_id: Box<UserId>,
+    user_id: OwnedUserId,
     kind: String,
-    room_id: Option<Box<RoomId>>,
-) -> Result<RoomMessageEventContent> {
+    room_id: Option<OwnedRoomId>,
+) -> Result {
     let timer = tokio::time::Instant::now();
     let results = self
         .services
@@ -66,7 +65,6 @@ async fn account_data_get(
         .await;
     let query_time = timer.elapsed();

-    Ok(RoomMessageEventContent::notice_markdown(format!(
-        "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-    )))
+    self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
+        .await
 }

@@ -1,7 +1,8 @@
 use clap::Subcommand;
 use conduwuit::Result;
+use futures::TryStreamExt;

-use crate::Command;
+use crate::Context;

 #[derive(Debug, Subcommand)]
 /// All the getters and iterators from src/database/key_value/appservice.rs
@@ -9,7 +10,7 @@ pub(crate) enum AppserviceCommand {
     /// - Gets the appservice registration info/details from the ID as a string
     GetRegistration {
         /// Appservice registration ID
-        appservice_id: Box<str>,
+        appservice_id: String,
     },

     /// - Gets all appservice registrations with their ID and registration info
@@ -17,7 +18,7 @@ pub(crate) enum AppserviceCommand {
 }

 /// All the getters and iterators from src/database/key_value/appservice.rs
-pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result {
+pub(super) async fn process(subcommand: AppserviceCommand, context: &Context<'_>) -> Result {
     let services = context.services;

     match subcommand {
@@ -31,7 +32,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>
     },
     | AppserviceCommand::All => {
         let timer = tokio::time::Instant::now();
-        let results = services.appservice.all().await;
+        let results: Vec<_> = services.appservice.iter_db_ids().try_collect().await?;
         let query_time = timer.elapsed();

         write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")

@@ -1,8 +1,8 @@
 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::ServerName;
+use ruma::OwnedServerName;

-use crate::Command;
+use crate::Context;

 #[derive(Debug, Subcommand)]
 /// All the getters and iterators from src/database/key_value/globals.rs
@@ -11,17 +11,17 @@ pub(crate) enum GlobalsCommand {

     CurrentCount,

-    LastCheckForUpdatesId,
+    LastCheckForAnnouncementsId,

     /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
     /// for the server.
     SigningKeysFor {
-        origin: Box<ServerName>,
+        origin: OwnedServerName,
     },
 }

 /// All the getters and iterators from src/database/key_value/globals.rs
-pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result {
+pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) -> Result {
     let services = context.services;

     match subcommand {
@@ -39,9 +39,12 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -

         write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
     },
-    | GlobalsCommand::LastCheckForUpdatesId => {
+    | GlobalsCommand::LastCheckForAnnouncementsId => {
         let timer = tokio::time::Instant::now();
-        let results = services.updates.last_check_for_updates_id().await;
+        let results = services
+            .announcements
+            .last_check_for_announcements_id()
+            .await;
         let query_time = timer.elapsed();

         write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")

@@ -1,9 +1,9 @@
 use clap::Subcommand;
 use conduwuit::Result;
 use futures::StreamExt;
-use ruma::UserId;
+use ruma::OwnedUserId;

-use crate::Command;
+use crate::Context;

 #[derive(Debug, Subcommand)]
 /// All the getters and iterators from src/database/key_value/presence.rs
@@ -11,7 +11,7 @@ pub(crate) enum PresenceCommand {
     /// - Returns the latest presence event for the given user.
     GetPresence {
         /// Full user ID
-        user_id: Box<UserId>,
+        user_id: OwnedUserId,
     },

     /// - Iterator of the most recent presence updates that happened after the
@@ -23,7 +23,7 @@ pub(crate) enum PresenceCommand {
 }

 /// All the getters and iterators in key_value/presence.rs
-pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result {
+pub(super) async fn process(subcommand: PresenceCommand, context: &Context<'_>) -> Result {
     let services = context.services;

     match subcommand {

@@ -1,19 +1,19 @@
 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::UserId;
+use ruma::OwnedUserId;

-use crate::Command;
+use crate::Context;

 #[derive(Debug, Subcommand)]
 pub(crate) enum PusherCommand {
     /// - Returns all the pushers for the user.
     GetPushers {
         /// Full user ID
-        user_id: Box<UserId>,
+        user_id: OwnedUserId,
     },
 }

-pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result {
+pub(super) async fn process(subcommand: PusherCommand, context: &Context<'_>) -> Result {
     let services = context.services;

     match subcommand {

@@ -11,7 +11,6 @@ use conduwuit::{
 use conduwuit_database::Map;
 use conduwuit_service::Services;
 use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
-use ruma::events::room::message::RoomMessageEventContent;
 use tokio::time::Instant;

 use crate::{admin_command, admin_command_dispatch};
@@ -170,7 +169,7 @@ pub(super) async fn compact(
     into: Option<usize>,
     parallelism: Option<usize>,
     exhaustive: bool,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
     use conduwuit_database::compact::Options;

     let default_all_maps: Option<_> = map.is_none().then(|| {
@@ -221,17 +220,11 @@ pub(super) async fn compact(
     let results = results.await;
     let query_time = timer.elapsed();
     self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
-        .await?;
-
-    Ok(RoomMessageEventContent::text_plain(""))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_count(
-    &self,
-    map: Option<String>,
-    prefix: Option<String>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_count(&self, map: Option<String>, prefix: Option<String>) -> Result {
     let prefix = prefix.as_deref().unwrap_or(EMPTY);

     let timer = Instant::now();
@@ -242,17 +235,11 @@ pub(super) async fn raw_count(

     let query_time = timer.elapsed();
     self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```"))
-        .await?;
-
-    Ok(RoomMessageEventContent::text_plain(""))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_keys(
-    &self,
-    map: String,
-    prefix: Option<String>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_keys(&self, map: String, prefix: Option<String>) -> Result {
     writeln!(self, "```").boxed().await?;

     let map = self.services.db.get(map.as_str())?;
@@ -266,18 +253,12 @@ pub(super) async fn raw_keys(
     .await?;

     let query_time = timer.elapsed();
-    let out = format!("\n```\n\nQuery completed in {query_time:?}");
-    self.write_str(out.as_str()).await?;
-
-    Ok(RoomMessageEventContent::text_plain(""))
+    self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_keys_sizes(
-    &self,
-    map: Option<String>,
-    prefix: Option<String>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_keys_sizes(&self, map: Option<String>, prefix: Option<String>) -> Result {
     let prefix = prefix.as_deref().unwrap_or(EMPTY);

     let timer = Instant::now();
@@ -294,18 +275,12 @@ pub(super) async fn raw_keys_sizes(
     .await;

     let query_time = timer.elapsed();
-    let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}");
-    self.write_str(result.as_str()).await?;
-
-    Ok(RoomMessageEventContent::text_plain(""))
+    self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_keys_total(
-    &self,
-    map: Option<String>,
-    prefix: Option<String>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_keys_total(&self, map: Option<String>, prefix: Option<String>) -> Result {
     let prefix = prefix.as_deref().unwrap_or(EMPTY);

     let timer = Instant::now();
@@ -318,19 +293,12 @@ pub(super) async fn raw_keys_total(
     .await;

     let query_time = timer.elapsed();

     self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
-        .await?;
-
-    Ok(RoomMessageEventContent::text_plain(""))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_vals_sizes(
-    &self,
-    map: Option<String>,
-    prefix: Option<String>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_vals_sizes(&self, map: Option<String>, prefix: Option<String>) -> Result {
     let prefix = prefix.as_deref().unwrap_or(EMPTY);

     let timer = Instant::now();
@@ -348,18 +316,12 @@ pub(super) async fn raw_vals_sizes(
     .await;

     let query_time = timer.elapsed();
-    let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}");
-    self.write_str(result.as_str()).await?;
-
-    Ok(RoomMessageEventContent::text_plain(""))
+    self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_vals_total(
-    &self,
-    map: Option<String>,
-    prefix: Option<String>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_vals_total(&self, map: Option<String>, prefix: Option<String>) -> Result {
     let prefix = prefix.as_deref().unwrap_or(EMPTY);

     let timer = Instant::now();
@@ -373,19 +335,12 @@ pub(super) async fn raw_vals_total(
     .await;

     let query_time = timer.elapsed();

     self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
-        .await?;
-
-    Ok(RoomMessageEventContent::text_plain(""))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_iter(
-    &self,
-    map: String,
-    prefix: Option<String>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_iter(&self, map: String, prefix: Option<String>) -> Result {
     writeln!(self, "```").await?;

     let map = self.services.db.get(&map)?;
@@ -401,9 +356,7 @@ pub(super) async fn raw_iter(

     let query_time = timer.elapsed();
     self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
-        .await?;
-
-    Ok(RoomMessageEventContent::text_plain(""))
+        .await
 }

 #[admin_command]
@@ -412,7 +365,7 @@ pub(super) async fn raw_keys_from(
     map: String,
     start: String,
     limit: Option<usize>,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
     writeln!(self, "```").await?;

     let map = self.services.db.get(&map)?;
@@ -426,9 +379,7 @@ pub(super) async fn raw_keys_from(

     let query_time = timer.elapsed();
     self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
-        .await?;
-
-    Ok(RoomMessageEventContent::text_plain(""))
+        .await
 }

 #[admin_command]
@@ -437,7 +388,7 @@ pub(super) async fn raw_iter_from(
     map: String,
     start: String,
     limit: Option<usize>,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
     let map = self.services.db.get(&map)?;
     let timer = Instant::now();
     let result = map
@@ -449,41 +400,38 @@ pub(super) async fn raw_iter_from(
     .await?;

     let query_time = timer.elapsed();
-    Ok(RoomMessageEventContent::notice_markdown(format!(
-        "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-    )))
+    self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_del(&self, map: String, key: String) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_del(&self, map: String, key: String) -> Result {
     let map = self.services.db.get(&map)?;
     let timer = Instant::now();
     map.remove(&key);
-    let query_time = timer.elapsed();

-    Ok(RoomMessageEventContent::notice_markdown(format!(
-        "Operation completed in {query_time:?}"
-    )))
+    let query_time = timer.elapsed();
+    self.write_str(&format!("Operation completed in {query_time:?}"))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_get(&self, map: String, key: String) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_get(&self, map: String, key: String) -> Result {
     let map = self.services.db.get(&map)?;
     let timer = Instant::now();
     let handle = map.get(&key).await?;

     let query_time = timer.elapsed();
     let result = String::from_utf8_lossy(&handle);

-    Ok(RoomMessageEventContent::notice_markdown(format!(
-        "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"
-    )))
+    self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"))
+        .await
 }

 #[admin_command]
-pub(super) async fn raw_maps(&self) -> Result<RoomMessageEventContent> {
+pub(super) async fn raw_maps(&self) -> Result {
     let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect();

-    Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}")))
+    self.write_str(&format!("{list:#?}")).await
 }

 fn with_maps_or<'a>(

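After this refactor, every `raw_*` query follows the same shape: time the database call, then emit exactly one formatted message through `write_str`, instead of writing the output and then tailing an empty `text_plain` reply. A runnable toy version of the pattern, with a `BTreeMap` standing in for the database map and a synchronous `write_str` stand-in:

```rust
// Toy version of the timed-query pattern used by the raw_* commands.
use std::{collections::BTreeMap, time::Instant};

// stand-in for Context::write_str; the real one is async
fn write_str(s: &str) {
    print!("{s}");
}

fn raw_get(map: &BTreeMap<String, String>, key: &str) {
    let timer = Instant::now();
    let result = map.get(key);
    let query_time = timer.elapsed();

    // one formatted message once the timing is known; the real code
    // additionally wraps the Debug output in a Markdown code fence
    write_str(&format!("Query completed in {query_time:?}: {result:?}\n"));
}

fn main() {
    let map = BTreeMap::from([("key".to_owned(), "value".to_owned())]);
    raw_get(&map, "key");
}
```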
@@ -1,7 +1,7 @@
 use clap::Subcommand;
 use conduwuit::{Result, utils::time};
 use futures::StreamExt;
-use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent};
+use ruma::OwnedServerName;

 use crate::{admin_command, admin_command_dispatch};
@@ -21,10 +21,7 @@ pub(crate) enum ResolverCommand {
 }

 #[admin_command]
-async fn destinations_cache(
-    &self,
-    server_name: Option<OwnedServerName>,
-) -> Result<RoomMessageEventContent> {
+async fn destinations_cache(&self, server_name: Option<OwnedServerName>) -> Result {
     use service::resolver::cache::CachedDest;

     writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?;
@@ -44,11 +41,11 @@ async fn destinations_cache(
         .await?;
     }

-    Ok(RoomMessageEventContent::notice_plain(""))
+    Ok(())
 }

 #[admin_command]
-async fn overrides_cache(&self, server_name: Option<String>) -> Result<RoomMessageEventContent> {
+async fn overrides_cache(&self, server_name: Option<String>) -> Result {
     use service::resolver::cache::CachedOverride;

     writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?;
@@ -70,5 +67,5 @@ async fn overrides_cache(&self, server_name: Option<String>) -> Result<RoomMessa
         .await?;
     }

-    Ok(RoomMessageEventContent::notice_plain(""))
+    Ok(())
 }

@@ -1,22 +1,22 @@
 use clap::Subcommand;
 use conduwuit::Result;
 use futures::StreamExt;
-use ruma::{RoomAliasId, RoomId};
+use ruma::{OwnedRoomAliasId, OwnedRoomId};

-use crate::Command;
+use crate::Context;

 #[derive(Debug, Subcommand)]
 /// All the getters and iterators from src/database/key_value/rooms/alias.rs
 pub(crate) enum RoomAliasCommand {
     ResolveLocalAlias {
         /// Full room alias
-        alias: Box<RoomAliasId>,
+        alias: OwnedRoomAliasId,
     },

     /// - Iterator of all our local room aliases for the room ID
     LocalAliasesForRoom {
         /// Full room ID
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     /// - Iterator of all our local aliases in our database with their room IDs
@@ -24,7 +24,7 @@ pub(crate) enum RoomAliasCommand {
 }

 /// All the getters and iterators in src/database/key_value/rooms/alias.rs
-pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) -> Result {
+pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>) -> Result {
     let services = context.services;

     match subcommand {

@@ -1,85 +1,85 @@
 use clap::Subcommand;
-use conduwuit::{Error, Result};
+use conduwuit::Result;
 use futures::StreamExt;
-use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent};
+use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};

-use crate::Command;
+use crate::Context;

 #[derive(Debug, Subcommand)]
 pub(crate) enum RoomStateCacheCommand {
     ServerInRoom {
-        server: Box<ServerName>,
-        room_id: Box<RoomId>,
+        server: OwnedServerName,
+        room_id: OwnedRoomId,
     },

     RoomServers {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     ServerRooms {
-        server: Box<ServerName>,
+        server: OwnedServerName,
     },

     RoomMembers {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     LocalUsersInRoom {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     ActiveLocalUsersInRoom {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     RoomJoinedCount {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     RoomInvitedCount {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     RoomUserOnceJoined {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     RoomMembersInvited {
-        room_id: Box<RoomId>,
+        room_id: OwnedRoomId,
     },

     GetInviteCount {
-        room_id: Box<RoomId>,
-        user_id: Box<UserId>,
+        room_id: OwnedRoomId,
+        user_id: OwnedUserId,
     },

     GetLeftCount {
-        room_id: Box<RoomId>,
-        user_id: Box<UserId>,
+        room_id: OwnedRoomId,
+        user_id: OwnedUserId,
     },

     RoomsJoined {
-        user_id: Box<UserId>,
+        user_id: OwnedUserId,
     },

     RoomsLeft {
-        user_id: Box<UserId>,
+        user_id: OwnedUserId,
     },

     RoomsInvited {
-        user_id: Box<UserId>,
+        user_id: OwnedUserId,
     },

     InviteState {
-        user_id: Box<UserId>,
-        room_id: Box<RoomId>,
+        user_id: OwnedUserId,
+        room_id: OwnedRoomId,
     },
 }

-pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result {
+pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context<'_>) -> Result {
     let services = context.services;

-    let c = match subcommand {
+    match subcommand {
     | RoomStateCacheCommand::ServerInRoom { server, room_id } => {
         let timer = tokio::time::Instant::now();
         let result = services
@@ -89,9 +89,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::RoomServers { room_id } => {
         let timer = tokio::time::Instant::now();
@@ -104,9 +106,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::ServerRooms { server } => {
         let timer = tokio::time::Instant::now();
@@ -119,9 +123,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::RoomMembers { room_id } => {
         let timer = tokio::time::Instant::now();
@@ -134,9 +140,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::LocalUsersInRoom { room_id } => {
         let timer = tokio::time::Instant::now();
@@ -149,9 +157,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => {
         let timer = tokio::time::Instant::now();
@@ -164,18 +174,22 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::RoomJoinedCount { room_id } => {
         let timer = tokio::time::Instant::now();
         let results = services.rooms.state_cache.room_joined_count(&room_id).await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::RoomInvitedCount { room_id } => {
         let timer = tokio::time::Instant::now();
@@ -186,9 +200,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::RoomUserOnceJoined { room_id } => {
         let timer = tokio::time::Instant::now();
@@ -201,9 +217,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::RoomMembersInvited { room_id } => {
         let timer = tokio::time::Instant::now();
@@ -216,9 +234,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::GetInviteCount { room_id, user_id } => {
         let timer = tokio::time::Instant::now();
@@ -229,9 +249,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::GetLeftCount { room_id, user_id } => {
         let timer = tokio::time::Instant::now();
@@ -242,9 +264,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::RoomsJoined { user_id } => {
         let timer = tokio::time::Instant::now();
@@ -257,9 +281,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::RoomsInvited { user_id } => {
         let timer = tokio::time::Instant::now();
@@ -271,9 +297,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+            ))
+            .await
     },
     | RoomStateCacheCommand::RoomsLeft { user_id } => {
         let timer = tokio::time::Instant::now();
@@ -285,9 +313,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
         .await;
         let query_time = timer.elapsed();

-        Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
-            "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-        )))
+        context
+            .write_str(&format!(
+                "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||
))
|
||||
.await
|
||||
},
|
||||
| RoomStateCacheCommand::InviteState { user_id, room_id } => {
|
||||
let timer = tokio::time::Instant::now();
|
||||
|
@ -298,13 +328,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
|
|||
.await;
|
||||
let query_time = timer.elapsed();
|
||||
|
||||
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||
)))
|
||||
context
|
||||
.write_str(&format!(
|
||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||
))
|
||||
.await
|
||||
},
|
||||
}?;
|
||||
|
||||
context.write_str(c.body()).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
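Note: every hunk in the file above makes the same change. Where the old code allocated a `RoomMessageEventContent` and returned it for the dispatcher to post, each arm now streams the identical text through the command context's `write_str` and returns its result. A minimal self-contained sketch of the shape, with a hypothetical `Context` stub standing in for the crate's real command context:

```rust
// Hypothetical stub standing in for the admin command context in this diff;
// the real type and an async `write_str` live in the crate itself.
struct Context {
    out: String,
}

impl Context {
    // Assumed to append to the command's reply buffer and report success,
    // mirroring how the converted arms end with `.write_str(..).await`.
    fn write_str(&mut self, s: &str) -> Result<(), std::fmt::Error> {
        self.out.push_str(s);
        Ok(())
    }
}

fn render_query<R: std::fmt::Debug>(
    ctx: &mut Context,
    query_time: std::time::Duration,
    results: &R,
) -> Result<(), std::fmt::Error> {
    // Same message shape the arms above produce: elapsed time, then a
    // Debug dump of the results in a fenced code block.
    ctx.write_str(&format!(
        "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
    ))
}
```

One plausible motivation is that output can be written incrementally rather than accumulated into a single event up front.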
@@ -1,7 +1,7 @@
 use clap::Subcommand;
 use conduwuit::{PduCount, Result, utils::stream::TryTools};
 use futures::TryStreamExt;
-use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent};
+use ruma::OwnedRoomOrAliasId;
 
 use crate::{admin_command, admin_command_dispatch};
 
@@ -24,7 +24,7 @@ pub(crate) enum RoomTimelineCommand {
 }
 
 #[admin_command]
-pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result<RoomMessageEventContent> {
+pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result {
 	let room_id = self.services.rooms.alias.resolve(&room_id).await?;
 
 	let result = self
@@ -34,7 +34,7 @@ pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result<RoomMessa
 		.last_timeline_count(None, &room_id)
 		.await?;
 
-	Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}")))
+	self.write_str(&format!("{result:#?}")).await
 }
 
 #[admin_command]
@@ -43,7 +43,7 @@ pub(super) async fn pdus(
 	room_id: OwnedRoomOrAliasId,
 	from: Option<String>,
 	limit: Option<usize>,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
 	let room_id = self.services.rooms.alias.resolve(&room_id).await?;
 
 	let from: Option<PduCount> = from.as_deref().map(str::parse).transpose()?;
@@ -57,5 +57,5 @@ pub(super) async fn pdus(
 		.try_collect()
 		.await?;
 
-	Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}")))
+	self.write_str(&format!("{result:#?}")).await
 }
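The signature change from `Result<RoomMessageEventContent>` to bare `Result` only type-checks if the crate's `Result` is an alias with a defaulted success type. That alias is not shown in this diff, so the following is an assumption about its shape rather than a quote of it:

```rust
// Assumed shape of the crate-wide alias; `Error` is a placeholder here for
// the crate's real error type.
pub struct Error;

pub type Result<T = (), E = Error> = core::result::Result<T, E>;

// Both spellings from the diff are then valid signatures:
fn returns_unit() -> Result { Ok(()) }       // e.g. the new `pdus(..) -> Result`
fn returns_value() -> Result<u64> { Ok(7) }  // success can still carry a value
```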
@@ -1,10 +1,10 @@
 use clap::Subcommand;
-use conduwuit::Result;
+use conduwuit::{Err, Result};
 use futures::StreamExt;
-use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent};
+use ruma::{OwnedServerName, OwnedUserId};
 use service::sending::Destination;
 
-use crate::Command;
+use crate::Context;
 
 #[derive(Debug, Subcommand)]
 /// All the getters and iterators from src/database/key_value/sending.rs
@@ -27,9 +27,9 @@ pub(crate) enum SendingCommand {
 		#[arg(short, long)]
 		appservice_id: Option<String>,
 		#[arg(short, long)]
-		server_name: Option<Box<ServerName>>,
+		server_name: Option<OwnedServerName>,
 		#[arg(short, long)]
-		user_id: Option<Box<UserId>>,
+		user_id: Option<OwnedUserId>,
 		#[arg(short, long)]
 		push_key: Option<String>,
 	},
@@ -49,30 +49,20 @@ pub(crate) enum SendingCommand {
 		#[arg(short, long)]
 		appservice_id: Option<String>,
 		#[arg(short, long)]
-		server_name: Option<Box<ServerName>>,
+		server_name: Option<OwnedServerName>,
 		#[arg(short, long)]
-		user_id: Option<Box<UserId>>,
+		user_id: Option<OwnedUserId>,
 		#[arg(short, long)]
 		push_key: Option<String>,
 	},
 
 	GetLatestEduCount {
-		server_name: Box<ServerName>,
+		server_name: OwnedServerName,
 	},
 }
 
 /// All the getters and iterators in key_value/sending.rs
-pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result {
-	let c = reprocess(subcommand, context).await?;
-	context.write_str(c.body()).await?;
-	Ok(())
-}
-
-/// All the getters and iterators in key_value/sending.rs
-pub(super) async fn reprocess(
-	subcommand: SendingCommand,
-	context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -> Result {
 	let services = context.services;
 
 	match subcommand {
@@ -82,9 +72,11 @@ pub(super) async fn reprocess(
 			let active_requests = results.collect::<Vec<_>>().await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
-			)))
+			context
+				.write_str(&format!(
+					"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
+				))
+				.await
 		},
 		| SendingCommand::QueuedRequests {
 			appservice_id,
@@ -97,19 +89,19 @@ pub(super) async fn reprocess(
 				&& user_id.is_none()
 				&& push_key.is_none()
 			{
-				return Ok(RoomMessageEventContent::text_plain(
+				return Err!(
 					"An appservice ID, server name, or a user ID with push key must be \
 					 specified via arguments. See --help for more details.",
-				));
+				);
 			}
 			let timer = tokio::time::Instant::now();
 			let results = match (appservice_id, server_name, user_id, push_key) {
 				| (Some(appservice_id), None, None, None) => {
 					if appservice_id.is_empty() {
-						return Ok(RoomMessageEventContent::text_plain(
+						return Err!(
 							"An appservice ID, server name, or a user ID with push key must be \
 							 specified via arguments. See --help for more details.",
-						));
+						);
 					}
 
 					services
@@ -120,40 +112,42 @@ pub(super) async fn reprocess(
 				| (None, Some(server_name), None, None) => services
 					.sending
 					.db
-					.queued_requests(&Destination::Federation(server_name.into())),
+					.queued_requests(&Destination::Federation(server_name)),
 				| (None, None, Some(user_id), Some(push_key)) => {
 					if push_key.is_empty() {
-						return Ok(RoomMessageEventContent::text_plain(
+						return Err!(
 							"An appservice ID, server name, or a user ID with push key must be \
 							 specified via arguments. See --help for more details.",
-						));
+						);
 					}
 
 					services
						.sending
						.db
-						.queued_requests(&Destination::Push(user_id.into(), push_key))
+						.queued_requests(&Destination::Push(user_id, push_key))
 				},
 				| (Some(_), Some(_), Some(_), Some(_)) => {
-					return Ok(RoomMessageEventContent::text_plain(
+					return Err!(
 						"An appservice ID, server name, or a user ID with push key must be \
 						 specified via arguments. Not all of them See --help for more details.",
-					));
+					);
 				},
 				| _ => {
-					return Ok(RoomMessageEventContent::text_plain(
+					return Err!(
 						"An appservice ID, server name, or a user ID with push key must be \
 						 specified via arguments. See --help for more details.",
-					));
+					);
 				},
 			};
 
 			let queued_requests = results.collect::<Vec<_>>().await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```"
-			)))
+			context
+				.write_str(&format!(
+					"Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```"
+				))
+				.await
 		},
 		| SendingCommand::ActiveRequestsFor {
 			appservice_id,
@@ -166,20 +160,20 @@ pub(super) async fn reprocess(
 				&& user_id.is_none()
 				&& push_key.is_none()
 			{
-				return Ok(RoomMessageEventContent::text_plain(
+				return Err!(
 					"An appservice ID, server name, or a user ID with push key must be \
 					 specified via arguments. See --help for more details.",
-				));
+				);
 			}
 
 			let timer = tokio::time::Instant::now();
 			let results = match (appservice_id, server_name, user_id, push_key) {
 				| (Some(appservice_id), None, None, None) => {
 					if appservice_id.is_empty() {
-						return Ok(RoomMessageEventContent::text_plain(
+						return Err!(
 							"An appservice ID, server name, or a user ID with push key must be \
 							 specified via arguments. See --help for more details.",
-						));
+						);
 					}
 
 					services
@@ -190,49 +184,53 @@ pub(super) async fn reprocess(
 				| (None, Some(server_name), None, None) => services
 					.sending
 					.db
-					.active_requests_for(&Destination::Federation(server_name.into())),
+					.active_requests_for(&Destination::Federation(server_name)),
 				| (None, None, Some(user_id), Some(push_key)) => {
 					if push_key.is_empty() {
-						return Ok(RoomMessageEventContent::text_plain(
+						return Err!(
 							"An appservice ID, server name, or a user ID with push key must be \
 							 specified via arguments. See --help for more details.",
-						));
+						);
 					}
 
 					services
 						.sending
 						.db
-						.active_requests_for(&Destination::Push(user_id.into(), push_key))
+						.active_requests_for(&Destination::Push(user_id, push_key))
 				},
 				| (Some(_), Some(_), Some(_), Some(_)) => {
-					return Ok(RoomMessageEventContent::text_plain(
+					return Err!(
 						"An appservice ID, server name, or a user ID with push key must be \
 						 specified via arguments. Not all of them See --help for more details.",
-					));
+					);
 				},
 				| _ => {
-					return Ok(RoomMessageEventContent::text_plain(
+					return Err!(
 						"An appservice ID, server name, or a user ID with push key must be \
 						 specified via arguments. See --help for more details.",
-					));
+					);
 				},
 			};
 
 			let active_requests = results.collect::<Vec<_>>().await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
-			)))
+			context
+				.write_str(&format!(
+					"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
+				))
+				.await
 		},
 		| SendingCommand::GetLatestEduCount { server_name } => {
 			let timer = tokio::time::Instant::now();
 			let results = services.sending.db.get_latest_educount(&server_name).await;
 			let query_time = timer.elapsed();
 
-			Ok(RoomMessageEventContent::notice_markdown(format!(
-				"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
-			)))
+			context
+				.write_str(&format!(
+					"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
+				))
+				.await
 		},
 	}
 }
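Across this file, guard clauses that used to answer with a successful plain-text notice now bail out through `Err!`. The macro's definition is not part of this diff; judging by its call sites (`return Err!(..);` and bare `Err!(..)` as a match-arm value), it expands to an `Err` expression built from formatted arguments. A rough self-contained stand-in, not the crate's definition:

```rust
// Rough stand-in for the assumed semantics of conduwuit's `Err!` macro:
// format the arguments and yield the `Err` variant as an expression, so it
// works both after `return` and as a match-arm value.
macro_rules! Err {
    ($($arg:tt)*) => { Err(format!($($arg)*)) };
}

fn validate(appservice_id: Option<&str>) -> Result<(), String> {
    if appservice_id.is_none() {
        // What used to be a successful plain-text reply is now a real error.
        return Err!("An appservice ID, server name, or a user ID with push key must be specified");
    }
    Ok(())
}
```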
@@ -1,6 +1,6 @@
 use clap::Subcommand;
 use conduwuit::Result;
-use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent};
+use ruma::{OwnedEventId, OwnedRoomOrAliasId};
 
 use crate::{admin_command, admin_command_dispatch};
 
@@ -18,10 +18,7 @@ pub(crate) enum ShortCommand {
 }
 
 #[admin_command]
-pub(super) async fn short_event_id(
-	&self,
-	event_id: OwnedEventId,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn short_event_id(&self, event_id: OwnedEventId) -> Result {
 	let shortid = self
 		.services
 		.rooms
@@ -29,17 +26,14 @@ pub(super) async fn short_event_id(
 		.get_shorteventid(&event_id)
 		.await?;
 
-	Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}")))
+	self.write_str(&format!("{shortid:#?}")).await
 }
 
 #[admin_command]
-pub(super) async fn short_room_id(
-	&self,
-	room_id: OwnedRoomOrAliasId,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn short_room_id(&self, room_id: OwnedRoomOrAliasId) -> Result {
 	let room_id = self.services.rooms.alias.resolve(&room_id).await?;
 
 	let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?;
 
-	Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}")))
+	self.write_str(&format!("{shortid:#?}")).await
 }
@@ -1,9 +1,7 @@
 use clap::Subcommand;
 use conduwuit::Result;
 use futures::stream::StreamExt;
-use ruma::{
-	OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent,
-};
+use ruma::{OwnedDeviceId, OwnedRoomId, OwnedUserId};
 
 use crate::{admin_command, admin_command_dispatch};
 
@@ -99,11 +97,7 @@ pub(crate) enum UsersCommand {
 }
 
 #[admin_command]
-async fn get_shared_rooms(
-	&self,
-	user_a: OwnedUserId,
-	user_b: OwnedUserId,
-) -> Result<RoomMessageEventContent> {
+async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result: Vec<_> = self
 		.services
@@ -115,9 +109,8 @@ async fn get_shared_rooms(
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
@@ -127,7 +120,7 @@ async fn get_backup_session(
 	version: String,
 	room_id: OwnedRoomId,
 	session_id: String,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self
 		.services
@@ -136,9 +129,8 @@ async fn get_backup_session(
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
@@ -147,7 +139,7 @@ async fn get_room_backups(
 	user_id: OwnedUserId,
 	version: String,
 	room_id: OwnedRoomId,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self
 		.services
@@ -156,32 +148,22 @@ async fn get_room_backups(
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_all_backups(
-	&self,
-	user_id: OwnedUserId,
-	version: String,
-) -> Result<RoomMessageEventContent> {
+async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self.services.key_backups.get_all(&user_id, &version).await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_backup_algorithm(
-	&self,
-	user_id: OwnedUserId,
-	version: String,
-) -> Result<RoomMessageEventContent> {
+async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self
 		.services
@@ -190,16 +172,12 @@ async fn get_backup_algorithm(
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_latest_backup_version(
-	&self,
-	user_id: OwnedUserId,
-) -> Result<RoomMessageEventContent> {
+async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self
 		.services
@@ -208,36 +186,33 @@ async fn get_latest_backup_version(
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
+async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self.services.key_backups.get_latest_backup(&user_id).await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn iter_users(&self) -> Result<RoomMessageEventContent> {
+async fn iter_users(&self) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result: Vec<OwnedUserId> = self.services.users.stream().map(Into::into).collect().await;
 
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn iter_users2(&self) -> Result<RoomMessageEventContent> {
+async fn iter_users2(&self) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result: Vec<_> = self.services.users.stream().collect().await;
 	let result: Vec<_> = result
@@ -248,35 +223,32 @@ async fn iter_users2(&self) -> Result<RoomMessageEventContent> {
 
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn count_users(&self) -> Result<RoomMessageEventContent> {
+async fn count_users(&self) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self.services.users.count().await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn password_hash(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
+async fn password_hash(&self, user_id: OwnedUserId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self.services.users.password_hash(&user_id).await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn list_devices(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
+async fn list_devices(&self, user_id: OwnedUserId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let devices = self
 		.services
@@ -288,13 +260,12 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result<RoomMessageEventCon
 
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
+async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let devices = self
 		.services
@@ -304,17 +275,12 @@ async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result<RoomMessag
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_device_metadata(
-	&self,
-	user_id: OwnedUserId,
-	device_id: OwnedDeviceId,
-) -> Result<RoomMessageEventContent> {
+async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let device = self
 		.services
@@ -323,28 +289,22 @@ async fn get_device_metadata(
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_devices_version(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
+async fn get_devices_version(&self, user_id: OwnedUserId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let device = self.services.users.get_devicelist_version(&user_id).await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn count_one_time_keys(
-	&self,
-	user_id: OwnedUserId,
-	device_id: OwnedDeviceId,
-) -> Result<RoomMessageEventContent> {
+async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self
 		.services
@@ -353,17 +313,12 @@ async fn count_one_time_keys(
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_device_keys(
-	&self,
-	user_id: OwnedUserId,
-	device_id: OwnedDeviceId,
-) -> Result<RoomMessageEventContent> {
+async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self
 		.services
@@ -372,24 +327,22 @@ async fn get_device_keys(
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
+async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self.services.users.get_user_signing_key(&user_id).await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_master_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
+async fn get_master_key(&self, user_id: OwnedUserId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self
 		.services
@@ -398,17 +351,12 @@ async fn get_master_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventC
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
 
 #[admin_command]
-async fn get_to_device_events(
-	&self,
-	user_id: OwnedUserId,
-	device_id: OwnedDeviceId,
-) -> Result<RoomMessageEventContent> {
+async fn get_to_device_events(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
 	let timer = tokio::time::Instant::now();
 	let result = self
 		.services
@@ -418,7 +366,6 @@ async fn get_to_device_events(
 		.await;
 	let query_time = timer.elapsed();
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
-	)))
+	self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
+		.await
 }
@@ -1,13 +1,11 @@
 use std::fmt::Write;
 
 use clap::Subcommand;
-use conduwuit::Result;
+use conduwuit::{Err, Result};
 use futures::StreamExt;
-use ruma::{
-	OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent,
-};
+use ruma::{OwnedRoomAliasId, OwnedRoomId};
 
-use crate::{Command, escape_html};
+use crate::Context;
 
 #[derive(Debug, Subcommand)]
 pub(crate) enum RoomAliasCommand {
@@ -18,7 +16,7 @@ pub(crate) enum RoomAliasCommand {
 		force: bool,
 
 		/// The room id to set the alias on
-		room_id: Box<RoomId>,
+		room_id: OwnedRoomId,
 
 		/// The alias localpart to use (`alias`, not `#alias:servername.tld`)
 		room_alias_localpart: String,
@@ -40,21 +38,11 @@ pub(crate) enum RoomAliasCommand {
 	/// - List aliases currently being used
 	List {
 		/// If set, only list the aliases for this room
-		room_id: Option<Box<RoomId>>,
+		room_id: Option<OwnedRoomId>,
 	},
 }
 
-pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result {
-	let c = reprocess(command, context).await?;
-	context.write_str(c.body()).await?;
-
-	Ok(())
-}
-
-pub(super) async fn reprocess(
-	command: RoomAliasCommand,
-	context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) -> Result {
 	let services = context.services;
 	let server_user = &services.globals.server_user;
 
@@ -67,9 +55,7 @@ pub(super) async fn reprocess(
 	let room_alias = match OwnedRoomAliasId::parse(room_alias_str) {
 		| Ok(alias) => alias,
 		| Err(err) => {
-			return Ok(RoomMessageEventContent::text_plain(format!(
-				"Failed to parse alias: {err}"
-			)));
+			return Err!("Failed to parse alias: {err}");
 		},
 	};
 	match command {
@@ -81,60 +67,50 @@ pub(super) async fn reprocess(
 					&room_id,
 					server_user,
 				) {
-					| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
-						"Successfully overwrote alias (formerly {id})"
-					))),
-					| Err(err) => Ok(RoomMessageEventContent::text_plain(format!(
-						"Failed to remove alias: {err}"
-					))),
+					| Err(err) => Err!("Failed to remove alias: {err}"),
+					| Ok(()) =>
+						context
+							.write_str(&format!(
+								"Successfully overwrote alias (formerly {id})"
+							))
+							.await,
 				}
 			},
-			| (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!(
+			| (false, Ok(id)) => Err!(
 				"Refusing to overwrite in use alias for {id}, use -f or --force to \
 				 overwrite"
-			))),
+			),
 			| (_, Err(_)) => {
 				match services.rooms.alias.set_alias(
 					&room_alias,
 					&room_id,
 					server_user,
 				) {
-					| Ok(()) => Ok(RoomMessageEventContent::text_plain(
-						"Successfully set alias",
-					)),
-					| Err(err) => Ok(RoomMessageEventContent::text_plain(format!(
-						"Failed to remove alias: {err}"
-					))),
+					| Err(err) => Err!("Failed to remove alias: {err}"),
+					| Ok(()) => context.write_str("Successfully set alias").await,
 				}
 			},
 		}
 	},
 	| RoomAliasCommand::Remove { .. } => {
 		match services.rooms.alias.resolve_local_alias(&room_alias).await {
+			| Err(_) => Err!("Alias isn't in use."),
			| Ok(id) => match services
				.rooms
				.alias
				.remove_alias(&room_alias, server_user)
				.await
			{
-				| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
-					"Removed alias from {id}"
-				))),
-				| Err(err) => Ok(RoomMessageEventContent::text_plain(format!(
-					"Failed to remove alias: {err}"
-				))),
+				| Err(err) => Err!("Failed to remove alias: {err}"),
+				| Ok(()) =>
+					context.write_str(&format!("Removed alias from {id}")).await,
			},
-			| Err(_) =>
-				Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
		}
	},
	| RoomAliasCommand::Which { .. } => {
		match services.rooms.alias.resolve_local_alias(&room_alias).await {
-			| Ok(id) => Ok(RoomMessageEventContent::text_plain(format!(
-				"Alias resolves to {id}"
-			))),
-			| Err(_) =>
-				Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
+			| Err(_) => Err!("Alias isn't in use."),
+			| Ok(id) => context.write_str(&format!("Alias resolves to {id}")).await,
		}
	},
	| RoomAliasCommand::List { .. } => unreachable!(),
@@ -156,15 +132,8 @@ pub(super) async fn reprocess(
 				output
 			});
 
-			let html_list = aliases.iter().fold(String::new(), |mut output, alias| {
-				writeln!(output, "<li>{}</li>", escape_html(alias.as_ref()))
-					.expect("should be able to write to string buffer");
-				output
-			});
-
 			let plain = format!("Aliases for {room_id}:\n{plain_list}");
-			let html = format!("Aliases for {room_id}:\n<ul>{html_list}</ul>");
-			Ok(RoomMessageEventContent::text_html(plain, html))
+			context.write_str(&plain).await
 		} else {
 			let aliases = services
 				.rooms
@@ -183,23 +152,8 @@ pub(super) async fn reprocess(
 				output
 			});
 
-			let html_list = aliases
-				.iter()
-				.fold(String::new(), |mut output, (alias, id)| {
-					writeln!(
-						output,
-						"<li><code>{}</code> -> #{}:{}</li>",
-						escape_html(alias.as_ref()),
-						escape_html(id),
-						server_name
-					)
-					.expect("should be able to write to string buffer");
-					output
-				});
-
 			let plain = format!("Aliases:\n{plain_list}");
-			let html = format!("Aliases:\n<ul>{html_list}</ul>");
-			Ok(RoomMessageEventContent::text_html(plain, html))
+			context.write_str(&plain).await
 		},
 	}
 }
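The other recurring change in this file and the ones below swaps `Box<RoomId>`-style clap arguments for ruma's `Owned*` types. Both are owned forms of the unsized ID types, but the owned handles are what the surrounding APIs want, which is why `.into()` calls such as `Destination::Federation(server_name.into())` disappear in the sending diff above. An analogy with `str`/`String` (standing in for `RoomId`/`OwnedRoomId`) shows the difference:

```rust
// Analogy only: `str`/`String` stand in for `RoomId`/`OwnedRoomId`.
fn takes_owned(_room: String) {}

fn with_box(room: Box<str>) {
    takes_owned(room.into()); // extra conversion, like `server_name.into()` before
}

fn with_owned(room: String) {
    takes_owned(room); // passes straight through, as in the `+` lines
}
```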
@@ -1,6 +1,6 @@
-use conduwuit::Result;
+use conduwuit::{Err, Result};
 use futures::StreamExt;
-use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent};
+use ruma::OwnedRoomId;
 
 use crate::{PAGE_SIZE, admin_command, get_room_info};
 
@@ -11,7 +11,7 @@ pub(super) async fn list_rooms(
 	exclude_disabled: bool,
 	exclude_banned: bool,
 	no_details: bool,
-) -> Result<RoomMessageEventContent> {
+) -> Result {
 	// TODO: i know there's a way to do this with clap, but i can't seem to find it
 	let page = page.unwrap_or(1);
 	let mut rooms = self
@@ -41,29 +41,28 @@ pub(super) async fn list_rooms(
 	.collect::<Vec<_>>();
 
 	if rooms.is_empty() {
-		return Ok(RoomMessageEventContent::text_plain("No more rooms."));
+		return Err!("No more rooms.");
 	}
 
-	let output_plain = format!(
-		"Rooms ({}):\n```\n{}\n```",
-		rooms.len(),
-		rooms
-			.iter()
-			.map(|(id, members, name)| if no_details {
+	let body = rooms
+		.iter()
+		.map(|(id, members, name)| {
+			if no_details {
 				format!("{id}")
 			} else {
 				format!("{id}\tMembers: {members}\tName: {name}")
-			})
-			.collect::<Vec<_>>()
-			.join("\n")
-	);
+			}
+		})
+		.collect::<Vec<_>>()
+		.join("\n");
 
-	Ok(RoomMessageEventContent::notice_markdown(output_plain))
+	self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),))
+		.await
 }
 
 #[admin_command]
-pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result<RoomMessageEventContent> {
+pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result {
 	let result = self.services.rooms.metadata.exists(&room_id).await;
 
-	Ok(RoomMessageEventContent::notice_markdown(format!("{result}")))
+	self.write_str(&format!("{result}")).await
 }
@@ -1,22 +1,22 @@
 use clap::Subcommand;
-use conduwuit::Result;
+use conduwuit::{Err, Result};
 use futures::StreamExt;
-use ruma::{RoomId, events::room::message::RoomMessageEventContent};
+use ruma::OwnedRoomId;
 
-use crate::{Command, PAGE_SIZE, get_room_info};
+use crate::{Context, PAGE_SIZE, get_room_info};
 
 #[derive(Debug, Subcommand)]
 pub(crate) enum RoomDirectoryCommand {
 	/// - Publish a room to the room directory
 	Publish {
 		/// The room id of the room to publish
-		room_id: Box<RoomId>,
+		room_id: OwnedRoomId,
 	},
 
 	/// - Unpublish a room to the room directory
 	Unpublish {
 		/// The room id of the room to unpublish
-		room_id: Box<RoomId>,
+		room_id: OwnedRoomId,
 	},
 
 	/// - List rooms that are published
@@ -25,25 +25,16 @@ pub(crate) enum RoomDirectoryCommand {
 	},
 }
 
-pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result {
-	let c = reprocess(command, context).await?;
-	context.write_str(c.body()).await?;
-	Ok(())
-}
-
-pub(super) async fn reprocess(
-	command: RoomDirectoryCommand,
-	context: &Command<'_>,
-) -> Result<RoomMessageEventContent> {
+pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>) -> Result {
 	let services = context.services;
 	match command {
 		| RoomDirectoryCommand::Publish { room_id } => {
 			services.rooms.directory.set_public(&room_id);
-			Ok(RoomMessageEventContent::notice_plain("Room published"))
+			context.write_str("Room published").await
 		},
 		| RoomDirectoryCommand::Unpublish { room_id } => {
 			services.rooms.directory.set_not_public(&room_id);
-			Ok(RoomMessageEventContent::notice_plain("Room unpublished"))
+			context.write_str("Room unpublished").await
 		},
 		| RoomDirectoryCommand::List { page } => {
 			// TODO: i know there's a way to do this with clap, but i can't seem to find it
@@ -66,20 +57,18 @@ pub(super) async fn reprocess(
 			.collect();
 
 			if rooms.is_empty() {
-				return Ok(RoomMessageEventContent::text_plain("No more rooms."));
+				return Err!("No more rooms.");
 			}
 
-			let output = format!(
-				"Rooms (page {page}):\n```\n{}\n```",
-				rooms
-					.iter()
-					.map(|(id, members, name)| format!(
-						"{id} | Members: {members} | Name: {name}"
-					))
-					.collect::<Vec<_>>()
-					.join("\n")
-			);
-			Ok(RoomMessageEventContent::text_markdown(output))
+			let body = rooms
+				.iter()
+				.map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}"))
+				.collect::<Vec<_>>()
+				.join("\n");
+
+			context
+				.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",))
+				.await
 		},
 	}
 }
@@ -1,7 +1,7 @@
 use clap::Subcommand;
-use conduwuit::{Result, utils::ReadyExt};
+use conduwuit::{Err, Result, utils::ReadyExt};
 use futures::StreamExt;
-use ruma::{RoomId, events::room::message::RoomMessageEventContent};
+use ruma::OwnedRoomId;
 
 use crate::{admin_command, admin_command_dispatch};
 
@@ -10,7 +10,7 @@ use crate::{admin_command, admin_command_dispatch};
 pub(crate) enum RoomInfoCommand {
 	/// - List joined members in a room
 	ListJoinedMembers {
-		room_id: Box<RoomId>,
+		room_id: OwnedRoomId,
 
 		/// Lists only our local users in the specified room
 		#[arg(long)]
@@ -22,16 +22,12 @@ pub(crate) enum RoomInfoCommand {
 	/// Room topics can be huge, so this is in its
 	/// own separate command
 	ViewRoomTopic {
-		room_id: Box<RoomId>,
+		room_id: OwnedRoomId,
 	},
 }
 
 #[admin_command]
-async fn list_joined_members(
-	&self,
-	room_id: Box<RoomId>,
-	local_only: bool,
-) -> Result<RoomMessageEventContent> {
+async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> Result {
 	let room_name = self
 		.services
 		.rooms
@@ -64,22 +60,19 @@ async fn list_joined_members(
 		.collect()
 		.await;
 
-	let output_plain = format!(
-		"{} Members in Room \"{}\":\n```\n{}\n```",
-		member_info.len(),
-		room_name,
-		member_info
-			.into_iter()
-			.map(|(displayname, mxid)| format!("{mxid} | {displayname}"))
-			.collect::<Vec<_>>()
-			.join("\n")
-	);
+	let num = member_info.len();
+	let body = member_info
+		.into_iter()
+		.map(|(displayname, mxid)| format!("{mxid} | {displayname}"))
+		.collect::<Vec<_>>()
+		.join("\n");
 
-	Ok(RoomMessageEventContent::notice_markdown(output_plain))
+	self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",))
+		.await
 }
 
 #[admin_command]
-async fn view_room_topic(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
+async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result {
 	let Ok(room_topic) = self
 		.services
 		.rooms
@@ -87,10 +80,9 @@ async fn view_room_topic(&self, room_id: Box<RoomId>) -> Result<RoomMessageEvent
 		.get_room_topic(&room_id)
 		.await
 	else {
-		return Ok(RoomMessageEventContent::text_plain("Room does not have a room topic set."));
+		return Err!("Room does not have a room topic set.");
 	};
 
-	Ok(RoomMessageEventContent::notice_markdown(format!(
-		"Room topic:\n```\n{room_topic}\n```"
-	)))
+	self.write_str(&format!("Room topic:\n```\n{room_topic}\n```"))
+		.await
 }
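`view_room_topic` keeps its `let .. else` guard; only the fallback changes from a successful notice to `Err!`. For readers unfamiliar with the shape, a minimal let-else example in the same spirit (names here are illustrative, not from the crate):

```rust
// Minimal let-else with the same shape as `view_room_topic` above: bind on
// success, divert to the error path otherwise.
fn topic_of(room: Option<&str>) -> Result<String, String> {
    let Some(topic) = room else {
        return Err("Room does not have a room topic set.".to_owned());
    };
    Ok(format!("Room topic:\n```\n{topic}\n```"))
}
```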
@@ -1,15 +1,12 @@
 use api::client::leave_room;
 use clap::Subcommand;
 use conduwuit::{
-	Result, debug,
+	Err, Result, debug,
 	utils::{IterStream, ReadyExt},
 	warn,
 };
 use futures::StreamExt;
-use ruma::{
-	OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId,
-	events::room::message::RoomMessageEventContent,
-};
+use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId};
 
 use crate::{admin_command, admin_command_dispatch, get_room_info};
 
@@ -24,7 +21,7 @@ pub(crate) enum RoomModerationCommand {
 	BanRoom {
 		/// The room in the format of `!roomid:example.com` or a room alias in
 		/// the format of `#roomalias:example.com`
-		room: Box<RoomOrAliasId>,
+		room: OwnedRoomOrAliasId,
 	},
 
 	/// - Bans a list of rooms (room IDs and room aliases) from a newline
@@ -36,7 +33,7 @@ pub(crate) enum RoomModerationCommand {
 	UnbanRoom {
 		/// The room in the format of `!roomid:example.com` or a room alias in
 		/// the format of `#roomalias:example.com`
-		room: Box<RoomOrAliasId>,
+		room: OwnedRoomOrAliasId,
 	},
 
 	/// - List of all rooms we have banned
@@ -49,14 +46,14 @@ pub(crate) enum RoomModerationCommand {
 }
 
 #[admin_command]
-async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> {
+async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
 	debug!("Got room alias or ID: {}", room);
 
 	let admin_room_alias = &self.services.globals.admin_alias;
 
 	if let Ok(admin_room_id) = self.services.admin.get_admin_room().await {
 		if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) {
-			return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room."));
+			return Err!("Not allowed to ban the admin room.");
 		}
 	}
 
@@ -64,11 +61,11 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
 	let room_id = match RoomId::parse(&room) {
 		| Ok(room_id) => room_id,
 		| Err(e) => {
-			return Ok(RoomMessageEventContent::text_plain(format!(
+			return Err!(
 				"Failed to parse room ID {room}. Please note that this requires a full room \
 				 ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
 				 (`#roomalias:example.com`): {e}"
-			)));
+			);
 		},
 	};
 
@@ -80,11 +77,11 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
 	let room_alias = match RoomAliasId::parse(&room) {
 		| Ok(room_alias) => room_alias,
 		| Err(e) => {
-			return Ok(RoomMessageEventContent::text_plain(format!(
+			return Err!(
 				"Failed to parse room ID {room}. Please note that this requires a full room \
 				 ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
 				 (`#roomalias:example.com`): {e}"
-			)));
+			);
 		},
 	};
 
@@ -123,9 +120,9 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
 				room_id
 			},
 			| Err(e) => {
-				return Ok(RoomMessageEventContent::notice_plain(format!(
+				return Err!(
 					"Failed to resolve room alias {room_alias} to a room ID: {e}"
-				)));
+				);
 			},
 		}
 	},
@@ -135,11 +132,11 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
 
 		room_id
 	} else {
-		return Ok(RoomMessageEventContent::text_plain(
+		return Err!(
 			"Room specified is not a room ID or room alias. Please note that this requires a \
 			 full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
 			 (`#roomalias:example.com`)",
-		));
+		);
 	};
 
 	debug!("Making all users leave the room {room_id} and forgetting it");
@@ -185,20 +182,19 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
 
 	self.services.rooms.metadata.disable_room(&room_id, true);
 
-	Ok(RoomMessageEventContent::text_plain(
+	self.write_str(
 		"Room banned, removed all our local users, and disabled incoming federation with room.",
-	))
+	)
+	.await
 }
 
 #[admin_command]
-async fn ban_list_of_rooms(&self) -> Result<RoomMessageEventContent> {
+async fn ban_list_of_rooms(&self) -> Result {
 	if self.body.len() < 2
 		|| !self.body[0].trim().starts_with("```")
 		|| self.body.last().unwrap_or(&"").trim() != "```"
 	{
-		return Ok(RoomMessageEventContent::text_plain(
-			"Expected code block in command body. Add --help for details.",
-		));
+		return Err!("Expected code block in command body. Add --help for details.",);
 	}
 
 	let rooms_s = self
@@ -356,23 +352,24 @@ async fn ban_list_of_rooms(&self) -> Result<RoomMessageEventContent> {
 		self.services.rooms.metadata.disable_room(&room_id, true);
 	}
 
-	Ok(RoomMessageEventContent::text_plain(format!(
+	self.write_str(&format!(
 		"Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \
 		 disabled incoming federation with the room."
-	)))
+	))
+	.await
 }
 
 #[admin_command]
-async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> {
+async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
 	let room_id = if room.is_room_id() {
 		let room_id = match RoomId::parse(&room) {
 			| Ok(room_id) => room_id,
 			| Err(e) => {
-				return Ok(RoomMessageEventContent::text_plain(format!(
+				return Err!(
 					"Failed to parse room ID {room}. Please note that this requires a full room \
 					 ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
 					 (`#roomalias:example.com`): {e}"
-				)));
+				);
 			},
 		};
 
@@ -384,11 +381,11 @@ async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventC
 		let room_alias = match RoomAliasId::parse(&room) {
 			| Ok(room_alias) => room_alias,
 			| Err(e) => {
-				return Ok(RoomMessageEventContent::text_plain(format!(
+				return Err!(
 					"Failed to parse room ID {room}. Please note that this requires a full room \
 					 ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
 					 (`#roomalias:example.com`): {e}"
-				)));
+				);
 			},
 		};
 
@@ -427,9 +424,7 @@ async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventC
 				room_id
 			},
 			| Err(e) => {
-				return Ok(RoomMessageEventContent::text_plain(format!(
-					"Failed to resolve room alias {room} to a room ID: {e}"
-				)));
+				return Err!("Failed to resolve room alias {room} to a room ID: {e}");
 			},
 		}
 	},
@@ -439,19 +434,20 @@ async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventC
 
 		room_id
 	} else {
-		return Ok(RoomMessageEventContent::text_plain(
+		return Err!(
 			"Room specified is not a room ID or room alias. Please note that this requires a \
 			 full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
 			 (`#roomalias:example.com`)",
-		));
+		);
 	};
 
 	self.services.rooms.metadata.disable_room(&room_id, false);
-	Ok(RoomMessageEventContent::text_plain("Room unbanned and federation re-enabled."))
+	self.write_str("Room unbanned and federation re-enabled.")
+		.await
 }
 
 #[admin_command]
-async fn list_banned_rooms(&self, no_details: bool) -> Result<RoomMessageEventContent> {
+async fn list_banned_rooms(&self, no_details: bool) -> Result {
 	let room_ids: Vec<OwnedRoomId> = self
 		.services
 		.rooms
@@ -462,7 +458,7 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result<RoomMessageEventCo
 	.await;
 
 	if room_ids.is_empty() {
-		return Ok(RoomMessageEventContent::text_plain("No rooms are banned."));
+		return Err!("No rooms are banned.");
 	}
 
 	let mut rooms = room_ids
@@ -475,19 +471,20 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result<RoomMessageEventCo
 	rooms.sort_by_key(|r| r.1);
 	rooms.reverse();
 
-	let output_plain = format!(
-		"Rooms Banned ({}):\n```\n{}\n```",
-		rooms.len(),
-		rooms
-			.iter()
-			.map(|(id, members, name)| if no_details {
+	let num = rooms.len();
+
+	let body = rooms
+		.iter()
+		.map(|(id, members, name)| {
+			if no_details {
 				format!("{id}")
 			} else {
 				format!("{id}\tMembers: {members}\tName: {name}")
-			})
-			.collect::<Vec<_>>()
-			.join("\n")
-	);
+			}
+		})
+		.collect::<Vec<_>>()
+		.join("\n");
 
-	Ok(RoomMessageEventContent::notice_markdown(output_plain))
+	self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",))
+		.await
 }
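Worth flagging as a behavioural change rather than a pure refactor: empty-result cases such as "No rooms are banned." and "No more rooms." previously returned `Ok` with an informational notice, and now return through `Err!`, so they presumably surface via the dispatcher's error path instead. Reduced to a plain `Result<(), String>`, the observable difference is:

```rust
// The same text, routed differently.
fn old_style() -> Result<(), String> {
    println!("No rooms are banned."); // informational reply; command "succeeded"
    Ok(())
}

fn new_style() -> Result<(), String> {
    Err("No rooms are banned.".to_owned()) // now delivered on the error path
}
```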
@ -1,12 +1,16 @@
|
|||
use std::{fmt::Write, path::PathBuf, sync::Arc};
|
||||
|
||||
use conduwuit::{Err, Result, info, utils::time, warn};
|
||||
use ruma::events::room::message::RoomMessageEventContent;
|
||||
use conduwuit::{
|
||||
Err, Result, info,
|
||||
utils::{stream::IterStream, time},
|
||||
warn,
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
|
||||
use crate::admin_command;
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn uptime(&self) -> Result<RoomMessageEventContent> {
|
||||
pub(super) async fn uptime(&self) -> Result {
|
||||
let elapsed = self
|
||||
.services
|
||||
.server
|
||||
|
@ -15,47 +19,36 @@ pub(super) async fn uptime(&self) -> Result<RoomMessageEventContent> {
|
|||
.expect("standard duration");
|
||||
|
||||
let result = time::pretty(elapsed);
|
||||
Ok(RoomMessageEventContent::notice_plain(format!("{result}.")))
|
||||
self.write_str(&format!("{result}.")).await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn show_config(&self) -> Result<RoomMessageEventContent> {
|
||||
// Construct and send the response
|
||||
Ok(RoomMessageEventContent::text_markdown(format!(
|
||||
"{}",
|
||||
*self.services.server.config
|
||||
)))
|
||||
pub(super) async fn show_config(&self) -> Result {
|
||||
self.write_str(&format!("{}", *self.services.server.config))
|
||||
.await
|
||||
}
|
||||
|
||||
#[admin_command]
|
||||
pub(super) async fn reload_config(
|
||||
&self,
|
||||
path: Option<PathBuf>,
|
||||
) -> Result<RoomMessageEventContent> {
|
||||
pub(super) async fn reload_config(&self, path: Option<PathBuf>) -> Result {
|
||||
let path = path.as_deref().into_iter();
|
||||
self.services.config.reload(path)?;
|
||||
|
||||
Ok(RoomMessageEventContent::text_plain("Successfully reconfigured."))
|
||||
self.write_str("Successfully reconfigured.").await
|
||||
}
|
||||
|
||||
#[admin_command]
pub(super) async fn list_features(
    &self,
    available: bool,
    enabled: bool,
    comma: bool,
) -> Result<RoomMessageEventContent> {
pub(super) async fn list_features(&self, available: bool, enabled: bool, comma: bool) -> Result {
    let delim = if comma { "," } else { " " };
    if enabled && !available {
        let features = info::rustc::features().join(delim);
        let out = format!("`\n{features}\n`");
        return Ok(RoomMessageEventContent::text_markdown(out));
        return self.write_str(&out).await;
    }

    if available && !enabled {
        let features = info::cargo::features().join(delim);
        let out = format!("`\n{features}\n`");
        return Ok(RoomMessageEventContent::text_markdown(out));
        return self.write_str(&out).await;
    }

    let mut features = String::new();
@@ -68,77 +61,76 @@ pub(super) async fn list_features(
        writeln!(features, "{emoji} {feature} {remark}")?;
    }

    Ok(RoomMessageEventContent::text_markdown(features))
    self.write_str(&features).await
}

#[admin_command]
pub(super) async fn memory_usage(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn memory_usage(&self) -> Result {
    let services_usage = self.services.memory_usage().await?;
    let database_usage = self.services.db.db.memory_usage()?;
    let allocator_usage =
        conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}"));

    Ok(RoomMessageEventContent::text_plain(format!(
    self.write_str(&format!(
        "Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}",
    )))
    ))
    .await
}

#[admin_command]
pub(super) async fn clear_caches(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn clear_caches(&self) -> Result {
    self.services.clear_cache().await;

    Ok(RoomMessageEventContent::text_plain("Done."))
    self.write_str("Done.").await
}

#[admin_command]
pub(super) async fn list_backups(&self) -> Result<RoomMessageEventContent> {
    let result = self.services.db.db.backup_list()?;

    if result.is_empty() {
        Ok(RoomMessageEventContent::text_plain("No backups found."))
    } else {
        Ok(RoomMessageEventContent::text_plain(result))
    }
pub(super) async fn list_backups(&self) -> Result {
    self.services
        .db
        .db
        .backup_list()?
        .try_stream()
        .try_for_each(|result| write!(self, "{result}"))
        .await
}

#[admin_command]
pub(super) async fn backup_database(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn backup_database(&self) -> Result {
    let db = Arc::clone(&self.services.db);
    let mut result = self
    let result = self
        .services
        .server
        .runtime()
        .spawn_blocking(move || match db.db.backup() {
            | Ok(()) => String::new(),
            | Err(e) => e.to_string(),
            | Ok(()) => "Done".to_owned(),
            | Err(e) => format!("Failed: {e}"),
        })
        .await?;

    if result.is_empty() {
        result = self.services.db.db.backup_list()?;
    }

    Ok(RoomMessageEventContent::notice_markdown(result))
    let count = self.services.db.db.backup_count()?;
    self.write_str(&format!("{result}. Currently have {count} backups."))
        .await
}
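The backup hunk keeps the blocking RocksDB call off the async executor by running it through the runtime's `spawn_blocking`. A minimal sketch of that pattern under tokio, with a stand-in `Db` type in place of the real database handle:

use std::sync::Arc;

struct Db;

impl Db {
    // Stand-in for the blocking RocksDB backup call.
    fn backup(&self) -> Result<(), String> {
        Ok(())
    }
}

async fn backup_database(db: Arc<Db>) -> Result<String, tokio::task::JoinError> {
    // spawn_blocking moves the filesystem-heavy work to a worker thread so
    // the async executor stays responsive.
    tokio::task::spawn_blocking(move || match db.backup() {
        Ok(()) => "Done".to_owned(),
        Err(e) => format!("Failed: {e}"),
    })
    .await
}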
|
||||
|
||||
#[admin_command]
pub(super) async fn admin_notice(&self, message: Vec<String>) -> Result<RoomMessageEventContent> {
pub(super) async fn admin_notice(&self, message: Vec<String>) -> Result {
    let message = message.join(" ");
    self.services.admin.send_text(&message).await;

    Ok(RoomMessageEventContent::notice_plain("Notice was sent to #admins"))
    self.write_str("Notice was sent to #admins").await
}

#[admin_command]
pub(super) async fn reload_mods(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn reload_mods(&self) -> Result {
    self.services.server.reload()?;

    Ok(RoomMessageEventContent::notice_plain("Reloading server..."))
    self.write_str("Reloading server...").await
}

#[admin_command]
#[cfg(unix)]
pub(super) async fn restart(&self, force: bool) -> Result<RoomMessageEventContent> {
pub(super) async fn restart(&self, force: bool) -> Result {
    use conduwuit::utils::sys::current_exe_deleted;

    if !force && current_exe_deleted() {
@@ -150,13 +142,13 @@ pub(super) async fn restart(&self, force: bool) -> Result<RoomMessageEventConten
    self.services.server.restart()?;

    Ok(RoomMessageEventContent::notice_plain("Restarting server..."))
    self.write_str("Restarting server...").await
}

#[admin_command]
pub(super) async fn shutdown(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn shutdown(&self) -> Result {
    warn!("shutdown command");
    self.services.server.shutdown()?;

    Ok(RoomMessageEventContent::notice_plain("Shutting down server..."))
    self.write_str("Shutting down server...").await
}

@@ -36,7 +36,7 @@ pub(super) enum ServerCommand {
    /// - Print database memory usage statistics
    MemoryUsage,

    /// - Clears all of Conduwuit's caches
    /// - Clears all of Continuwuity's caches
    ClearCaches,

    /// - Performs an online backup of the database (only available for RocksDB

@@ -2,7 +2,7 @@ use std::{collections::BTreeMap, fmt::Write as _};

use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room};
use conduwuit::{
    Result, debug, debug_warn, error, info, is_equal_to,
    Err, Result, debug, debug_warn, error, info, is_equal_to,
    matrix::pdu::PduBuilder,
    utils::{self, ReadyExt},
    warn,
@@ -10,11 +10,10 @@ use conduwuit::{
use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
use futures::StreamExt;
use ruma::{
    EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId,
    OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId,
    events::{
        RoomAccountDataEventType, StateEventType,
        room::{
            message::RoomMessageEventContent,
            power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
            redaction::RoomRedactionEventContent,
        },
@@ -31,7 +30,7 @@ const AUTO_GEN_PASSWORD_LENGTH: usize = 25;
const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin.";

#[admin_command]
pub(super) async fn list_users(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn list_users(&self) -> Result {
    let users: Vec<_> = self
        .services
        .users
@@ -44,30 +43,22 @@ pub(super) async fn list_users(&self) -> Result<RoomMessageEventContent> {
    plain_msg += users.join("\n").as_str();
    plain_msg += "\n```";

    self.write_str(plain_msg.as_str()).await?;

    Ok(RoomMessageEventContent::text_plain(""))
    self.write_str(&plain_msg).await
}

#[admin_command]
pub(super) async fn create_user(
    &self,
    username: String,
    password: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn create_user(&self, username: String, password: Option<String>) -> Result {
    // Validate user id
    let user_id = parse_local_user_id(self.services, &username)?;

    if let Err(e) = user_id.validate_strict() {
        if self.services.config.emergency_password.is_none() {
            return Ok(RoomMessageEventContent::text_plain(format!(
                "Username {user_id} contains disallowed characters or spaces: {e}"
            )));
            return Err!("Username {user_id} contains disallowed characters or spaces: {e}");
        }
    }

    if self.services.users.exists(&user_id).await {
        return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists")));
        return Err!("User {user_id} already exists");
    }

    let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
@@ -89,8 +80,7 @@ pub(super) async fn create_user(
        .new_user_displayname_suffix
        .is_empty()
    {
        write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)
            .expect("should be able to write to string buffer");
        write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)?;
    }

    self.services
@@ -110,15 +100,17 @@ pub(super) async fn create_user(
                content: ruma::events::push_rules::PushRulesEventContent {
                    global: ruma::push::Ruleset::server_default(&user_id),
                },
            })
            .expect("to json value always works"),
            })?,
        )
        .await?;

    if !self.services.server.config.auto_join_rooms.is_empty() {
        for room in &self.services.server.config.auto_join_rooms {
            let Ok(room_id) = self.services.rooms.alias.resolve(room).await else {
                error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping");
                error!(
                    %user_id,
                    "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"
                );
                continue;
            };

@@ -154,18 +146,17 @@ pub(super) async fn create_user(
                    info!("Automatically joined room {room} for user {user_id}");
                },
                | Err(e) => {
                    self.services
                        .admin
                        .send_message(RoomMessageEventContent::text_plain(format!(
                            "Failed to automatically join room {room} for user {user_id}: \
                            {e}"
                        )))
                        .await
                        .ok();
                    // don't return this error so we don't fail registrations
                    error!(
                        "Failed to automatically join room {room} for user {user_id}: {e}"
                    );
                    self.services
                        .admin
                        .send_text(&format!(
                            "Failed to automatically join room {room} for user {user_id}: \
                            {e}"
                        ))
                        .await;
                },
            }
        }
@@ -192,25 +183,18 @@ pub(super) async fn create_user(
        debug!("create_user admin command called without an admin room being available");
    }

    Ok(RoomMessageEventContent::text_plain(format!(
        "Created user with user_id: {user_id} and password: `{password}`"
    )))
    self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`"))
        .await
}
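The error paths above move from returning a plain-text message wrapped in `Ok(..)` to conduwuit's `Err!` macro, which formats the message straight into the error arm and returns it. A rough stand-in for the idea, using a plain `String` error instead of conduwuit's richer error type:

// Hypothetical simplification of conduwuit's Err! macro: format a message
// and return it as the Err variant in one statement.
macro_rules! bail {
    ($($arg:tt)*) => {
        return Err(format!($($arg)*))
    };
}

fn create_user(username: &str, already_exists: bool) -> Result<(), String> {
    if already_exists {
        bail!("User {username} already exists");
    }
    Ok(())
}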
|
||||
|
||||
#[admin_command]
pub(super) async fn deactivate(
    &self,
    no_leave_rooms: bool,
    user_id: String,
) -> Result<RoomMessageEventContent> {
pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> Result {
    // Validate user id
    let user_id = parse_local_user_id(self.services, &user_id)?;

    // don't deactivate the server service account
    if user_id == self.services.globals.server_user {
        return Ok(RoomMessageEventContent::text_plain(
            "Not allowed to deactivate the server service account.",
        ));
        return Err!("Not allowed to deactivate the server service account.",);
    }

    self.services.users.deactivate_account(&user_id).await?;
@@ -218,11 +202,8 @@ pub(super) async fn deactivate(
    if !no_leave_rooms {
        self.services
            .admin
            .send_message(RoomMessageEventContent::text_plain(format!(
                "Making {user_id} leave all rooms after deactivation..."
            )))
            .await
            .ok();
            .send_text(&format!("Making {user_id} leave all rooms after deactivation..."))
            .await;

        let all_joined_rooms: Vec<OwnedRoomId> = self
            .services
@@ -239,24 +220,19 @@ pub(super) async fn deactivate(
        leave_all_rooms(self.services, &user_id).await;
    }

    Ok(RoomMessageEventContent::text_plain(format!(
        "User {user_id} has been deactivated"
    )))
    self.write_str(&format!("User {user_id} has been deactivated"))
        .await
}

#[admin_command]
pub(super) async fn reset_password(
    &self,
    username: String,
    password: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn reset_password(&self, username: String, password: Option<String>) -> Result {
    let user_id = parse_local_user_id(self.services, &username)?;

    if user_id == self.services.globals.server_user {
        return Ok(RoomMessageEventContent::text_plain(
        return Err!(
            "Not allowed to set the password for the server account. Please use the emergency \
            password config option.",
        ));
        );
    }

    let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
@@ -266,28 +242,20 @@ pub(super) async fn reset_password(
        .users
        .set_password(&user_id, Some(new_password.as_str()))
    {
        | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
            "Successfully reset the password for user {user_id}: `{new_password}`"
        ))),
        | Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
            "Couldn't reset the password for user {user_id}: {e}"
        ))),
        | Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
        | Ok(()) =>
            write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"),
    }
    .await
}

#[admin_command]
pub(super) async fn deactivate_all(
    &self,
    no_leave_rooms: bool,
    force: bool,
) -> Result<RoomMessageEventContent> {
pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result {
    if self.body.len() < 2
        || !self.body[0].trim().starts_with("```")
        || self.body.last().unwrap_or(&"").trim() != "```"
    {
        return Ok(RoomMessageEventContent::text_plain(
            "Expected code block in command body. Add --help for details.",
        ));
        return Err!("Expected code block in command body. Add --help for details.",);
    }

    let usernames = self
@@ -301,15 +269,23 @@ pub(super) async fn deactivate_all(

    for username in usernames {
        match parse_active_local_user_id(self.services, username).await {
            | Err(e) => {
                self.services
                    .admin
                    .send_text(&format!("{username} is not a valid username, skipping over: {e}"))
                    .await;

                continue;
            },
            | Ok(user_id) => {
                if self.services.users.is_admin(&user_id).await && !force {
                    self.services
                        .admin
                        .send_message(RoomMessageEventContent::text_plain(format!(
                        .send_text(&format!(
                            "{username} is an admin and --force is not set, skipping over"
                        )))
                        .await
                        .ok();
                        ))
                        .await;

                    admins.push(username);
                    continue;
                }
@@ -318,26 +294,16 @@ pub(super) async fn deactivate_all(
                if user_id == self.services.globals.server_user {
                    self.services
                        .admin
                        .send_message(RoomMessageEventContent::text_plain(format!(
                        .send_text(&format!(
                            "{username} is the server service account, skipping over"
                        )))
                        .await
                        .ok();
                        ))
                        .await;

                    continue;
                }

                user_ids.push(user_id);
            },
            | Err(e) => {
                self.services
                    .admin
                    .send_message(RoomMessageEventContent::text_plain(format!(
                        "{username} is not a valid username, skipping over: {e}"
                    )))
                    .await
                    .ok();
                continue;
            },
        }
    }

@@ -345,6 +311,12 @@ pub(super) async fn deactivate_all(

    for user_id in user_ids {
        match self.services.users.deactivate_account(&user_id).await {
            | Err(e) => {
                self.services
                    .admin
                    .send_text(&format!("Failed deactivating user: {e}"))
                    .await;
            },
            | Ok(()) => {
                deactivation_count = deactivation_count.saturating_add(1);
                if !no_leave_rooms {
@@ -365,33 +337,24 @@ pub(super) async fn deactivate_all(
                    leave_all_rooms(self.services, &user_id).await;
                }
            },
            | Err(e) => {
                self.services
                    .admin
                    .send_message(RoomMessageEventContent::text_plain(format!(
                        "Failed deactivating user: {e}"
                    )))
                    .await
                    .ok();
            },
        }
    }

    if admins.is_empty() {
        Ok(RoomMessageEventContent::text_plain(format!(
            "Deactivated {deactivation_count} accounts."
        )))
        write!(self, "Deactivated {deactivation_count} accounts.")
    } else {
        Ok(RoomMessageEventContent::text_plain(format!(
        write!(
            self,
            "Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \
            --force to deactivate admin accounts",
            admins.join(", ")
        )))
        )
    }
    .await
}
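Bulk commands read their input from a fenced code block in the message body, as checked at the top of `deactivate_all`. A minimal sketch of that check and extraction, assuming the body is already split into lines:

fn parse_code_block<'a>(body: &[&'a str]) -> Result<Vec<&'a str>, &'static str> {
    // The first line must open a ``` fence and the last line must close it.
    if body.len() < 2
        || !body[0].trim().starts_with("```")
        || body.last().unwrap_or(&"").trim() != "```"
    {
        return Err("Expected code block in command body.");
    }

    // Everything between the fences is one username per line.
    Ok(body[1..body.len() - 1].to_vec())
}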
|
||||
|
||||
#[admin_command]
pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result<RoomMessageEventContent> {
pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
    // Validate user id
    let user_id = parse_local_user_id(self.services, &user_id)?;

@@ -405,23 +368,20 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result<RoomMess
    .await;

    if rooms.is_empty() {
        return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
        return Err!("User is not in any rooms.");
    }

    rooms.sort_by_key(|r| r.1);
    rooms.reverse();

    let output_plain = format!(
        "Rooms {user_id} Joined ({}):\n```\n{}\n```",
        rooms.len(),
        rooms
            .iter()
            .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
            .collect::<Vec<_>>()
            .join("\n")
    );
    let body = rooms
        .iter()
        .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
        .collect::<Vec<_>>()
        .join("\n");

    Ok(RoomMessageEventContent::notice_markdown(output_plain))
    self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),))
        .await
}

#[admin_command]
@@ -429,27 +389,23 @@ pub(super) async fn force_join_list_of_local_users(
    &self,
    room_id: OwnedRoomOrAliasId,
    yes_i_want_to_do_this: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
    if self.body.len() < 2
        || !self.body[0].trim().starts_with("```")
        || self.body.last().unwrap_or(&"").trim() != "```"
    {
        return Ok(RoomMessageEventContent::text_plain(
            "Expected code block in command body. Add --help for details.",
        ));
        return Err!("Expected code block in command body. Add --help for details.",);
    }

    if !yes_i_want_to_do_this {
        return Ok(RoomMessageEventContent::notice_markdown(
        return Err!(
            "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
            bulk join all specified local users.",
        ));
        );
    }

    let Ok(admin_room) = self.services.admin.get_admin_room().await else {
        return Ok(RoomMessageEventContent::notice_markdown(
            "There is not an admin room to check for server admins.",
        ));
        return Err!("There is not an admin room to check for server admins.",);
    };

    let (room_id, servers) = self
@@ -466,7 +422,7 @@ pub(super) async fn force_join_list_of_local_users(
        .server_in_room(self.services.globals.server_name(), &room_id)
        .await
    {
        return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room."));
        return Err!("We are not joined in this room.");
    }

    let server_admins: Vec<_> = self
@@ -486,9 +442,7 @@ pub(super) async fn force_join_list_of_local_users(
        .ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
        .await
    {
        return Ok(RoomMessageEventContent::notice_markdown(
            "There is not a single server admin in the room.",
        ));
        return Err!("There is not a single server admin in the room.",);
    }

    let usernames = self
@@ -506,11 +460,11 @@ pub(super) async fn force_join_list_of_local_users(
        if user_id == self.services.globals.server_user {
            self.services
                .admin
                .send_message(RoomMessageEventContent::text_plain(format!(
                .send_text(&format!(
                    "{username} is the server service account, skipping over"
                )))
                .await
                .ok();
                ))
                .await;

            continue;
        }

@@ -519,11 +473,9 @@ pub(super) async fn force_join_list_of_local_users(
            | Err(e) => {
                self.services
                    .admin
                    .send_message(RoomMessageEventContent::text_plain(format!(
                        "{username} is not a valid username, skipping over: {e}"
                    )))
                    .await
                    .ok();
                    .send_text(&format!("{username} is not a valid username, skipping over: {e}"))
                    .await;

                continue;
            },
        }
@@ -554,10 +506,11 @@ pub(super) async fn force_join_list_of_local_users(
        }
    }

    Ok(RoomMessageEventContent::notice_markdown(format!(
    self.write_str(&format!(
        "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
        failed.",
    )))
    ))
    .await
}

#[admin_command]
@@ -565,18 +518,16 @@ pub(super) async fn force_join_all_local_users(
    &self,
    room_id: OwnedRoomOrAliasId,
    yes_i_want_to_do_this: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
    if !yes_i_want_to_do_this {
        return Ok(RoomMessageEventContent::notice_markdown(
        return Err!(
            "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
            bulk join all local users.",
        ));
        );
    }

    let Ok(admin_room) = self.services.admin.get_admin_room().await else {
        return Ok(RoomMessageEventContent::notice_markdown(
            "There is not an admin room to check for server admins.",
        ));
        return Err!("There is not an admin room to check for server admins.",);
    };

    let (room_id, servers) = self
@@ -593,7 +544,7 @@ pub(super) async fn force_join_all_local_users(
        .server_in_room(self.services.globals.server_name(), &room_id)
        .await
    {
        return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room."));
        return Err!("We are not joined in this room.");
    }

    let server_admins: Vec<_> = self
@@ -613,9 +564,7 @@ pub(super) async fn force_join_all_local_users(
        .ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
        .await
    {
        return Ok(RoomMessageEventContent::notice_markdown(
            "There is not a single server admin in the room.",
        ));
        return Err!("There is not a single server admin in the room.",);
    }

    let mut failed_joins: usize = 0;
@@ -650,10 +599,11 @@ pub(super) async fn force_join_all_local_users(
        }
    }

    Ok(RoomMessageEventContent::notice_markdown(format!(
    self.write_str(&format!(
        "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
        failed.",
    )))
    ))
    .await
}

#[admin_command]
@@ -661,7 +611,7 @@ pub(super) async fn force_join_room(
    &self,
    user_id: String,
    room_id: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
) -> Result {
    let user_id = parse_local_user_id(self.services, &user_id)?;
    let (room_id, servers) = self
        .services
@@ -677,9 +627,8 @@ pub(super) async fn force_join_room(
    join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None)
        .await?;

    Ok(RoomMessageEventContent::notice_markdown(format!(
        "{user_id} has been joined to {room_id}.",
    )))
    self.write_str(&format!("{user_id} has been joined to {room_id}.",))
        .await
}

#[admin_command]
@@ -687,7 +636,7 @@ pub(super) async fn force_leave_room(
    &self,
    user_id: String,
    room_id: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
) -> Result {
    let user_id = parse_local_user_id(self.services, &user_id)?;
    let room_id = self.services.rooms.alias.resolve(&room_id).await?;

@@ -703,24 +652,17 @@ pub(super) async fn force_leave_room(
        .is_joined(&user_id, &room_id)
        .await
    {
        return Ok(RoomMessageEventContent::notice_markdown(format!(
            "{user_id} is not joined in the room"
        )));
        return Err!("{user_id} is not joined in the room");
    }

    leave_room(self.services, &user_id, &room_id, None).await?;

    Ok(RoomMessageEventContent::notice_markdown(format!(
        "{user_id} has left {room_id}.",
    )))
    self.write_str(&format!("{user_id} has left {room_id}.",))
        .await
}

#[admin_command]
pub(super) async fn force_demote(
    &self,
    user_id: String,
    room_id: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAliasId) -> Result {
    let user_id = parse_local_user_id(self.services, &user_id)?;
    let room_id = self.services.rooms.alias.resolve(&room_id).await?;

@@ -731,15 +673,11 @@ pub(super) async fn force_demote(

    let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;

    let room_power_levels = self
    let room_power_levels: Option<RoomPowerLevelsEventContent> = self
        .services
        .rooms
        .state_accessor
        .room_state_get_content::<RoomPowerLevelsEventContent>(
            &room_id,
            &StateEventType::RoomPowerLevels,
            "",
        )
        .room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "")
        .await
        .ok();

@@ -757,9 +695,7 @@ pub(super) async fn force_demote(
        .is_ok_and(|event| event.sender == user_id);

    if !user_can_demote_self {
        return Ok(RoomMessageEventContent::notice_markdown(
            "User is not allowed to modify their own power levels in the room.",
        ));
        return Err!("User is not allowed to modify their own power levels in the room.",);
    }

    let mut power_levels_content = room_power_levels.unwrap_or_default();
@@ -777,34 +713,34 @@ pub(super) async fn force_demote(
    )
    .await?;

    Ok(RoomMessageEventContent::notice_markdown(format!(
    self.write_str(&format!(
        "User {user_id} demoted themselves to the room default power level in {room_id} - \
        {event_id}"
    )))
    ))
    .await
}

#[admin_command]
pub(super) async fn make_user_admin(&self, user_id: String) -> Result<RoomMessageEventContent> {
pub(super) async fn make_user_admin(&self, user_id: String) -> Result {
    let user_id = parse_local_user_id(self.services, &user_id)?;

    assert!(
        self.services.globals.user_is_local(&user_id),
        "Parsed user_id must be a local user"
    );

    self.services.admin.make_user_admin(&user_id).await?;

    Ok(RoomMessageEventContent::notice_markdown(format!(
        "{user_id} has been granted admin privileges.",
    )))
    self.write_str(&format!("{user_id} has been granted admin privileges.",))
        .await
}

#[admin_command]
pub(super) async fn put_room_tag(
    &self,
    user_id: String,
    room_id: Box<RoomId>,
    room_id: OwnedRoomId,
    tag: String,
) -> Result<RoomMessageEventContent> {
) -> Result {
    let user_id = parse_active_local_user_id(self.services, &user_id).await?;

    let mut tags_event = self
@@ -831,18 +767,19 @@ pub(super) async fn put_room_tag(
    )
    .await?;

    Ok(RoomMessageEventContent::text_plain(format!(
    self.write_str(&format!(
        "Successfully updated room account data for {user_id} and room {room_id} with tag {tag}"
    )))
    ))
    .await
}

#[admin_command]
pub(super) async fn delete_room_tag(
    &self,
    user_id: String,
    room_id: Box<RoomId>,
    room_id: OwnedRoomId,
    tag: String,
) -> Result<RoomMessageEventContent> {
) -> Result {
    let user_id = parse_active_local_user_id(self.services, &user_id).await?;

    let mut tags_event = self
@@ -866,18 +803,15 @@ pub(super) async fn delete_room_tag(
    )
    .await?;

    Ok(RoomMessageEventContent::text_plain(format!(
    self.write_str(&format!(
        "Successfully updated room account data for {user_id} and room {room_id}, deleting room \
        tag {tag}"
    )))
    ))
    .await
}

#[admin_command]
pub(super) async fn get_room_tags(
    &self,
    user_id: String,
    room_id: Box<RoomId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId) -> Result {
    let user_id = parse_active_local_user_id(self.services, &user_id).await?;

    let tags_event = self
@@ -889,17 +823,12 @@ pub(super) async fn get_room_tags(
        content: TagEventContent { tags: BTreeMap::new() },
    });

    Ok(RoomMessageEventContent::notice_markdown(format!(
        "```\n{:#?}\n```",
        tags_event.content.tags
    )))
    self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags))
        .await
}
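Several signatures above also swap `Box<RoomId>` for ruma's `OwnedRoomId`, the crate's preferred owned identifier type. A small illustration of how the owned and borrowed forms relate; the `tag_key` helper is made up for the example:

use ruma::{OwnedRoomId, RoomId};

// Borrowing API: takes the unsized &RoomId.
fn tag_key(room_id: &RoomId, tag: &str) -> String {
    format!("{room_id}\t{tag}")
}

fn main() -> Result<(), ruma::IdParseError> {
    // OwnedRoomId replaces the older Box<RoomId> spelling.
    let room_id: OwnedRoomId = OwnedRoomId::try_from("!abc:example.org")?;
    // Deref yields &RoomId wherever a borrow is needed.
    println!("{}", tag_key(&room_id, "m.server_notice"));
    Ok(())
}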
|
||||
|
||||
#[admin_command]
pub(super) async fn redact_event(
    &self,
    event_id: Box<EventId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
    let Ok(event) = self
        .services
        .rooms
@@ -907,20 +836,18 @@ pub(super) async fn redact_event(
        .get_non_outlier_pdu(&event_id)
        .await
    else {
        return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database."));
        return Err!("Event does not exist in our database.");
    };

    if event.is_redacted() {
        return Ok(RoomMessageEventContent::text_plain("Event is already redacted."));
        return Err!("Event is already redacted.");
    }

    let room_id = event.room_id;
    let sender_user = event.sender;

    if !self.services.globals.user_is_local(&sender_user) {
        return Ok(RoomMessageEventContent::text_plain(
            "This command only works on local users.",
        ));
        return Err!("This command only works on local users.");
    }

    let reason = format!(
@@ -949,9 +876,8 @@ pub(super) async fn redact_event(
        .await?
    };

    let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}");

    self.write_str(out.as_str()).await?;

    Ok(RoomMessageEventContent::text_plain(""))
    self.write_str(&format!(
        "Successfully redacted event. Redaction event ID: {redaction_event_id}"
    ))
    .await
}

@@ -2,7 +2,7 @@ mod commands;

use clap::Subcommand;
use conduwuit::Result;
use ruma::{EventId, OwnedRoomOrAliasId, RoomId};
use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId};

use crate::admin_command_dispatch;

@@ -102,21 +102,21 @@ pub(super) enum UserCommand {
    /// room's internal ID, and the tag name `m.server_notice`.
    PutRoomTag {
        user_id: String,
        room_id: Box<RoomId>,
        room_id: OwnedRoomId,
        tag: String,
    },

    /// - Deletes the room tag for the specified user and room ID
    DeleteRoomTag {
        user_id: String,
        room_id: Box<RoomId>,
        room_id: OwnedRoomId,
        tag: String,
    },

    /// - Gets all the room tags for the specified user and room ID
    GetRoomTags {
        user_id: String,
        room_id: Box<RoomId>,
        room_id: OwnedRoomId,
    },

    /// - Attempts to forcefully redact the specified event ID from the sender
@@ -124,7 +124,7 @@ pub(super) enum UserCommand {
    ///
    /// This is only valid for local users
    RedactEvent {
        event_id: Box<EventId>,
        event_id: OwnedEventId,
    },

    /// - Force joins a specified list of local users to join the specified

@@ -1,3 +1,5 @@
#![allow(dead_code)]

use conduwuit_core::{Err, Result, err};
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
use service::Services;

@@ -17,21 +17,50 @@ crate-type = [
]

[features]
element_hacks = []
release_max_log_level = [
    "tracing/max_level_trace",
    "tracing/release_max_level_info",
    "log/max_level_trace",
    "log/release_max_level_info",
brotli_compression = [
    "conduwuit-core/brotli_compression",
    "conduwuit-service/brotli_compression",
    "reqwest/brotli",
]
zstd_compression = [
    "reqwest/zstd",
element_hacks = [
    "conduwuit-service/element_hacks",
]
gzip_compression = [
    "conduwuit-core/gzip_compression",
    "conduwuit-service/gzip_compression",
    "reqwest/gzip",
]
brotli_compression = [
    "reqwest/brotli",
io_uring = [
    "conduwuit-service/io_uring",
]
jemalloc = [
    "conduwuit-core/jemalloc",
    "conduwuit-service/jemalloc",
]
jemalloc_conf = [
    "conduwuit-core/jemalloc_conf",
    "conduwuit-service/jemalloc_conf",
]
jemalloc_prof = [
    "conduwuit-core/jemalloc_prof",
    "conduwuit-service/jemalloc_prof",
]
jemalloc_stats = [
    "conduwuit-core/jemalloc_stats",
    "conduwuit-service/jemalloc_stats",
]
release_max_log_level = [
    "conduwuit-core/release_max_log_level",
    "conduwuit-service/release_max_log_level",
    "log/max_level_trace",
    "log/release_max_level_info",
    "tracing/max_level_trace",
    "tracing/release_max_level_info",
]
zstd_compression = [
    "conduwuit-core/zstd_compression",
    "conduwuit-service/zstd_compression",
    "reqwest/zstd",
]

[dependencies]
@@ -42,7 +71,6 @@ axum.workspace = true
base64.workspace = true
bytes.workspace = true
conduwuit-core.workspace = true
conduwuit-database.workspace = true
conduwuit-service.workspace = true
const-str.workspace = true
futures.workspace = true

@@ -52,13 +52,8 @@ pub(crate) async fn get_public_rooms_filtered_route(
) -> Result<get_public_rooms_filtered::v3::Response> {
    if let Some(server) = &body.server {
        if services
            .config
            .forbidden_remote_room_directory_server_names
            .is_match(server.host())
            || services
            .config
            .forbidden_remote_server_names
            .is_match(server.host())
            .moderation
            .is_remote_server_room_directory_forbidden(server)
        {
            return Err!(Request(Forbidden("Server is banned on this homeserver.")));
        }
@@ -92,15 +87,7 @@ pub(crate) async fn get_public_rooms_route(
    body: Ruma<get_public_rooms::v3::Request>,
) -> Result<get_public_rooms::v3::Response> {
    if let Some(server) = &body.server {
        if services
            .config
            .forbidden_remote_room_directory_server_names
            .is_match(server.host())
            || services
            .config
            .forbidden_remote_server_names
            .is_match(server.host())
        {
        if services.moderation.is_remote_server_forbidden(server) {
            return Err!(Request(Forbidden("Server is banned on this homeserver.")));
        }
    }
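These directory hunks replace inline regex matching against two config lists with named predicates on a moderation service. A rough sketch of what such a service might look like; the field names mirror the old config options, while the `RegexSet` representation and `&str` parameter are assumptions:

use regex::RegexSet;

struct Moderation {
    forbidden_remote_server_names: RegexSet,
    forbidden_remote_room_directory_server_names: RegexSet,
}

impl Moderation {
    // Full federation ban for a remote server.
    fn is_remote_server_forbidden(&self, server_host: &str) -> bool {
        self.forbidden_remote_server_names.is_match(server_host)
    }

    // Room-directory ban; a fully banned server is implicitly banned here too.
    fn is_remote_server_room_directory_forbidden(&self, server_host: &str) -> bool {
        self.is_remote_server_forbidden(server_host)
            || self
                .forbidden_remote_room_directory_server_names
                .is_match(server_host)
    }
}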
|
||||
|
|
|
@@ -1,6 +1,6 @@
use std::{
    borrow::Borrow,
    collections::{BTreeMap, HashMap, HashSet},
    collections::{HashMap, HashSet},
    iter::once,
    net::IpAddr,
    sync::Arc,
@@ -9,7 +9,7 @@ use std::{
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
    Err, Result, at, debug, debug_info, debug_warn, err, error, info,
    Err, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, is_matching,
    matrix::{
        StateKey,
        pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json},
@@ -17,7 +17,12 @@ use conduwuit::{
    },
    result::{FlatOk, NotFound},
    trace,
    utils::{self, IterStream, ReadyExt, shuffle},
    utils::{
        self, FutureBoolExt,
        future::ReadyEqExt,
        shuffle,
        stream::{BroadbandExt, IterStream, ReadyExt},
    },
    warn,
};
use conduwuit_service::{
@@ -28,7 +33,7 @@ use conduwuit_service::{
        state_compressor::{CompressedState, HashSetCompressStateEvent},
    },
};
use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join};
use futures::{FutureExt, StreamExt, TryFutureExt, join, pin_mut};
use ruma::{
    CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName,
    OwnedUserId, RoomId, RoomVersionId, ServerName, UserId,
@@ -52,7 +57,6 @@ use ruma::{
        room::{
            join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent},
            member::{MembershipState, RoomMemberEventContent},
            message::RoomMessageEventContent,
        },
    },
};
@@ -79,9 +83,8 @@ async fn banned_room_check(
    if let Some(room_id) = room_id {
        if services.rooms.metadata.is_banned(room_id).await
            || services
                .config
                .forbidden_remote_server_names
                .is_match(room_id.server_name().unwrap().host())
                .moderation
                .is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid"))
        {
            warn!(
                "User {user_id} who is not an admin attempted to send an invite for or \
@@ -96,12 +99,11 @@ async fn banned_room_check(
            if services.server.config.admin_room_notices {
                services
                    .admin
                    .send_message(RoomMessageEventContent::text_plain(format!(
                    .send_text(&format!(
                        "Automatically deactivating user {user_id} due to attempted banned \
                        room join from IP {client_ip}"
                    )))
                    .await
                    .ok();
                    ))
                    .await;
            }

            let all_joined_rooms: Vec<OwnedRoomId> = services
@@ -136,12 +138,11 @@ async fn banned_room_check(
            if services.server.config.admin_room_notices {
                services
                    .admin
                    .send_message(RoomMessageEventContent::text_plain(format!(
                    .send_text(&format!(
                        "Automatically deactivating user {user_id} due to attempted banned \
                        room join from IP {client_ip}"
                    )))
                    .await
                    .ok();
                    ))
                    .await;
            }

            let all_joined_rooms: Vec<OwnedRoomId> = services
@@ -366,10 +367,10 @@ pub(crate) async fn knock_room_route(
    InsecureClientIp(client): InsecureClientIp,
    body: Ruma<knock_room::v3::Request>,
) -> Result<knock_room::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let body = body.body;
    let sender_user = body.sender_user();
    let body = &body.body;

    let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) {
    let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) {
        | Ok(room_id) => {
            banned_room_check(
                &services,
@@ -493,7 +494,7 @@ pub(crate) async fn invite_user_route(
    let sender_user = body.sender_user();

    if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites {
        info!(
        debug_error!(
            "User {sender_user} is not an admin and attempted to send an invite to room {}",
            &body.room_id
        );
@@ -722,12 +723,10 @@ pub(crate) async fn forget_room_route(

    let joined = services.rooms.state_cache.is_joined(user_id, room_id);
    let knocked = services.rooms.state_cache.is_knocked(user_id, room_id);
    let left = services.rooms.state_cache.is_left(user_id, room_id);
    let invited = services.rooms.state_cache.is_invited(user_id, room_id);

    let (joined, knocked, left, invited) = join4(joined, knocked, left, invited).await;

    if joined || knocked || invited {
    pin_mut!(joined, knocked, invited);
    if joined.or(knocked).or(invited).await {
        return Err!(Request(Unknown("You must leave the room before forgetting it")));
    }

@@ -741,11 +740,11 @@ pub(crate) async fn forget_room_route(
        return Err!(Request(Unknown("No membership event was found, room was never joined")));
    }

    if left
        || membership.is_ok_and(|member| {
            member.membership == MembershipState::Leave
                || member.membership == MembershipState::Ban
        }) {
    let non_membership = membership
        .map(|member| member.membership)
        .is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban));

    if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await {
        services.rooms.state_cache.forget(room_id, user_id);
    }
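The forget-room hunk drops the eager `join4` of all four membership futures in favor of pinning three of them and chaining `.or(..)`, so later checks are only awaited when the earlier ones come back false. A hand-rolled stand-in for that combinator, assuming short-circuit (not racing) semantics like conduwuit's `FutureBoolExt`:

use std::future::Future;

// Await `a`; only await `b` if `a` was false.
async fn or(a: impl Future<Output = bool>, b: impl Future<Output = bool>) -> bool {
    a.await || b.await
}

async fn must_leave_first(
    joined: impl Future<Output = bool>,
    knocked: impl Future<Output = bool>,
    invited: impl Future<Output = bool>,
) -> Result<(), &'static str> {
    if or(joined, or(knocked, invited)).await {
        return Err("You must leave the room before forgetting it");
    }
    Ok(())
}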
|
||||
|
||||
|
@@ -866,32 +865,32 @@ pub(crate) async fn joined_members_route(
    State(services): State<crate::State>,
    body: Ruma<joined_members::v3::Request>,
) -> Result<joined_members::v3::Response> {
    let sender_user = body.sender_user();

    if !services
        .rooms
        .state_accessor
        .user_can_see_state_events(sender_user, &body.room_id)
        .user_can_see_state_events(body.sender_user(), &body.room_id)
        .await
    {
        return Err!(Request(Forbidden("You don't have permission to view this room.")));
    }

    let joined: BTreeMap<OwnedUserId, RoomMember> = services
        .rooms
        .state_cache
        .room_members(&body.room_id)
        .map(ToOwned::to_owned)
        .then(|user| async move {
            (user.clone(), RoomMember {
                display_name: services.users.displayname(&user).await.ok(),
                avatar_url: services.users.avatar_url(&user).await.ok(),
            })
        })
        .collect()
        .await;
    Ok(joined_members::v3::Response {
        joined: services
            .rooms
            .state_cache
            .room_members(&body.room_id)
            .map(ToOwned::to_owned)
            .broad_then(|user_id| async move {
                let member = RoomMember {
                    display_name: services.users.displayname(&user_id).await.ok(),
                    avatar_url: services.users.avatar_url(&user_id).await.ok(),
                };

    Ok(joined_members::v3::Response { joined })
                (user_id, member)
            })
            .collect()
            .await,
    })
}

pub async fn join_room_by_id_helper(
@@ -1118,9 +1117,10 @@ async fn join_room_by_id_helper_remote(
    })?;

    if signed_event_id != event_id {
        return Err!(Request(BadJson(
            warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID")
        )));
        return Err!(Request(BadJson(warn!(
            %signed_event_id, %event_id,
            "Server {remote_server} sent event with wrong event ID"
        ))));
    }

    match signed_value["signatures"]
@@ -1696,19 +1696,18 @@ pub(crate) async fn invite_helper(
    })?;

    if pdu.event_id != event_id {
        return Err!(Request(BadJson(
            warn!(%pdu.event_id, %event_id, "Server {} sent event with wrong event ID", user_id.server_name())
        )));
        return Err!(Request(BadJson(warn!(
            %pdu.event_id, %event_id,
            "Server {} sent event with wrong event ID",
            user_id.server_name()
        ))));
    }

    let origin: OwnedServerName = serde_json::from_value(
        serde_json::to_value(
            value
                .get("origin")
                .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?,
        )
        .expect("CanonicalJson is valid json value"),
    )
    let origin: OwnedServerName = serde_json::from_value(serde_json::to_value(
        value
            .get("origin")
            .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?,
    )?)
    .map_err(|e| {
        err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}"))))
    })?;
@@ -1818,9 +1817,11 @@ pub async fn leave_room(
        blurhash: None,
    };

    if services.rooms.metadata.is_banned(room_id).await
        || services.rooms.metadata.is_disabled(room_id).await
    {
    let is_banned = services.rooms.metadata.is_banned(room_id);
    let is_disabled = services.rooms.metadata.is_disabled(room_id);

    pin_mut!(is_banned, is_disabled);
    if is_banned.or(is_disabled).await {
        // the room is banned/disabled, the room must be rejected locally since we
        // cant/dont want to federate with this server
        services
@@ -1840,18 +1841,24 @@ pub async fn leave_room(
        return Ok(());
    }

    // Ask a remote server if we don't have this room and are not knocking on it
    if !services
    let dont_have_room = services
        .rooms
        .state_cache
        .server_in_room(services.globals.server_name(), room_id)
        .await && !services
        .eq(&false);

    let not_knocked = services
        .rooms
        .state_cache
        .is_knocked(user_id, room_id)
        .await
    {
        if let Err(e) = remote_leave_room(services, user_id, room_id).await {
        .eq(&false);

    // Ask a remote server if we don't have this room and are not knocking on it
    if dont_have_room.and(not_knocked).await {
        if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone())
            .boxed()
            .await
        {
            warn!(%user_id, "Failed to leave room {room_id} remotely: {e}");
            // Don't tell the client about this error
        }
@@ -1936,6 +1943,7 @@ async fn remote_leave_room(
    services: &Services,
    user_id: &UserId,
    room_id: &RoomId,
    reason: Option<String>,
) -> Result<()> {
    let mut make_leave_response_and_server =
        Err!(BadServerResponse("No remote server available to assist in leaving {room_id}."));
@@ -2052,6 +2060,12 @@ async fn remote_leave_room(
            .expect("Timestamp is valid js_int value"),
        ),
    );
    // Inject the reason key into the event content dict if it exists
    if let Some(reason) = reason {
        if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") {
            content.insert("reason".to_owned(), CanonicalJsonValue::String(reason));
        }
    }

    // room v3 and above removed the "event_id" field from remote PDU format
    match room_version_id {
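The new `reason` parameter is threaded through to the leave-event stub by mutating its canonical-JSON content object. A self-contained sketch of the same injection, using serde_json in place of ruma's `CanonicalJsonValue`:

use serde_json::{Value, json};

fn inject_reason(leave_event_stub: &mut Value, reason: Option<String>) {
    // Only touch the stub when a reason was given and "content" is an object.
    if let (Some(reason), Some(Value::Object(content))) =
        (reason, leave_event_stub.get_mut("content"))
    {
        content.insert("reason".to_owned(), Value::String(reason));
    }
}

fn main() {
    let mut stub = json!({ "content": { "membership": "leave" } });
    inject_reason(&mut stub, Some("admin action".to_owned()));
    assert_eq!(stub["content"]["reason"], "admin action");
}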
|
||||
|
|
|
@@ -1,3 +1,5 @@
use core::panic;

use axum::extract::State;
use conduwuit::{
    Err, Result, at,
@@ -132,8 +134,6 @@ pub(crate) async fn get_message_events_route(
        .take(limit)
        .collect()
        .await;
    // let appservice_id = body.appservice_info.map(|appservice|
    // appservice.registration.id);

    let lazy_loading_context = lazy_loading::Context {
        user_id: sender_user,
@@ -143,7 +143,7 @@ pub(crate) async fn get_message_events_route(
            if let Some(registration) = body.appservice_info.as_ref() {
                <&DeviceId>::from(registration.registration.id.as_str())
            } else {
                <&DeviceId>::from("")
                panic!("No device_id provided and no appservice registration found, this should be unreachable");
            },
        },
        room_id,
@@ -274,12 +274,13 @@ pub(crate) async fn is_ignored_pdu(
    let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok();

    let ignored_server = services
        .config
        .forbidden_remote_server_names
        .is_match(pdu.sender().server_name().host());
        .moderation
        .is_remote_server_ignored(pdu.sender().server_name());

    if ignored_type
        && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await)
        && (ignored_server
            || (!services.config.send_messages_from_ignored_users_to_client
                && services.users.user_is_ignored(&pdu.sender, user_id).await))
    {
        return true;
    }
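The updated predicate adds a config escape hatch: messages from ignored users still reach the client when `send_messages_from_ignored_users_to_client` is enabled. The decision table reduces to this pure function (flag and method names taken from the diff):

fn is_ignored(
    ignored_type: bool,
    ignored_server: bool,
    sender_ignored_by_user: bool,
    send_ignored_to_client: bool,
) -> bool {
    // Server-level ignores always apply; user-level ignores can be
    // overridden by the config flag.
    ignored_type
        && (ignored_server || (!send_ignored_to_client && sender_ignored_by_user))
}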
|
||||
|
|
|
@@ -107,7 +107,6 @@ pub(crate) async fn create_room_route(

        return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed")));
    }

    let _short_id = services
        .rooms
        .short
@@ -606,24 +605,42 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result<Own
        return Err(Error::BadRequest(ErrorKind::Unknown, "Custom room ID is forbidden."));
    }

    if custom_room_id.contains(':') {
        return Err(Error::BadRequest(
            ErrorKind::InvalidParam,
            "Custom room ID contained `:` which is not allowed. Please note that this expects a \
            localpart, not the full room ID.",
        ));
    } else if custom_room_id.contains(char::is_whitespace) {
        return Err(Error::BadRequest(
            ErrorKind::InvalidParam,
            "Custom room ID contained spaces which is not valid.",
        ));
    }

    let server_name = services.globals.server_name();
    let full_room_id = format!("!{custom_room_id}:{server_name}");

    OwnedRoomId::parse(full_room_id)
    let mut room_id = custom_room_id.to_owned();
    if custom_room_id.contains(':') {
        if !custom_room_id.starts_with('!') {
            return Err(Error::BadRequest(
                ErrorKind::InvalidParam,
                "Custom room ID contains an unexpected `:` which is not allowed.",
            ));
        }
    } else if custom_room_id.starts_with('!') {
        return Err(Error::BadRequest(
            ErrorKind::InvalidParam,
            "Room ID is prefixed with !, but is not fully qualified. You likely did not want \
            this.",
        ));
    } else {
        room_id = format!("!{custom_room_id}:{server_name}");
    }
    OwnedRoomId::parse(room_id)
        .map_err(Into::into)
        .inspect(|full_room_id| debug_info!(?full_room_id, "Full custom room ID"))
        .and_then(|full_room_id| {
            if full_room_id
                .server_name()
                .expect("failed to extract server name from room ID")
                != server_name
            {
                Err(Error::BadRequest(
                    ErrorKind::InvalidParam,
                    "Custom room ID must be on this server.",
                ))
            } else {
                Ok(full_room_id)
            }
        })
        .inspect(|full_room_id| {
            debug_info!(?full_room_id, "Full custom room ID");
        })
        .inspect_err(|e| warn!(?e, ?custom_room_id, "Failed to create room with custom room ID",))
}
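The reworked check now accepts either a bare localpart or a fully qualified `!localpart:server` ID, rejecting ambiguous in-between forms. The acceptance rules distilled into a standalone helper (the cross-server verification that follows in the real code is omitted here):

fn normalize_custom_room_id(custom: &str, server_name: &str) -> Result<String, &'static str> {
    if custom.contains(':') {
        // A colon is only valid inside a fully qualified !localpart:server ID.
        if !custom.starts_with('!') {
            return Err("Custom room ID contains an unexpected `:`");
        }
        Ok(custom.to_owned())
    } else if custom.starts_with('!') {
        Err("Room ID is prefixed with !, but is not fully qualified")
    } else {
        // Bare localpart: qualify it with the local server name.
        Ok(format!("!{custom}:{server_name}"))
    }
}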
|
||||
|
|
|
@@ -5,16 +5,12 @@ mod v5;
use conduwuit::{
    Error, PduCount, Result,
    matrix::pdu::PduEvent,
    utils::{
        IterStream,
        stream::{BroadbandExt, ReadyExt, TryIgnore},
    },
    utils::stream::{BroadbandExt, ReadyExt, TryIgnore},
};
use conduwuit_service::Services;
use futures::{StreamExt, pin_mut};
use ruma::{
    RoomId, UserId,
    directory::RoomTypeFilter,
    events::TimelineEventType::{
        self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker,
    },
@@ -87,33 +83,3 @@ async fn share_encrypted_room(
    })
    .await
}

pub(crate) async fn filter_rooms<'a>(
    services: &Services,
    rooms: &[&'a RoomId],
    filter: &[RoomTypeFilter],
    negate: bool,
) -> Vec<&'a RoomId> {
    rooms
        .iter()
        .stream()
        .filter_map(|r| async move {
            let room_type = services.rooms.state_accessor.get_room_type(r).await;

            if room_type.as_ref().is_err_and(|e| !e.is_not_found()) {
                return None;
            }

            let room_type_filter = RoomTypeFilter::from(room_type.ok());

            let include = if negate {
                !filter.contains(&room_type_filter)
            } else {
                filter.is_empty() || filter.contains(&room_type_filter)
            };

            include.then_some(r)
        })
        .collect()
        .await
}
}
|
||||
|
|
|
@ -14,8 +14,8 @@ use conduwuit::{
|
|||
pair_of, ref_at,
|
||||
result::FlatOk,
|
||||
utils::{
|
||||
self, BoolExt, IterStream, ReadyExt, TryFutureExtExt,
|
||||
future::OptionStream,
|
||||
self, BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt,
|
||||
future::{OptionStream, ReadyEqExt},
|
||||
math::ruma_from_u64,
|
||||
stream::{BroadbandExt, Tools, TryExpect, WidebandExt},
|
||||
},
|
||||
|
@ -32,6 +32,7 @@ use conduwuit_service::{
|
|||
use futures::{
|
||||
FutureExt, StreamExt, TryFutureExt, TryStreamExt,
|
||||
future::{OptionFuture, join, join3, join4, join5, try_join, try_join4},
|
||||
pin_mut,
|
||||
};
|
||||
use ruma::{
|
||||
DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId,
|
||||
|
@ -433,10 +434,14 @@ async fn handle_left_room(
|
|||
return Ok(None);
|
||||
}
|
||||
|
||||
if !services.rooms.metadata.exists(room_id).await
|
||||
|| services.rooms.metadata.is_disabled(room_id).await
|
||||
|| services.rooms.metadata.is_banned(room_id).await
|
||||
{
|
||||
let is_not_found = services.rooms.metadata.exists(room_id).eq(&false);
|
||||
|
||||
let is_disabled = services.rooms.metadata.is_disabled(room_id);
|
||||
|
||||
let is_banned = services.rooms.metadata.is_banned(room_id);
|
||||
|
||||
pin_mut!(is_not_found, is_disabled, is_banned);
|
||||
if is_not_found.or(is_disabled).or(is_banned).await {
|
||||
// This is just a rejected invite, not a room we know
|
||||
// Insert a leave event anyways for the client
|
||||
let event = PduEvent {
|
||||
|
|
|
@@ -6,23 +6,27 @@ use std::{

 use axum::extract::State;
 use conduwuit::{
-	Error, PduCount, PduEvent, Result, debug, error, extract_variant,
+	Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant,
+	matrix::TypeStateKey,
 	utils::{
 		BoolExt, IterStream, ReadyExt, TryFutureExtExt,
 		math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
 	},
 	warn,
 };
+use conduwuit_service::{
+	Services,
+	rooms::read_receipt::pack_receipts,
+	sync::{into_db_key, into_snake_key},
+};
 use futures::{FutureExt, StreamExt, TryFutureExt};
 use ruma::{
 	MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId,
-	api::client::{
-		error::ErrorKind,
-		sync::sync_events::{
-			self, DeviceLists, UnreadNotificationsCount,
-			v4::{SlidingOp, SlidingSyncRoomHero},
-		},
+	api::client::sync::sync_events::{
+		self, DeviceLists, UnreadNotificationsCount,
+		v4::{SlidingOp, SlidingSyncRoomHero},
 	},
 	directory::RoomTypeFilter,
 	events::{
 		AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType,
 		TimelineEventType::*,

@@ -31,15 +35,15 @@ use ruma::{
 	serde::Raw,
 	uint,
 };
-use service::rooms::read_receipt::pack_receipts;

 use super::{load_timeline, share_encrypted_room};
 use crate::{
 	Ruma,
-	client::{DEFAULT_BUMP_TYPES, filter_rooms, ignored_filter, sync::v5::TodoRooms},
+	client::{DEFAULT_BUMP_TYPES, ignored_filter},
 };

-pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
+type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
+const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";

 /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
 ///

@@ -50,10 +54,11 @@ pub(crate) async fn sync_events_v4_route(
 ) -> Result<sync_events::v4::Response> {
 	debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted");
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-	let sender_device = body.sender_device.expect("user is authenticated");
+	let sender_device = body.sender_device.as_ref().expect("user is authenticated");
 	let mut body = body.body;

 	// Setup watchers, so if there's no response, we can wait for them
-	let watcher = services.sync.watch(sender_user, &sender_device);
+	let watcher = services.sync.watch(sender_user, sender_device);

 	let next_batch = services.globals.next_count()?;

@@ -68,33 +73,21 @@ pub(crate) async fn sync_events_v4_route(
 		.and_then(|string| string.parse().ok())
 		.unwrap_or(0);

-	if globalsince != 0
-		&& !services
-			.sync
-			.remembered(sender_user.clone(), sender_device.clone(), conn_id.clone())
-	{
+	let db_key = into_db_key(sender_user, sender_device, conn_id.clone());
+	if globalsince != 0 && !services.sync.remembered(&db_key) {
 		debug!("Restarting sync stream because it was gone from the database");
-		return Err(Error::Request(
-			ErrorKind::UnknownPos,
-			"Connection data lost since last time".into(),
-			http::StatusCode::BAD_REQUEST,
-		));
+		return Err!(Request(UnknownPos("Connection data lost since last time")));
 	}

 	if globalsince == 0 {
-		services.sync.forget_sync_request_connection(
-			sender_user.clone(),
-			sender_device.clone(),
-			conn_id.clone(),
-		);
+		services.sync.forget_sync_request_connection(&db_key);
 	}

 	// Get sticky parameters from cache
-	let known_rooms = services.sync.update_sync_request_with_cache(
-		sender_user.clone(),
-		sender_device.clone(),
-		&mut body,
-	);
+	let snake_key = into_snake_key(sender_user, sender_device, conn_id.clone());
+	let known_rooms = services
+		.sync
+		.update_sync_request_with_cache(&snake_key, &mut body);

 	let all_joined_rooms: Vec<_> = services
 		.rooms

@@ -136,7 +129,7 @@ pub(crate) async fn sync_events_v4_route(
 	if body.extensions.to_device.enabled.unwrap_or(false) {
 		services
 			.users
-			.remove_to_device_events(sender_user, &sender_device, globalsince)
+			.remove_to_device_events(sender_user, sender_device, globalsince)
 			.await;
 	}

@@ -261,7 +254,7 @@ pub(crate) async fn sync_events_v4_route(
 				if let Some(Ok(user_id)) =
 					pdu.state_key.as_deref().map(UserId::parse)
 				{
-					if user_id == *sender_user {
+					if user_id == sender_user {
 						continue;
 					}

@@ -299,7 +292,7 @@ pub(crate) async fn sync_events_v4_route(
 					.state_cache
 					.room_members(room_id)
 					// Don't send key updates from the sender to the sender
-					.ready_filter(|user_id| sender_user != user_id)
+					.ready_filter(|&user_id| sender_user != user_id)
 					// Only send keys if the sender doesn't share an encrypted room with the target
 					// already
 					.filter_map(|user_id| {

@@ -425,10 +418,9 @@ pub(crate) async fn sync_events_v4_route(
 		});

 		if let Some(conn_id) = &body.conn_id {
+			let db_key = into_db_key(sender_user, sender_device, conn_id);
 			services.sync.update_sync_known_rooms(
-				sender_user,
-				&sender_device,
-				conn_id.clone(),
+				&db_key,
 				list_id.clone(),
 				new_known_rooms,
 				globalsince,

@@ -478,23 +470,20 @@ pub(crate) async fn sync_events_v4_route(
 	}

 	if let Some(conn_id) = &body.conn_id {
+		let db_key = into_db_key(sender_user, sender_device, conn_id);
 		services.sync.update_sync_known_rooms(
-			sender_user,
-			&sender_device,
-			conn_id.clone(),
+			&db_key,
 			"subscriptions".to_owned(),
 			known_subscription_rooms,
 			globalsince,
 		);
 	}

-	if let Some(conn_id) = &body.conn_id {
-		services.sync.update_sync_subscriptions(
-			sender_user.clone(),
-			sender_device.clone(),
-			conn_id.clone(),
-			body.room_subscriptions,
-		);
+	if let Some(conn_id) = body.conn_id.clone() {
+		let db_key = into_db_key(sender_user, sender_device, conn_id);
+		services
+			.sync
+			.update_sync_subscriptions(&db_key, body.room_subscriptions);
 	}

 	let mut rooms = BTreeMap::new();

@@ -648,7 +637,7 @@ pub(crate) async fn sync_events_v4_route(
 				.rooms
 				.state_cache
 				.room_members(room_id)
-				.ready_filter(|member| member != sender_user)
+				.ready_filter(|&member| member != sender_user)
 				.filter_map(|user_id| {
 					services
 						.rooms

@@ -787,7 +776,7 @@ pub(crate) async fn sync_events_v4_route(
 			.users
 			.get_to_device_events(
 				sender_user,
-				&sender_device,
+				sender_device,
 				Some(globalsince),
 				Some(next_batch),
 			)

@@ -805,7 +794,7 @@ pub(crate) async fn sync_events_v4_route(
 		},
 		device_one_time_keys_count: services
 			.users
-			.count_one_time_keys(sender_user, &sender_device)
+			.count_one_time_keys(sender_user, sender_device)
 			.await,
 		// Fallback keys are not yet supported
 		device_unused_fallback_key_types: None,

@@ -817,3 +806,33 @@ pub(crate) async fn sync_events_v4_route(
 		delta_token: None,
 	})
 }
+
+async fn filter_rooms<'a>(
+	services: &Services,
+	rooms: &[&'a RoomId],
+	filter: &[RoomTypeFilter],
+	negate: bool,
+) -> Vec<&'a RoomId> {
+	rooms
+		.iter()
+		.stream()
+		.filter_map(|r| async move {
+			let room_type = services.rooms.state_accessor.get_room_type(r).await;
+
+			if room_type.as_ref().is_err_and(|e| !e.is_not_found()) {
+				return None;
+			}
+
+			let room_type_filter = RoomTypeFilter::from(room_type.ok());
+
+			let include = if negate {
+				!filter.contains(&room_type_filter)
+			} else {
+				filter.is_empty() || filter.contains(&room_type_filter)
+			};
+
+			include.then_some(r)
+		})
+		.collect()
+		.await
+}
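
A recurring v4 change above: call sites stop passing (and cloning) user, device, and connection ID separately, and instead build one composite key with `into_db_key` / `into_snake_key`. The real helpers live in `conduwuit_service::sync`; a hypothetical reduction of the idea:

use ruma::{DeviceId, OwnedDeviceId, OwnedUserId, UserId};

// Hypothetical stand-in: bundle the per-connection identity into a single
// hashable key so cache lookups take one borrow instead of three clones.
type DbKey = (OwnedUserId, OwnedDeviceId, Option<String>);

fn into_db_key(user: &UserId, device: &DeviceId, conn_id: Option<String>) -> DbKey {
    (user.to_owned(), device.to_owned(), conn_id)
}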
@@ -1,31 +1,35 @@
 use std::{
 	cmp::{self, Ordering},
 	collections::{BTreeMap, BTreeSet, HashMap, HashSet},
+	ops::Deref,
 	time::Duration,
 };

 use axum::extract::State;
 use conduwuit::{
-	Error, Result, debug, error, extract_variant,
+	Err, Error, Result, error, extract_variant, is_equal_to,
 	matrix::{
 		TypeStateKey,
 		pdu::{PduCount, PduEvent},
 	},
+	trace,
 	utils::{
-		BoolExt, IterStream, ReadyExt, TryFutureExtExt,
+		BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt,
+		future::ReadyEqExt,
 		math::{ruma_from_usize, usize_from_ruma},
 	},
 	warn,
 };
-use conduwuit_service::rooms::read_receipt::pack_receipts;
-use futures::{FutureExt, StreamExt, TryFutureExt};
+use conduwuit_service::{Services, rooms::read_receipt::pack_receipts, sync::into_snake_key};
+use futures::{
+	FutureExt, Stream, StreamExt, TryFutureExt,
+	future::{OptionFuture, join3, try_join4},
+	pin_mut,
+};
 use ruma::{
 	DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId,
-	api::client::{
-		error::ErrorKind,
-		sync::sync_events::{self, DeviceLists, UnreadNotificationsCount},
-	},
+	api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount},
 	directory::RoomTypeFilter,
 	events::{
 		AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType,
 		room::member::{MembershipState, RoomMemberEventContent},

@@ -34,13 +38,15 @@ use ruma::{
 	uint,
 };

-use super::{filter_rooms, share_encrypted_room};
+use super::share_encrypted_room;
 use crate::{
 	Ruma,
 	client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline},
 };

 type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request);
+type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
+type KnownRooms = BTreeMap<String, BTreeMap<OwnedRoomId, u64>>;

 /// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync`
 /// ([MSC4186])

@@ -53,7 +59,7 @@ type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request
 /// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575
 /// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186
 pub(crate) async fn sync_events_v5_route(
-	State(services): State<crate::State>,
+	State(ref services): State<crate::State>,
 	body: Ruma<sync_events::v5::Request>,
 ) -> Result<sync_events::v5::Response> {
 	debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted");
@@ -74,95 +80,95 @@ pub(crate) async fn sync_events_v5_route(
 		.and_then(|string| string.parse().ok())
 		.unwrap_or(0);

-	if globalsince != 0
-		&& !services.sync.snake_connection_cached(
-			sender_user.clone(),
-			sender_device.clone(),
-			conn_id.clone(),
-		) {
-		debug!("Restarting sync stream because it was gone from the database");
-		return Err(Error::Request(
-			ErrorKind::UnknownPos,
-			"Connection data lost since last time".into(),
-			http::StatusCode::BAD_REQUEST,
-		));
+	let snake_key = into_snake_key(sender_user, sender_device, conn_id);
+
+	if globalsince != 0 && !services.sync.snake_connection_cached(&snake_key) {
+		return Err!(Request(UnknownPos(
+			"Connection data unknown to server; restarting sync stream."
+		)));
 	}

 	// Client / User requested an initial sync
 	if globalsince == 0 {
-		services.sync.forget_snake_sync_connection(
-			sender_user.clone(),
-			sender_device.clone(),
-			conn_id.clone(),
-		);
+		services.sync.forget_snake_sync_connection(&snake_key);
 	}

 	// Get sticky parameters from cache
-	let known_rooms = services.sync.update_snake_sync_request_with_cache(
-		sender_user.clone(),
-		sender_device.clone(),
-		&mut body,
-	);
+	let known_rooms = services
+		.sync
+		.update_snake_sync_request_with_cache(&snake_key, &mut body);

-	let all_joined_rooms: Vec<_> = services
+	let all_joined_rooms = services
 		.rooms
 		.state_cache
 		.rooms_joined(sender_user)
 		.map(ToOwned::to_owned)
-		.collect()
-		.await;
+		.collect::<Vec<OwnedRoomId>>();

-	let all_invited_rooms: Vec<_> = services
+	let all_invited_rooms = services
 		.rooms
 		.state_cache
 		.rooms_invited(sender_user)
 		.map(|r| r.0)
-		.collect()
-		.await;
+		.collect::<Vec<OwnedRoomId>>();

-	let all_knocked_rooms: Vec<_> = services
+	let all_knocked_rooms = services
 		.rooms
 		.state_cache
 		.rooms_knocked(sender_user)
 		.map(|r| r.0)
-		.collect()
-		.await;
+		.collect::<Vec<OwnedRoomId>>();

-	let all_rooms: Vec<&RoomId> = all_joined_rooms
-		.iter()
-		.map(AsRef::as_ref)
-		.chain(all_invited_rooms.iter().map(AsRef::as_ref))
-		.chain(all_knocked_rooms.iter().map(AsRef::as_ref))
-		.collect();
+	let (all_joined_rooms, all_invited_rooms, all_knocked_rooms) =
+		join3(all_joined_rooms, all_invited_rooms, all_knocked_rooms).await;

-	let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect();
-	let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect();
+	let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref);
+	let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref);
+	let all_knocked_rooms = all_knocked_rooms.iter().map(AsRef::as_ref);
+	let all_rooms = all_joined_rooms
+		.clone()
+		.chain(all_invited_rooms.clone())
+		.chain(all_knocked_rooms.clone());

 	let pos = next_batch.clone().to_string();

 	let mut todo_rooms: TodoRooms = BTreeMap::new();

 	let sync_info: SyncInfo<'_> = (sender_user, sender_device, globalsince, &body);

+	let account_data = collect_account_data(services, sync_info).map(Ok);
+
+	let e2ee = collect_e2ee(services, sync_info, all_joined_rooms.clone());
+
+	let to_device = collect_to_device(services, sync_info, next_batch).map(Ok);
+
+	let receipts = collect_receipts(services).map(Ok);
+
+	let (account_data, e2ee, to_device, receipts) =
+		try_join4(account_data, e2ee, to_device, receipts).await?;
+
+	let extensions = sync_events::v5::response::Extensions {
+		account_data,
+		e2ee,
+		to_device,
+		receipts,
+		typing: sync_events::v5::response::Typing::default(),
+	};
+
 	let mut response = sync_events::v5::Response {
 		txn_id: body.txn_id.clone(),
 		pos,
 		lists: BTreeMap::new(),
 		rooms: BTreeMap::new(),
-		extensions: sync_events::v5::response::Extensions {
-			account_data: collect_account_data(services, sync_info).await,
-			e2ee: collect_e2ee(services, sync_info, &all_joined_rooms).await?,
-			to_device: collect_to_device(services, sync_info, next_batch).await,
-			receipts: collect_receipts(services).await,
-			typing: sync_events::v5::response::Typing::default(),
-		},
+		extensions,
 	};

 	handle_lists(
 		services,
 		sync_info,
-		&all_invited_rooms,
-		&all_joined_rooms,
-		&all_rooms,
+		all_invited_rooms.clone(),
+		all_joined_rooms.clone(),
+		all_rooms,
 		&mut todo_rooms,
 		&known_rooms,
 		&mut response,

@@ -175,7 +181,7 @@ pub(crate) async fn sync_events_v5_route(
 		services,
 		sender_user,
 		next_batch,
-		&all_invited_rooms,
+		all_invited_rooms.clone(),
 		&todo_rooms,
 		&mut response,
 		&body,
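
The room-list collection above turns three sequential awaits into three lazily built `collect` futures driven together by `join3`, and the combined `all_rooms` becomes a cloneable iterator chain instead of an allocated Vec. A reduced sketch of that pattern with plain `futures` and illustrative types:

use futures::{StreamExt, future::join3, stream};

async fn gather() -> (Vec<u32>, Vec<u32>, Vec<u32>) {
    // Build the three collect futures first; nothing runs yet.
    let joined = stream::iter([1, 2]).collect::<Vec<u32>>();
    let invited = stream::iter([3]).collect::<Vec<u32>>();
    let knocked = stream::iter([4]).collect::<Vec<u32>>();

    // Drive all three concurrently with a single await.
    join3(joined, invited, knocked).await
}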
@@ -200,31 +206,33 @@ pub(crate) async fn sync_events_v5_route(
 	}

 	trace!(
-		rooms=?response.rooms.len(),
-		account_data=?response.extensions.account_data.rooms.len(),
-		receipts=?response.extensions.receipts.rooms.len(),
+		rooms = ?response.rooms.len(),
+		account_data = ?response.extensions.account_data.rooms.len(),
+		receipts = ?response.extensions.receipts.rooms.len(),
 		"responding to request with"
 	);
 	Ok(response)
 }

-type KnownRooms = BTreeMap<String, BTreeMap<OwnedRoomId, u64>>;
-pub(crate) type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;

 async fn fetch_subscriptions(
-	services: crate::State,
+	services: &Services,
 	(sender_user, sender_device, globalsince, body): SyncInfo<'_>,
 	known_rooms: &KnownRooms,
 	todo_rooms: &mut TodoRooms,
 ) {
 	let mut known_subscription_rooms = BTreeSet::new();
 	for (room_id, room) in &body.room_subscriptions {
-		if !services.rooms.metadata.exists(room_id).await
-			|| services.rooms.metadata.is_disabled(room_id).await
-			|| services.rooms.metadata.is_banned(room_id).await
-		{
+		let not_exists = services.rooms.metadata.exists(room_id).eq(&false);
+
+		let is_disabled = services.rooms.metadata.is_disabled(room_id);
+
+		let is_banned = services.rooms.metadata.is_banned(room_id);
+
+		pin_mut!(not_exists, is_disabled, is_banned);
+		if not_exists.or(is_disabled).or(is_banned).await {
 			continue;
 		}

 		let todo_room =
 			todo_rooms
 				.entry(room_id.clone())

@@ -254,11 +262,10 @@ async fn fetch_subscriptions(
 	// body.room_subscriptions.remove(&r);
 	//}

-	if let Some(conn_id) = &body.conn_id {
+	if let Some(conn_id) = body.conn_id.clone() {
+		let snake_key = into_snake_key(sender_user, sender_device, conn_id);
 		services.sync.update_snake_sync_known_rooms(
-			sender_user,
-			sender_device,
-			conn_id.clone(),
+			&snake_key,
 			"subscriptions".to_owned(),
 			known_subscription_rooms,
 			globalsince,
@@ -267,27 +274,39 @@ async fn fetch_subscriptions(
 }

 #[allow(clippy::too_many_arguments)]
-async fn handle_lists<'a>(
-	services: crate::State,
+async fn handle_lists<'a, Rooms, AllRooms>(
+	services: &Services,
 	(sender_user, sender_device, globalsince, body): SyncInfo<'_>,
-	all_invited_rooms: &Vec<&'a RoomId>,
-	all_joined_rooms: &Vec<&'a RoomId>,
-	all_rooms: &Vec<&'a RoomId>,
+	all_invited_rooms: Rooms,
+	all_joined_rooms: Rooms,
+	all_rooms: AllRooms,
 	todo_rooms: &'a mut TodoRooms,
 	known_rooms: &'a KnownRooms,
 	response: &'_ mut sync_events::v5::Response,
-) -> KnownRooms {
+) -> KnownRooms
+where
+	Rooms: Iterator<Item = &'a RoomId> + Clone + Send + 'a,
+	AllRooms: Iterator<Item = &'a RoomId> + Clone + Send + 'a,
+{
 	for (list_id, list) in &body.lists {
-		let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) {
-			| Some(true) => all_invited_rooms,
-			| Some(false) => all_joined_rooms,
-			| None => all_rooms,
+		let active_rooms: Vec<_> = match list.filters.as_ref().and_then(|f| f.is_invite) {
+			| None => all_rooms.clone().collect(),
+			| Some(true) => all_invited_rooms.clone().collect(),
+			| Some(false) => all_joined_rooms.clone().collect(),
 		};

-		let active_rooms = match list.filters.clone().map(|f| f.not_room_types) {
-			| Some(filter) if filter.is_empty() => active_rooms,
-			| Some(value) => &filter_rooms(&services, active_rooms, &value, true).await,
+		let active_rooms = match list.filters.as_ref().map(|f| &f.not_room_types) {
+			| None => active_rooms,
+			| Some(filter) if filter.is_empty() => active_rooms,
+			| Some(value) =>
+				filter_rooms(
+					services,
+					value,
+					&true,
+					active_rooms.iter().stream().map(Deref::deref),
+				)
+				.collect()
+				.await,
 		};

 		let mut new_known_rooms: BTreeSet<OwnedRoomId> = BTreeSet::new();

@@ -305,6 +324,7 @@ async fn handle_lists(
 		let new_rooms: BTreeSet<OwnedRoomId> =
 			room_ids.clone().into_iter().map(From::from).collect();

 		new_known_rooms.extend(new_rooms);
+		//new_known_rooms.extend(room_ids..cloned());
 		for room_id in room_ids {

@@ -340,29 +360,32 @@ async fn handle_lists(
 			count: ruma_from_usize(active_rooms.len()),
 		});

-		if let Some(conn_id) = &body.conn_id {
+		if let Some(conn_id) = body.conn_id.clone() {
+			let snake_key = into_snake_key(sender_user, sender_device, conn_id);
 			services.sync.update_snake_sync_known_rooms(
-				sender_user,
-				sender_device,
-				conn_id.clone(),
+				&snake_key,
 				list_id.clone(),
 				new_known_rooms,
 				globalsince,
 			);
 		}
 	}

 	BTreeMap::default()
 }

-async fn process_rooms(
-	services: crate::State,
+async fn process_rooms<'a, Rooms>(
+	services: &Services,
 	sender_user: &UserId,
 	next_batch: u64,
-	all_invited_rooms: &[&RoomId],
+	all_invited_rooms: Rooms,
 	todo_rooms: &TodoRooms,
 	response: &mut sync_events::v5::Response,
 	body: &sync_events::v5::Request,
-) -> Result<BTreeMap<OwnedRoomId, sync_events::v5::response::Room>> {
+) -> Result<BTreeMap<OwnedRoomId, sync_events::v5::response::Room>>
+where
+	Rooms: Iterator<Item = &'a RoomId> + Clone + Send + 'a,
+{
 	let mut rooms = BTreeMap::new();
 	for (room_id, (required_state_request, timeline_limit, roomsince)) in todo_rooms {
 		let roomsincecount = PduCount::Normal(*roomsince);

@@ -371,7 +394,7 @@ async fn process_rooms(
 		let mut invite_state = None;
 		let (timeline_pdus, limited);
 		let new_room_id: &RoomId = (*room_id).as_ref();
-		if all_invited_rooms.contains(&new_room_id) {
+		if all_invited_rooms.clone().any(is_equal_to!(new_room_id)) {
 			// TODO: figure out a timestamp we can use for remote invites
 			invite_state = services
 				.rooms

@@ -383,7 +406,7 @@ async fn process_rooms(
 			(timeline_pdus, limited) = (Vec::new(), true);
 		} else {
 			(timeline_pdus, limited) = match load_timeline(
-				&services,
+				services,
 				sender_user,
 				room_id,
 				roomsincecount,
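
handle_lists and process_rooms above stop taking `&Vec<&RoomId>` and become generic over cloneable iterators, so callers can pass the lazily chained room lists without collecting first. The shape of that signature change, reduced to a toy (not project code):

// Accepting `Iterator + Clone` lets the function traverse the input more
// than once while the caller never allocates an intermediate Vec.
fn count_twice<'a, I>(rooms: I) -> usize
where
    I: Iterator<Item = &'a str> + Clone,
{
    rooms.clone().count() + rooms.count()
}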
@@ -416,18 +439,17 @@ async fn process_rooms(
 			.rooms
 			.read_receipt
 			.last_privateread_update(sender_user, room_id)
-			.await > *roomsince;
+			.await;

-		let private_read_event = if last_privateread_update {
-			services
-				.rooms
-				.read_receipt
-				.private_read_get(room_id, sender_user)
-				.await
-				.ok()
-		} else {
-			None
-		};
+		let private_read_event: OptionFuture<_> = (last_privateread_update > *roomsince)
+			.then(|| {
+				services
+					.rooms
+					.read_receipt
+					.private_read_get(room_id, sender_user)
+					.ok()
+			})
+			.into();

 		let mut receipts: Vec<Raw<AnySyncEphemeralRoomEvent>> = services
 			.rooms

@@ -443,7 +465,7 @@ async fn process_rooms(
 			.collect()
 			.await;

-		if let Some(private_read_event) = private_read_event {
+		if let Some(private_read_event) = private_read_event.await.flatten() {
 			receipts.push(private_read_event);
 		}

@@ -492,7 +514,7 @@ async fn process_rooms(
 		let room_events: Vec<_> = timeline_pdus
 			.iter()
 			.stream()
-			.filter_map(|item| ignored_filter(&services, item.clone(), sender_user))
+			.filter_map(|item| ignored_filter(services, item.clone(), sender_user))
 			.map(|(_, pdu)| pdu.to_sync_room_event())
 			.collect()
 			.await;

@@ -644,7 +666,7 @@ async fn process_rooms(
 	Ok(rooms)
 }
 async fn collect_account_data(
-	services: crate::State,
+	services: &Services,
 	(sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request),
 ) -> sync_events::v5::response::AccountData {
 	let mut account_data = sync_events::v5::response::AccountData {

@@ -680,16 +702,19 @@ async fn collect_account_data(
 	account_data
 }

-async fn collect_e2ee<'a>(
-	services: crate::State,
+async fn collect_e2ee<'a, Rooms>(
+	services: &Services,
 	(sender_user, sender_device, globalsince, body): (
 		&UserId,
 		&DeviceId,
 		u64,
 		&sync_events::v5::Request,
 	),
-	all_joined_rooms: &'a Vec<&'a RoomId>,
-) -> Result<sync_events::v5::response::E2EE> {
+	all_joined_rooms: Rooms,
+) -> Result<sync_events::v5::response::E2EE>
+where
+	Rooms: Iterator<Item = &'a RoomId> + Send + 'a,
+{
 	if !body.extensions.e2ee.enabled.unwrap_or(false) {
 		return Ok(sync_events::v5::response::E2EE::default());
 	}

@@ -790,7 +815,7 @@ async fn collect_e2ee(
 				| MembershipState::Join => {
 					// A new user joined an encrypted room
 					if !share_encrypted_room(
-						&services,
+						services,
 						sender_user,
 						user_id,
 						Some(room_id),

@@ -823,7 +848,7 @@ async fn collect_e2ee(
 			// Only send keys if the sender doesn't share an encrypted room with the target
 			// already
 			.filter_map(|user_id| {
-				share_encrypted_room(&services, sender_user, user_id, Some(room_id))
+				share_encrypted_room(services, sender_user, user_id, Some(room_id))
 					.map(|res| res.or_some(user_id.to_owned()))
 			})
 			.collect::<Vec<_>>()

@@ -846,7 +871,7 @@ async fn collect_e2ee(
 	for user_id in left_encrypted_users {
 		let dont_share_encrypted_room =
-			!share_encrypted_room(&services, sender_user, &user_id, None).await;
+			!share_encrypted_room(services, sender_user, &user_id, None).await;

 		// If the user doesn't share an encrypted room with the target anymore, we need
 		// to tell them

@@ -856,20 +881,22 @@ async fn collect_e2ee(
 	}

 	Ok(sync_events::v5::response::E2EE {
-		device_lists: DeviceLists {
-			changed: device_list_changes.into_iter().collect(),
-			left: device_list_left.into_iter().collect(),
-		},
-		device_unused_fallback_key_types: None,

 		device_one_time_keys_count: services
 			.users
 			.count_one_time_keys(sender_user, sender_device)
 			.await,
+		device_unused_fallback_key_types: None,

+		device_lists: DeviceLists {
+			changed: device_list_changes.into_iter().collect(),
+			left: device_list_left.into_iter().collect(),
+		},
 	})
 }

 async fn collect_to_device(
-	services: crate::State,
+	services: &Services,
 	(sender_user, sender_device, globalsince, body): SyncInfo<'_>,
 	next_batch: u64,
 ) -> Option<sync_events::v5::response::ToDevice> {

@@ -892,7 +919,35 @@ async fn collect_to_device(
 	})
 }

-async fn collect_receipts(_services: crate::State) -> sync_events::v5::response::Receipts {
+async fn collect_receipts(_services: &Services) -> sync_events::v5::response::Receipts {
 	sync_events::v5::response::Receipts { rooms: BTreeMap::new() }
 	// TODO: get explicitly requested read receipts
 }
+
+fn filter_rooms<'a, Rooms>(
+	services: &'a Services,
+	filter: &'a [RoomTypeFilter],
+	negate: &'a bool,
+	rooms: Rooms,
+) -> impl Stream<Item = &'a RoomId> + Send + 'a
+where
+	Rooms: Stream<Item = &'a RoomId> + Send + 'a,
+{
+	rooms.filter_map(async |room_id| {
+		let room_type = services.rooms.state_accessor.get_room_type(room_id).await;
+
+		if room_type.as_ref().is_err_and(|e| !e.is_not_found()) {
+			return None;
+		}
+
+		let room_type_filter = RoomTypeFilter::from(room_type.ok());
+
+		let include = if *negate {
+			!filter.contains(&room_type_filter)
+		} else {
+			filter.is_empty() || filter.contains(&room_type_filter)
+		};
+
+		include.then_some(room_id)
+	})
+}
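
process_rooms above defers the private-read-receipt lookup behind futures' OptionFuture: the fetch future is only constructed when the receipt changed since the last sync position, and awaiting it otherwise yields None without polling anything. A self-contained sketch of that pattern:

use futures::future::OptionFuture;

async fn maybe_fetch(updated: bool) -> Option<String> {
    // `then` builds the inner future only when `updated` holds; the
    // `OptionFuture` wrapper resolves to `None` immediately otherwise.
    let fut: OptionFuture<_> = updated.then(|| async { "receipt".to_owned() }).into();
    fut.await
}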
@@ -1,7 +1,10 @@
 use axum::extract::State;
 use conduwuit::{
 	Result,
-	utils::{future::BoolExt, stream::BroadbandExt},
+	utils::{
+		future::BoolExt,
+		stream::{BroadbandExt, ReadyExt},
+	},
 };
 use futures::{FutureExt, StreamExt, pin_mut};
 use ruma::{

@@ -30,29 +33,21 @@ pub(crate) async fn search_users_route(
 		.map_or(LIMIT_DEFAULT, usize::from)
 		.min(LIMIT_MAX);

+	let search_term = body.search_term.to_lowercase();
 	let mut users = services
 		.users
 		.stream()
+		.ready_filter(|user_id| user_id.as_str().to_lowercase().contains(&search_term))
 		.map(ToOwned::to_owned)
 		.broad_filter_map(async |user_id| {
-			let user = search_users::v3::User {
-				user_id: user_id.clone(),
-				display_name: services.users.displayname(&user_id).await.ok(),
-				avatar_url: services.users.avatar_url(&user_id).await.ok(),
-			};
+			let display_name = services.users.displayname(&user_id).await.ok();

-			let user_id_matches = user
-				.user_id
-				.as_str()
-				.to_lowercase()
-				.contains(&body.search_term.to_lowercase());
+			let display_name_matches = display_name
+				.as_deref()
+				.map(str::to_lowercase)
+				.is_some_and(|display_name| display_name.contains(&search_term));

-			let user_displayname_matches = user.display_name.as_ref().is_some_and(|name| {
-				name.to_lowercase()
-					.contains(&body.search_term.to_lowercase())
-			});

-			if !user_id_matches && !user_displayname_matches {
+			if !display_name_matches {
 				return None;
 			}

@@ -61,11 +56,11 @@ pub(crate) async fn search_users_route(
 			.state_cache
 			.rooms_joined(&user_id)
-			.map(ToOwned::to_owned)
-			.any(|room| async move {
+			.broad_any(async |room_id| {
 				services
 					.rooms
 					.state_accessor
-					.get_join_rules(&room)
+					.get_join_rules(&room_id)
 					.map(|rule| matches!(rule, JoinRule::Public))
 					.await
 			});

@@ -76,8 +71,14 @@ pub(crate) async fn search_users_route(
 			.user_sees_user(sender_user, &user_id);

 		pin_mut!(user_in_public_room, user_sees_user);

-		user_in_public_room.or(user_sees_user).await.then_some(user)
+		user_in_public_room
+			.or(user_sees_user)
+			.await
+			.then_some(search_users::v3::User {
+				user_id: user_id.clone(),
+				display_name,
+				avatar_url: services.users.avatar_url(&user_id).await.ok(),
+			})
 	});

 	let results = users.by_ref().take(limit).collect().await;
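
The user-directory rewrite applies the cheap predicate first (a synchronous user-ID substring test via `ready_filter`) and only awaits profile lookups for the survivors, building the response object last. A reduced illustration of filter-before-fetch with plain futures streams (`fetch_profile` is a hypothetical stand-in for the displayname lookup):

use futures::{StreamExt, stream};

async fn fetch_profile(id: &str) -> Option<String> {
    // Stand-in for an async profile/displayname lookup.
    Some(format!("{id}'s display name"))
}

async fn search(ids: Vec<&str>, term: &str) -> Vec<String> {
    stream::iter(ids)
        // Cheap, synchronous predicate first: no await, no allocation.
        .filter(|id| futures::future::ready(id.contains(term)))
        // Expensive async lookup only for entries that passed the filter.
        .filter_map(fetch_profile)
        .collect()
        .await
}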
@@ -306,7 +306,7 @@ async fn auth_server(
 }

 fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> {
-	if !services.server.config.allow_federation {
+	if !services.config.allow_federation {
 		return Err!(Config("allow_federation", "Federation is disabled."));
 	}

@@ -316,11 +316,7 @@ fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> {
 	}

 	let origin = &x_matrix.origin;
-	if services
-		.config
-		.forbidden_remote_server_names
-		.is_match(origin.host())
-	{
+	if services.moderation.is_remote_server_forbidden(origin) {
 		return Err!(Request(Forbidden(debug_warn!(
 			"Federation requests from {origin} denied."
 		))));
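
This and the following federation handlers all swap an inline RegexSet match for a single `is_remote_server_forbidden` call on the moderation service. An illustrative stand-in consistent with the option docs later in this diff (the real method takes a `ServerName` and lives in conduwuit's service layer):

use regex::RegexSet;

struct Moderation {
    forbidden_remote_server_names: RegexSet,
    allowed_remote_server_names: RegexSet,
}

impl Moderation {
    fn is_remote_server_forbidden(&self, host: &str) -> bool {
        // Allow-list wins, per the config documentation in this diff.
        if self.allowed_remote_server_names.is_match(host) {
            return false;
        }
        self.forbidden_remote_server_names.is_match(host)
    }
}

Centralizing the check also means the allow-list exception is applied uniformly instead of being re-implemented at each call site.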
@@ -37,19 +37,14 @@ pub(crate) async fn create_invite_route(
 	}

 	if let Some(server) = body.room_id.server_name() {
-		if services
-			.config
-			.forbidden_remote_server_names
-			.is_match(server.host())
-		{
+		if services.moderation.is_remote_server_forbidden(server) {
 			return Err!(Request(Forbidden("Server is banned on this homeserver.")));
 		}
 	}

 	if services
-		.config
-		.forbidden_remote_server_names
-		.is_match(body.origin().host())
+		.moderation
+		.is_remote_server_forbidden(body.origin())
 	{
 		warn!(
 			"Received federated/remote invite from banned server {} for room ID {}. Rejecting.",
@@ -42,9 +42,8 @@ pub(crate) async fn create_join_event_template_route(
 	.await?;

 	if services
-		.config
-		.forbidden_remote_server_names
-		.is_match(body.origin().host())
+		.moderation
+		.is_remote_server_forbidden(body.origin())
 	{
 		warn!(
 			"Server {} for remote user {} tried joining room ID {} which has a server name that \

@@ -57,11 +56,7 @@ pub(crate) async fn create_join_event_template_route(
 	}

 	if let Some(server) = body.room_id.server_name() {
-		if services
-			.config
-			.forbidden_remote_server_names
-			.is_match(server.host())
-		{
+		if services.moderation.is_remote_server_forbidden(server) {
 			return Err!(Request(Forbidden(warn!(
 				"Room ID server name {server} is banned on this homeserver."
 			))));
@@ -33,9 +33,8 @@ pub(crate) async fn create_knock_event_template_route(
 	.await?;

 	if services
-		.config
-		.forbidden_remote_server_names
-		.is_match(body.origin().host())
+		.moderation
+		.is_remote_server_forbidden(body.origin())
 	{
 		warn!(
 			"Server {} for remote user {} tried knocking room ID {} which has a server name \

@@ -48,11 +47,7 @@ pub(crate) async fn create_knock_event_template_route(
 	}

 	if let Some(server) = body.room_id.server_name() {
-		if services
-			.config
-			.forbidden_remote_server_names
-			.is_match(server.host())
-		{
+		if services.moderation.is_remote_server_forbidden(server) {
 			return Err!(Request(Forbidden("Server is banned on this homeserver.")));
 		}
 	}
@@ -268,9 +268,8 @@ pub(crate) async fn create_join_event_v1_route(
 	body: Ruma<create_join_event::v1::Request>,
 ) -> Result<create_join_event::v1::Response> {
 	if services
-		.config
-		.forbidden_remote_server_names
-		.is_match(body.origin().host())
+		.moderation
+		.is_remote_server_forbidden(body.origin())
 	{
 		warn!(
 			"Server {} tried joining room ID {} through us who has a server name that is \

@@ -282,11 +281,7 @@ pub(crate) async fn create_join_event_v1_route(
 	}

 	if let Some(server) = body.room_id.server_name() {
-		if services
-			.config
-			.forbidden_remote_server_names
-			.is_match(server.host())
-		{
+		if services.moderation.is_remote_server_forbidden(server) {
 			warn!(
 				"Server {} tried joining room ID {} through us which has a server name that is \
 				globally forbidden. Rejecting.",

@@ -314,19 +309,14 @@ pub(crate) async fn create_join_event_v2_route(
 	body: Ruma<create_join_event::v2::Request>,
 ) -> Result<create_join_event::v2::Response> {
 	if services
-		.config
-		.forbidden_remote_server_names
-		.is_match(body.origin().host())
+		.moderation
+		.is_remote_server_forbidden(body.origin())
 	{
 		return Err!(Request(Forbidden("Server is banned on this homeserver.")));
 	}

 	if let Some(server) = body.room_id.server_name() {
-		if services
-			.config
-			.forbidden_remote_server_names
-			.is_match(server.host())
-		{
+		if services.moderation.is_remote_server_forbidden(server) {
 			warn!(
 				"Server {} tried joining room ID {} through us which has a server name that is \
 				globally forbidden. Rejecting.",
@@ -26,9 +26,8 @@ pub(crate) async fn create_knock_event_v1_route(
 	body: Ruma<send_knock::v1::Request>,
 ) -> Result<send_knock::v1::Response> {
 	if services
-		.config
-		.forbidden_remote_server_names
-		.is_match(body.origin().host())
+		.moderation
+		.is_remote_server_forbidden(body.origin())
 	{
 		warn!(
 			"Server {} tried knocking room ID {} who has a server name that is globally \

@@ -40,11 +39,7 @@ pub(crate) async fn create_knock_event_v1_route(
 	}

 	if let Some(server) = body.room_id.server_name() {
-		if services
-			.config
-			.forbidden_remote_server_names
-			.is_match(server.host())
-		{
+		if services.moderation.is_remote_server_forbidden(server) {
 			warn!(
 				"Server {} tried knocking room ID {} which has a server name that is globally \
 				forbidden. Rejecting.",
@@ -17,17 +17,24 @@ crate-type = [
 ]

 [features]
-release_max_log_level = [
-	"tracing/max_level_trace",
-	"tracing/release_max_level_info",
-	"log/max_level_trace",
-	"log/release_max_level_info",
+brotli_compression = [
+	"reqwest/brotli",
 ]
+conduwuit_mods = [
+	"dep:libloading"
+]
+gzip_compression = [
+	"reqwest/gzip",
+]
+hardened_malloc = [
+	"dep:hardened_malloc-rs"
+]
 jemalloc = [
 	"dep:tikv-jemalloc-sys",
 	"dep:tikv-jemalloc-ctl",
 	"dep:tikv-jemallocator",
 ]
+jemalloc_conf = []
 jemalloc_prof = [
 	"tikv-jemalloc-sys/profiling",
 ]

@@ -36,24 +43,17 @@ jemalloc_stats = [
 	"tikv-jemalloc-ctl/stats",
 	"tikv-jemallocator/stats",
 ]
-jemalloc_conf = []
-hardened_malloc = [
-	"dep:hardened_malloc-rs"
-]
-gzip_compression = [
-	"reqwest/gzip",
-]
-brotli_compression = [
-	"reqwest/brotli",
+perf_measurements = []
+release_max_log_level = [
+	"tracing/max_level_trace",
+	"tracing/release_max_level_info",
+	"log/max_level_trace",
+	"log/release_max_level_info",
 ]
+sentry_telemetry = []
 zstd_compression = [
 	"reqwest/zstd",
 ]
-perf_measurements = []
-sentry_telemetry = []
-conduwuit_mods = [
-	"dep:libloading"
-]

 [dependencies]
 argon2.workspace = true
@@ -161,14 +161,12 @@ pub struct Config {
 	pub new_user_displayname_suffix: String,

 	/// If enabled, conduwuit will send a simple GET request periodically to
-	/// `https://pupbrain.dev/check-for-updates/stable` for any new
-	/// announcements made. Despite the name, this is not an update check
-	/// endpoint, it is simply an announcement check endpoint.
+	/// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new
+	/// announcements or major updates. This is not an update check endpoint.
 	///
-	/// This is disabled by default as this is rarely used except for security
-	/// updates or major updates.
-	#[serde(default, alias = "allow_announcements_check")]
-	pub allow_check_for_updates: bool,
+	/// default: true
+	#[serde(alias = "allow_check_for_updates", default = "true_fn")]
+	pub allow_announcements_check: bool,

 	/// Set this to any float value to multiply conduwuit's in-memory LRU caches
 	/// with such as "auth_chain_cache_capacity".
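
The rename above keeps old configs working through a serde alias while flipping the default to enabled; a minimal reproduction of that pattern:

use serde::Deserialize;

fn true_fn() -> bool { true }

#[derive(Deserialize)]
struct Config {
    // Configs that still say `allow_check_for_updates = false` keep
    // deserializing into this field; absent keys default to true.
    #[serde(alias = "allow_check_for_updates", default = "true_fn")]
    allow_announcements_check: bool,
}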
@@ -1133,10 +1131,10 @@ pub struct Config {
 	#[serde(default = "true_fn")]
 	pub rocksdb_compaction_ioprio_idle: bool,

-	/// Disables RocksDB compaction. You should never ever have to set this
-	/// option to true. If you for some reason find yourself needing to use this
-	/// option as part of troubleshooting or a bug, please reach out to us in
-	/// the conduwuit Matrix room with information and details.
+	/// Enables RocksDB compaction. You should never ever have to set this
+	/// option to false. If you for some reason find yourself needing to use
+	/// this option as part of troubleshooting or a bug, please reach out to us
+	/// in the conduwuit Matrix room with information and details.
 	///
 	/// Disabling compaction will lead to a significantly bloated and
 	/// explosively large database, gradually poor performance, unnecessarily
@@ -1361,6 +1359,38 @@ pub struct Config {
 	#[serde(default)]
 	pub prune_missing_media: bool,

+	/// List of forbidden server names via regex patterns that we will block
+	/// incoming AND outgoing federation with, and block client room joins /
+	/// remote user invites.
+	///
+	/// Note that your messages can still make it to forbidden servers through
+	/// backfilling. Events we receive from forbidden servers via backfill
+	/// from servers we *do* federate with will be stored in the database.
+	///
+	/// This check is applied on the room ID, room alias, sender server name,
+	/// sender user's server name, inbound federation X-Matrix origin, and
+	/// outbound federation handler.
+	///
+	/// You can set this to ["*"] to block all servers by default, and then
+	/// use `allowed_remote_server_names` to allow only specific servers.
+	///
+	/// example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"]
+	///
+	/// default: []
+	#[serde(default, with = "serde_regex")]
+	pub forbidden_remote_server_names: RegexSet,
+
+	/// List of allowed server names via regex patterns that we will allow,
+	/// regardless of if they match `forbidden_remote_server_names`.
+	///
+	/// This option has no effect if `forbidden_remote_server_names` is empty.
+	///
+	/// example: ["goodserver\\.tld$", "goodphrase"]
+	///
+	/// default: []
+	#[serde(default, with = "serde_regex")]
+	pub allowed_remote_server_names: RegexSet,
+
 	/// Vector list of regex patterns of server names that conduwuit will refuse
 	/// to download remote media from.
 	///

@@ -1370,22 +1400,6 @@ pub struct Config {
 	#[serde(default, with = "serde_regex")]
 	pub prevent_media_downloads_from: RegexSet,

-	/// List of forbidden server names via regex patterns that we will block
-	/// incoming AND outgoing federation with, and block client room joins /
-	/// remote user invites.
-	///
-	/// This check is applied on the room ID, room alias, sender server name,
-	/// sender user's server name, inbound federation X-Matrix origin, and
-	/// outbound federation handler.
-	///
-	/// Basically "global" ACLs.
-	///
-	/// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
-	///
-	/// default: []
-	#[serde(default, with = "serde_regex")]
-	pub forbidden_remote_server_names: RegexSet,
-
 	/// List of forbidden server names via regex patterns that we will block all
 	/// outgoing federated room directory requests for. Useful for preventing
 	/// our users from wandering into bad servers or spaces.

@@ -1396,6 +1410,31 @@ pub struct Config {
 	#[serde(default, with = "serde_regex")]
 	pub forbidden_remote_room_directory_server_names: RegexSet,

+	/// Vector list of regex patterns of server names that conduwuit will not
+	/// send messages to the client from.
+	///
+	/// Note that there is no way for clients to receive messages once a server
+	/// has become unignored without doing a full sync. This is a protocol
+	/// limitation with the current sync protocols. This means this is somewhat
+	/// of a nuclear option.
+	///
+	/// example: ["reallybadserver\.tld$", "reallybadphrase",
+	/// "69dollarfortnitecards"]
+	///
+	/// default: []
+	#[serde(default, with = "serde_regex")]
+	pub ignore_messages_from_server_names: RegexSet,
+
+	/// Send messages from users that the user has ignored to the client.
+	///
+	/// There is no way for clients to receive messages sent while a user was
+	/// ignored without doing a full sync. This is a protocol limitation with
+	/// the current sync protocols. Disabling this option will move
+	/// responsibility of ignoring messages to the client, which can avoid this
+	/// limitation.
+	#[serde(default)]
+	pub send_messages_from_ignored_users_to_client: bool,
+
 	/// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
 	/// do not want conduwuit to send outbound requests to. Defaults to
 	/// RFC1918, unroutable, loopback, multicast, and testnet addresses for

@@ -1954,7 +1993,7 @@ impl Config {
 		let mut addrs = Vec::with_capacity(
 			self.get_bind_hosts()
 				.len()
-				.saturating_add(self.get_bind_ports().len()),
+				.saturating_mul(self.get_bind_ports().len()),
 		);
 		for host in &self.get_bind_hosts() {
 			for port in &self.get_bind_ports() {
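
The last hunk is a capacity bug fix: every host/port pair yields one bind address, so the reserved capacity is the product of the two lengths, not their sum. A self-contained illustration:

fn main() {
    let hosts = ["127.0.0.1", "::1"];
    let ports: [u16; 2] = [8008, 8448];

    // hosts.len() * ports.len() addresses will be pushed below, so
    // saturating_mul reserves exactly enough; saturating_add would not.
    let mut addrs = Vec::with_capacity(hosts.len().saturating_mul(ports.len()));
    for host in &hosts {
        for port in &ports {
            addrs.push(format!("{host}:{port}"));
        }
    }
    assert_eq!(addrs.len(), 4);
}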
@@ -12,6 +12,7 @@ pub use crate::{result::DebugInspect, utils::debug::*};
 /// Log event at given level in debug-mode (when debug-assertions are enabled).
 /// In release-mode it becomes DEBUG level, and possibly subject to elision.
 #[macro_export]
+#[collapse_debuginfo(yes)]
 macro_rules! debug_event {
 	( $level:expr_2021, $($x:tt)+ ) => {
 		if $crate::debug::logging() {
Some files were not shown because too many files have changed in this diff.