Compare commits
1 commit
alpine-pac
...
renovate/r
Author | SHA1 | Date | |
---|---|---|---|
|
41e4e36559 |
233 changed files with 4769 additions and 4465 deletions
|
@ -1,9 +1,9 @@
|
||||||
# Local build and dev artifacts
|
# Local build and dev artifacts
|
||||||
target/
|
target
|
||||||
|
tests
|
||||||
|
|
||||||
# Docker files
|
# Docker files
|
||||||
Dockerfile*
|
Dockerfile*
|
||||||
docker/
|
|
||||||
|
|
||||||
# IDE files
|
# IDE files
|
||||||
.vscode
|
.vscode
|
||||||
|
|
|
@ -1,49 +0,0 @@
|
||||||
on:
|
|
||||||
- workflow-dispatch
|
|
||||||
- push
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
container:
|
|
||||||
image: alpine:edge
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: set up dependencies
|
|
||||||
run: |
|
|
||||||
apk update
|
|
||||||
apk upgrade
|
|
||||||
apk add nodejs git alpine-sdk
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
name: checkout the alpine dir
|
|
||||||
with:
|
|
||||||
sparse-checkout: "alpine/"
|
|
||||||
|
|
||||||
# - uses: actions/checkout@v4
|
|
||||||
# name: checkout the rest in the alpine dir
|
|
||||||
# with:
|
|
||||||
# path: 'alpine/continuwuity'
|
|
||||||
- name: set up user
|
|
||||||
run: adduser -DG abuild ci
|
|
||||||
|
|
||||||
- name: set up keys
|
|
||||||
run: |
|
|
||||||
pwd
|
|
||||||
mkdir ~/.abuild
|
|
||||||
echo "${{ secrets.abuild_privkey }}" > ~/.abuild/ci@continuwuity.rsa
|
|
||||||
echo "${{ secrets.abuild_pubkey }}" > ~/.abuild/ci@continuwuity.rsa.pub
|
|
||||||
echo $HOME
|
|
||||||
echo 'PACKAGER_PRIVKEY="/root/.abuild/ci@continuwuity.rsa"' > ~/.abuild/abuild.conf
|
|
||||||
ls ~/.abuild
|
|
||||||
|
|
||||||
- name: go go gadget abuild
|
|
||||||
run: |
|
|
||||||
cd alpine
|
|
||||||
# modify the APKBUILD to use the current branch instead of the release
|
|
||||||
# note that it seems to require the repo to be public (as you'll get
|
|
||||||
# a 404 even if the token is provided)
|
|
||||||
export ARCHIVE_URL="${{ github.server_url }}/${{ github.repository }}/archive/${{ github.ref_name }}.tar.gz"
|
|
||||||
echo $ARCHIVE_URL
|
|
||||||
sed -i '/^source=/c\source="'"$ARCHIVE_URL" APKBUILD
|
|
||||||
abuild -F checksum
|
|
||||||
abuild -Fr
|
|
|
@ -1,73 +0,0 @@
|
||||||
name: Documentation
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
tags:
|
|
||||||
- "v*"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: "pages-${{ github.ref }}"
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
docs:
|
|
||||||
name: Build and Deploy Documentation
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Sync repository
|
|
||||||
uses: https://github.com/actions/checkout@v4
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup mdBook
|
|
||||||
uses: https://github.com/peaceiris/actions-mdbook@v2
|
|
||||||
with:
|
|
||||||
mdbook-version: "latest"
|
|
||||||
|
|
||||||
- name: Build mdbook
|
|
||||||
run: mdbook build
|
|
||||||
|
|
||||||
- name: Prepare static files for deployment
|
|
||||||
run: |
|
|
||||||
mkdir -p ./public/.well-known/matrix
|
|
||||||
mkdir -p ./public/.well-known/continuwuity
|
|
||||||
mkdir -p ./public/schema
|
|
||||||
# Copy the Matrix .well-known files
|
|
||||||
cp ./docs/static/server ./public/.well-known/matrix/server
|
|
||||||
cp ./docs/static/client ./public/.well-known/matrix/client
|
|
||||||
cp ./docs/static/client ./public/.well-known/matrix/support
|
|
||||||
cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements
|
|
||||||
cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json
|
|
||||||
# Copy the custom headers file
|
|
||||||
cp ./docs/static/_headers ./public/_headers
|
|
||||||
echo "Copied .well-known files and _headers to ./public"
|
|
||||||
|
|
||||||
- name: Setup Node.js
|
|
||||||
uses: https://github.com/actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: 20
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: npm install --save-dev wrangler@latest
|
|
||||||
|
|
||||||
- name: Deploy to Cloudflare Pages (Production)
|
|
||||||
if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
|
|
||||||
uses: https://github.com/cloudflare/wrangler-action@v3
|
|
||||||
with:
|
|
||||||
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
|
|
||||||
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
|
|
||||||
command: pages deploy ./public --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
|
|
||||||
|
|
||||||
- name: Deploy to Cloudflare Pages (Preview)
|
|
||||||
if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
|
|
||||||
uses: https://github.com/cloudflare/wrangler-action@v3
|
|
||||||
with:
|
|
||||||
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
|
|
||||||
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
|
|
||||||
command: pages deploy ./public --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}"
|
|
|
@ -1,127 +0,0 @@
|
||||||
name: Deploy Element Web
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 0 * * *"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: "element-${{ github.ref }}"
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build-and-deploy:
|
|
||||||
name: Build and Deploy Element Web
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Setup Node.js
|
|
||||||
uses: https://code.forgejo.org/actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: "20"
|
|
||||||
|
|
||||||
- name: Clone, setup, and build Element Web
|
|
||||||
run: |
|
|
||||||
echo "Cloning Element Web..."
|
|
||||||
git clone https://github.com/maunium/element-web
|
|
||||||
cd element-web
|
|
||||||
git checkout develop
|
|
||||||
git pull
|
|
||||||
|
|
||||||
echo "Cloning matrix-js-sdk..."
|
|
||||||
git clone https://github.com/matrix-org/matrix-js-sdk.git
|
|
||||||
|
|
||||||
echo "Installing Yarn..."
|
|
||||||
npm install -g yarn
|
|
||||||
|
|
||||||
echo "Installing dependencies..."
|
|
||||||
yarn install
|
|
||||||
|
|
||||||
echo "Preparing build environment..."
|
|
||||||
mkdir -p .home
|
|
||||||
|
|
||||||
echo "Cleaning up specific node_modules paths..."
|
|
||||||
rm -rf node_modules/@types/eslint-scope/ matrix-*-sdk/node_modules/@types/eslint-scope || echo "Cleanup paths not found, continuing."
|
|
||||||
|
|
||||||
echo "Getting matrix-js-sdk commit hash..."
|
|
||||||
cd matrix-js-sdk
|
|
||||||
jsver=$(git rev-parse HEAD)
|
|
||||||
jsver=${jsver:0:12}
|
|
||||||
cd ..
|
|
||||||
echo "matrix-js-sdk version hash: $jsver"
|
|
||||||
|
|
||||||
echo "Getting element-web commit hash..."
|
|
||||||
ver=$(git rev-parse HEAD)
|
|
||||||
ver=${ver:0:12}
|
|
||||||
echo "element-web version hash: $ver"
|
|
||||||
|
|
||||||
chmod +x ./build-sh
|
|
||||||
|
|
||||||
export VERSION="$ver-js-$jsver"
|
|
||||||
echo "Building Element Web version: $VERSION"
|
|
||||||
./build-sh
|
|
||||||
|
|
||||||
echo "Checking for build output..."
|
|
||||||
ls -la webapp/
|
|
||||||
|
|
||||||
- name: Create config.json
|
|
||||||
run: |
|
|
||||||
cat <<EOF > ./element-web/webapp/config.json
|
|
||||||
{
|
|
||||||
"default_server_name": "continuwuity.org",
|
|
||||||
"default_server_config": {
|
|
||||||
"m.homeserver": {
|
|
||||||
"base_url": "https://matrix.continuwuity.org"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"default_country_code": "GB",
|
|
||||||
"default_theme": "dark",
|
|
||||||
"mobile_guide_toast": false,
|
|
||||||
"show_labs_settings": true,
|
|
||||||
"room_directory": [
|
|
||||||
"continuwuity.org",
|
|
||||||
"matrixrooms.info"
|
|
||||||
],
|
|
||||||
"settings_defaults": {
|
|
||||||
"UIFeature.urlPreviews": true,
|
|
||||||
"UIFeature.feedback": false,
|
|
||||||
"UIFeature.voip": false,
|
|
||||||
"UIFeature.shareQrCode": false,
|
|
||||||
"UIFeature.shareSocial": false,
|
|
||||||
"UIFeature.locationSharing": false,
|
|
||||||
"enableSyntaxHighlightLanguageDetection": true
|
|
||||||
},
|
|
||||||
"features": {
|
|
||||||
"feature_pinning": true,
|
|
||||||
"feature_custom_themes": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
EOF
|
|
||||||
echo "Created ./element-web/webapp/config.json"
|
|
||||||
cat ./element-web/webapp/config.json
|
|
||||||
|
|
||||||
- name: Upload Artifact
|
|
||||||
uses: https://code.forgejo.org/actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: element-web
|
|
||||||
path: ./element-web/webapp/
|
|
||||||
retention-days: 14
|
|
||||||
|
|
||||||
- name: Install Wrangler
|
|
||||||
run: npm install --save-dev wrangler@latest
|
|
||||||
|
|
||||||
- name: Deploy to Cloudflare Pages (Production)
|
|
||||||
if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
|
|
||||||
uses: https://github.com/cloudflare/wrangler-action@v3
|
|
||||||
with:
|
|
||||||
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
|
|
||||||
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
|
|
||||||
command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
|
|
||||||
|
|
||||||
- name: Deploy to Cloudflare Pages (Preview)
|
|
||||||
if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != ''
|
|
||||||
uses: https://github.com/cloudflare/wrangler-action@v3
|
|
||||||
with:
|
|
||||||
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
|
|
||||||
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
|
|
||||||
command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element"
|
|
|
@ -1,235 +0,0 @@
|
||||||
name: Release Docker Image
|
|
||||||
concurrency:
|
|
||||||
group: "release-image-${{ github.ref }}"
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
paths-ignore:
|
|
||||||
- "*.md"
|
|
||||||
- "**/*.md"
|
|
||||||
- ".gitlab-ci.yml"
|
|
||||||
- ".gitignore"
|
|
||||||
- "renovate.json"
|
|
||||||
- "debian/**"
|
|
||||||
- "docker/**"
|
|
||||||
- "docs/**"
|
|
||||||
# Allows you to run this workflow manually from the Actions tab
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
env:
|
|
||||||
BUILTIN_REGISTRY: forgejo.ellis.link
|
|
||||||
BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
define-variables:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
outputs:
|
|
||||||
images: ${{ steps.var.outputs.images }}
|
|
||||||
images_list: ${{ steps.var.outputs.images_list }}
|
|
||||||
build_matrix: ${{ steps.var.outputs.build_matrix }}
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Setting variables
|
|
||||||
uses: https://github.com/actions/github-script@v7
|
|
||||||
id: var
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
const githubRepo = '${{ github.repository }}'.toLowerCase()
|
|
||||||
const repoId = githubRepo.split('/')[1]
|
|
||||||
|
|
||||||
core.setOutput('github_repository', githubRepo)
|
|
||||||
const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo
|
|
||||||
let images = []
|
|
||||||
if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
|
|
||||||
images.push(builtinImage)
|
|
||||||
}
|
|
||||||
core.setOutput('images', images.join("\n"))
|
|
||||||
core.setOutput('images_list', images.join(","))
|
|
||||||
const platforms = ['linux/amd64', 'linux/arm64']
|
|
||||||
core.setOutput('build_matrix', JSON.stringify({
|
|
||||||
platform: platforms,
|
|
||||||
include: platforms.map(platform => { return {
|
|
||||||
platform,
|
|
||||||
slug: platform.replace('/', '-')
|
|
||||||
}})
|
|
||||||
}))
|
|
||||||
|
|
||||||
build-image:
|
|
||||||
runs-on: dind
|
|
||||||
container: ghcr.io/catthehacker/ubuntu:act-latest
|
|
||||||
needs: define-variables
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
attestations: write
|
|
||||||
id-token: write
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
{
|
|
||||||
"include":
|
|
||||||
[
|
|
||||||
{ "platform": "linux/amd64", "slug": "linux-amd64" },
|
|
||||||
{ "platform": "linux/arm64", "slug": "linux-arm64" },
|
|
||||||
],
|
|
||||||
"platform": ["linux/amd64", "linux/arm64"],
|
|
||||||
}
|
|
||||||
steps:
|
|
||||||
- name: Echo strategy
|
|
||||||
run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}'
|
|
||||||
- name: Echo matrix
|
|
||||||
run: echo '${{ toJSON(matrix) }}'
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
persist-credentials: false
|
|
||||||
- run: |
|
|
||||||
if ! command -v rustup &> /dev/null ; then
|
|
||||||
curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y
|
|
||||||
echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH
|
|
||||||
fi
|
|
||||||
- uses: https://github.com/cargo-bins/cargo-binstall@main
|
|
||||||
- run: cargo binstall timelord-cli@3.0.1
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
|
|
||||||
- name: Login to builtin registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ${{ env.BUILTIN_REGISTRY }}
|
|
||||||
username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
|
|
||||||
password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
# This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
|
|
||||||
- name: Extract metadata (labels, annotations) for Docker
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
with:
|
|
||||||
images: ${{needs.define-variables.outputs.images}}
|
|
||||||
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
|
|
||||||
env:
|
|
||||||
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index
|
|
||||||
|
|
||||||
# This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
|
|
||||||
# It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
|
|
||||||
# It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
|
|
||||||
# It will not push images generated from a pull request
|
|
||||||
- name: Get short git commit SHA
|
|
||||||
id: sha
|
|
||||||
run: |
|
|
||||||
calculatedSha=$(git rev-parse --short ${{ github.sha }})
|
|
||||||
echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
|
|
||||||
- name: Get Git commit timestamps
|
|
||||||
run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
|
|
||||||
- name: Set up timelord
|
|
||||||
uses: actions/cache/restore@v3
|
|
||||||
with:
|
|
||||||
path: /timelord/
|
|
||||||
key: timelord-v0 # Cache is already split per runner
|
|
||||||
- name: Run timelord to set timestamps
|
|
||||||
run: timelord sync --source-dir . --cache-dir /timelord/
|
|
||||||
- name: Save timelord
|
|
||||||
uses: actions/cache/save@v3
|
|
||||||
with:
|
|
||||||
path: /timelord/
|
|
||||||
key: timelord-v0
|
|
||||||
- name: Build and push Docker image by digest
|
|
||||||
id: build
|
|
||||||
uses: docker/build-push-action@v6
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
file: "docker/Dockerfile"
|
|
||||||
build-args: |
|
|
||||||
CONDUWUIT_VERSION_EXTRA=${{ env.COMMIT_SHORT_SHA }}
|
|
||||||
platforms: ${{ matrix.platform }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
annotations: ${{ steps.meta.outputs.annotations }}
|
|
||||||
cache-from: type=gha
|
|
||||||
cache-to: type=gha,mode=max
|
|
||||||
sbom: true
|
|
||||||
outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
|
|
||||||
env:
|
|
||||||
SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}
|
|
||||||
|
|
||||||
# For publishing multi-platform manifests
|
|
||||||
- name: Export digest
|
|
||||||
run: |
|
|
||||||
mkdir -p /tmp/digests
|
|
||||||
digest="${{ steps.build.outputs.digest }}"
|
|
||||||
touch "/tmp/digests/${digest#sha256:}"
|
|
||||||
|
|
||||||
- name: Upload digest
|
|
||||||
uses: forgejo/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: digests-${{ matrix.slug }}
|
|
||||||
path: /tmp/digests/*
|
|
||||||
if-no-files-found: error
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
merge:
|
|
||||||
runs-on: dind
|
|
||||||
container: ghcr.io/catthehacker/ubuntu:act-latest
|
|
||||||
needs: [define-variables, build-image]
|
|
||||||
steps:
|
|
||||||
- name: Download digests
|
|
||||||
uses: forgejo/download-artifact@v4
|
|
||||||
with:
|
|
||||||
path: /tmp/digests
|
|
||||||
pattern: digests-*
|
|
||||||
merge-multiple: true
|
|
||||||
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
|
|
||||||
- name: Login to builtin registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ${{ env.BUILTIN_REGISTRY }}
|
|
||||||
username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
|
|
||||||
password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Extract metadata (tags) for Docker
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
with:
|
|
||||||
tags: |
|
|
||||||
type=semver,pattern=v{{version}}
|
|
||||||
type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }}
|
|
||||||
type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
|
|
||||||
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
|
|
||||||
type=ref,event=pr
|
|
||||||
type=sha,format=long
|
|
||||||
images: ${{needs.define-variables.outputs.images}}
|
|
||||||
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
|
|
||||||
env:
|
|
||||||
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
|
|
||||||
|
|
||||||
- name: Create manifest list and push
|
|
||||||
working-directory: /tmp/digests
|
|
||||||
env:
|
|
||||||
IMAGES: ${{needs.define-variables.outputs.images}}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
IFS=$'\n'
|
|
||||||
IMAGES_LIST=($IMAGES)
|
|
||||||
ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS)
|
|
||||||
TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS)
|
|
||||||
for REPO in "${IMAGES_LIST[@]}"; do
|
|
||||||
docker buildx imagetools create \
|
|
||||||
$(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \
|
|
||||||
$(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \
|
|
||||||
$(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done)
|
|
||||||
done
|
|
||||||
|
|
||||||
- name: Inspect image
|
|
||||||
env:
|
|
||||||
IMAGES: ${{needs.define-variables.outputs.images}}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
IMAGES_LIST=($IMAGES)
|
|
||||||
for REPO in "${IMAGES_LIST[@]}"; do
|
|
||||||
docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }}
|
|
||||||
done
|
|
717
.github/workflows/ci.yml
vendored
Normal file
717
.github/workflows/ci.yml
vendored
Normal file
|
@ -0,0 +1,717 @@
|
||||||
|
name: CI and Artifacts
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
push:
|
||||||
|
paths-ignore:
|
||||||
|
- '.gitlab-ci.yml'
|
||||||
|
- '.gitignore'
|
||||||
|
- 'renovate.json'
|
||||||
|
- 'debian/**'
|
||||||
|
- 'docker/**'
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- '*'
|
||||||
|
# Allows you to run this workflow manually from the Actions tab
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.head_ref || github.ref_name }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
# Required to make some things output color
|
||||||
|
TERM: ansi
|
||||||
|
# Publishing to my nix binary cache
|
||||||
|
ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
|
||||||
|
# conduwuit.cachix.org
|
||||||
|
CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||||
|
# Just in case incremental is still being set to true, speeds up CI
|
||||||
|
CARGO_INCREMENTAL: 0
|
||||||
|
# Custom nix binary cache if fork is being used
|
||||||
|
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
|
||||||
|
ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }}
|
||||||
|
# Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
|
||||||
|
NIX_CONFIG: |
|
||||||
|
show-trace = true
|
||||||
|
extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org
|
||||||
|
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=
|
||||||
|
experimental-features = nix-command flakes
|
||||||
|
extra-experimental-features = nix-command flakes
|
||||||
|
accept-flake-config = true
|
||||||
|
WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
|
||||||
|
GH_REF_NAME: ${{ github.ref_name }}
|
||||||
|
WEBSERVER_DIR_NAME: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
|
||||||
|
|
||||||
|
permissions: {}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
tests:
|
||||||
|
name: Test
|
||||||
|
runs-on: self-hosted
|
||||||
|
steps:
|
||||||
|
- name: Setup SSH web publish
|
||||||
|
env:
|
||||||
|
web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}
|
||||||
|
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
|
||||||
|
run: |
|
||||||
|
mkdir -p -v ~/.ssh
|
||||||
|
|
||||||
|
echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts
|
||||||
|
echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519
|
||||||
|
|
||||||
|
chmod 600 ~/.ssh/id_ed25519
|
||||||
|
|
||||||
|
cat >>~/.ssh/config <<END
|
||||||
|
Host website
|
||||||
|
HostName ${{ secrets.WEB_UPLOAD_SSH_HOSTNAME }}
|
||||||
|
User ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
|
||||||
|
IdentityFile ~/.ssh/id_ed25519
|
||||||
|
StrictHostKeyChecking yes
|
||||||
|
AddKeysToAgent no
|
||||||
|
ForwardX11 no
|
||||||
|
BatchMode yes
|
||||||
|
END
|
||||||
|
|
||||||
|
echo "Checking connection"
|
||||||
|
ssh -q website "echo test" || ssh -q website "echo test"
|
||||||
|
|
||||||
|
echo "Creating commit rev directory on web server"
|
||||||
|
ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/"
|
||||||
|
ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/"
|
||||||
|
|
||||||
|
echo "SSH_WEBSITE=1" >> "$GITHUB_ENV"
|
||||||
|
|
||||||
|
- name: Sync repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
|
- name: Tag comparison check
|
||||||
|
if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }}
|
||||||
|
run: |
|
||||||
|
# Tag mismatch with latest repo tag check to prevent potential downgrades
|
||||||
|
LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)
|
||||||
|
|
||||||
|
if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then
|
||||||
|
echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.'
|
||||||
|
echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Prepare build environment
|
||||||
|
run: |
|
||||||
|
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
|
||||||
|
direnv allow
|
||||||
|
nix develop .#all-features --command true
|
||||||
|
|
||||||
|
- name: Cache CI dependencies
|
||||||
|
run: |
|
||||||
|
bin/nix-build-and-cache ci
|
||||||
|
bin/nix-build-and-cache just '.#devShells.x86_64-linux.default'
|
||||||
|
bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features'
|
||||||
|
bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic'
|
||||||
|
|
||||||
|
# use rust-cache
|
||||||
|
- uses: Swatinem/rust-cache@v2
|
||||||
|
# we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting
|
||||||
|
# releases and tags
|
||||||
|
#if: ${{ !startsWith(github.ref, 'refs/tags/') }}
|
||||||
|
with:
|
||||||
|
cache-all-crates: "true"
|
||||||
|
cache-on-failure: "true"
|
||||||
|
cache-targets: "true"
|
||||||
|
|
||||||
|
- name: Run CI tests
|
||||||
|
env:
|
||||||
|
CARGO_PROFILE: "test"
|
||||||
|
run: |
|
||||||
|
direnv exec . engage > >(tee -a test_output.log)
|
||||||
|
|
||||||
|
- name: Run Complement tests
|
||||||
|
env:
|
||||||
|
CARGO_PROFILE: "test"
|
||||||
|
run: |
|
||||||
|
# the nix devshell sets $COMPLEMENT_SRC, so "/dev/null" is no-op
|
||||||
|
direnv exec . bin/complement "/dev/null" complement_test_logs.jsonl complement_test_results.jsonl > >(tee -a test_output.log)
|
||||||
|
cp -v -f result complement_oci_image.tar.gz
|
||||||
|
|
||||||
|
- name: Upload Complement OCI image
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: complement_oci_image.tar.gz
|
||||||
|
path: complement_oci_image.tar.gz
|
||||||
|
if-no-files-found: error
|
||||||
|
compression-level: 0
|
||||||
|
|
||||||
|
- name: Upload Complement logs
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: complement_test_logs.jsonl
|
||||||
|
path: complement_test_logs.jsonl
|
||||||
|
if-no-files-found: error
|
||||||
|
|
||||||
|
- name: Upload Complement results
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: complement_test_results.jsonl
|
||||||
|
path: complement_test_results.jsonl
|
||||||
|
if-no-files-found: error
|
||||||
|
|
||||||
|
- name: Diff Complement results with checked-in repo results
|
||||||
|
run: |
|
||||||
|
diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log)
|
||||||
|
|
||||||
|
- name: Update Job Summary
|
||||||
|
env:
|
||||||
|
GH_JOB_STATUS: ${{ job.status }}
|
||||||
|
if: success() || failure()
|
||||||
|
run: |
|
||||||
|
if [ ${GH_JOB_STATUS} == 'success' ]; then
|
||||||
|
echo '# ✅ CI completed suwuccessfully' >> $GITHUB_STEP_SUMMARY
|
||||||
|
else
|
||||||
|
echo '# ❌ CI failed (last 100 lines of output)' >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||||
|
tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo '```diff' >> $GITHUB_STEP_SUMMARY
|
||||||
|
tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
|
build:
|
||||||
|
name: Build
|
||||||
|
runs-on: self-hosted
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- target: aarch64-linux-musl
|
||||||
|
- target: x86_64-linux-musl
|
||||||
|
steps:
|
||||||
|
- name: Sync repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
|
- name: Setup SSH web publish
|
||||||
|
env:
|
||||||
|
web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}
|
||||||
|
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]'
|
||||||
|
run: |
|
||||||
|
mkdir -p -v ~/.ssh
|
||||||
|
|
||||||
|
echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts
|
||||||
|
echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519
|
||||||
|
|
||||||
|
chmod 600 ~/.ssh/id_ed25519
|
||||||
|
|
||||||
|
cat >>~/.ssh/config <<END
|
||||||
|
Host website
|
||||||
|
HostName ${{ secrets.WEB_UPLOAD_SSH_HOSTNAME }}
|
||||||
|
User ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
|
||||||
|
IdentityFile ~/.ssh/id_ed25519
|
||||||
|
StrictHostKeyChecking yes
|
||||||
|
AddKeysToAgent no
|
||||||
|
ForwardX11 no
|
||||||
|
BatchMode yes
|
||||||
|
END
|
||||||
|
|
||||||
|
echo "Checking connection"
|
||||||
|
ssh -q website "echo test" || ssh -q website "echo test"
|
||||||
|
|
||||||
|
echo "SSH_WEBSITE=1" >> "$GITHUB_ENV"
|
||||||
|
|
||||||
|
- name: Prepare build environment
|
||||||
|
run: |
|
||||||
|
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
|
||||||
|
direnv allow
|
||||||
|
nix develop .#all-features --command true --impure
|
||||||
|
|
||||||
|
# use rust-cache
|
||||||
|
- uses: Swatinem/rust-cache@v2
|
||||||
|
# we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting
|
||||||
|
# releases and tags
|
||||||
|
#if: ${{ !startsWith(github.ref, 'refs/tags/') }}
|
||||||
|
with:
|
||||||
|
cache-all-crates: "true"
|
||||||
|
cache-on-failure: "true"
|
||||||
|
cache-targets: "true"
|
||||||
|
|
||||||
|
- name: Build static ${{ matrix.target }}-all-features
|
||||||
|
run: |
|
||||||
|
if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]]
|
||||||
|
then
|
||||||
|
CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl"
|
||||||
|
elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]]
|
||||||
|
then
|
||||||
|
CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl"
|
||||||
|
fi
|
||||||
|
|
||||||
|
SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
|
||||||
|
|
||||||
|
bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features
|
||||||
|
|
||||||
|
mkdir -v -p target/release/
|
||||||
|
mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/
|
||||||
|
cp -v -f result/bin/conduwuit target/release/conduwuit
|
||||||
|
cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit
|
||||||
|
direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb
|
||||||
|
mv -v target/release/conduwuit static-${{ matrix.target }}
|
||||||
|
mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb
|
||||||
|
|
||||||
|
- name: Build static x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||||
|
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||||
|
run: |
|
||||||
|
CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl"
|
||||||
|
SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
|
||||||
|
|
||||||
|
bin/nix-build-and-cache just .#static-x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||||
|
|
||||||
|
mkdir -v -p target/release/
|
||||||
|
mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/
|
||||||
|
cp -v -f result/bin/conduwuit target/release/conduwuit
|
||||||
|
cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit
|
||||||
|
direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb
|
||||||
|
mv -v target/release/conduwuit static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||||
|
mv -v target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb x86_64-linux-musl-x86_64-haswell-optimised.deb
|
||||||
|
|
||||||
|
# quick smoke test of the x86_64 static release binary
|
||||||
|
- name: Quick smoke test the x86_64 static release binary
|
||||||
|
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||||
|
run: |
|
||||||
|
# GH actions default runners are x86_64 only
|
||||||
|
if file result/bin/conduwuit | grep x86-64; then
|
||||||
|
result/bin/conduwuit --version
|
||||||
|
result/bin/conduwuit --help
|
||||||
|
result/bin/conduwuit -Oserver_name="'$(date -u +%s).local'" -Odatabase_path="'/tmp/$(date -u +%s)'" --execute "server admin-notice awawawawawawawawawawa" --execute "server memory-usage" --execute "server shutdown"
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Build static debug ${{ matrix.target }}-all-features
|
||||||
|
run: |
|
||||||
|
if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]]
|
||||||
|
then
|
||||||
|
CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl"
|
||||||
|
elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]]
|
||||||
|
then
|
||||||
|
CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl"
|
||||||
|
fi
|
||||||
|
|
||||||
|
SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct)
|
||||||
|
|
||||||
|
bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features-debug
|
||||||
|
|
||||||
|
# > warning: dev profile is not supported and will be a hard error in the future. cargo-deb is for making releases, and it doesn't make sense to use it with dev profiles.
|
||||||
|
# so we need to coerce cargo-deb into thinking this is a release binary
|
||||||
|
mkdir -v -p target/release/
|
||||||
|
mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/
|
||||||
|
cp -v -f result/bin/conduwuit target/release/conduwuit
|
||||||
|
cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit
|
||||||
|
direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}-debug.deb
|
||||||
|
mv -v target/release/conduwuit static-${{ matrix.target }}-debug
|
||||||
|
mv -v target/release/${{ matrix.target }}-debug.deb ${{ matrix.target }}-debug.deb
|
||||||
|
|
||||||
|
# quick smoke test of the x86_64 static debug binary
|
||||||
|
- name: Run x86_64 static debug binary
|
||||||
|
run: |
|
||||||
|
# GH actions default runners are x86_64 only
|
||||||
|
if file result/bin/conduwuit | grep x86-64; then
|
||||||
|
result/bin/conduwuit --version
|
||||||
|
fi
|
||||||
|
|
||||||
|
# check validity of produced deb package, invalid debs will error on these commands
|
||||||
|
- name: Validate produced deb package
|
||||||
|
run: |
|
||||||
|
# List contents
|
||||||
|
dpkg-deb --contents ${{ matrix.target }}.deb
|
||||||
|
dpkg-deb --contents ${{ matrix.target }}-debug.deb
|
||||||
|
# List info
|
||||||
|
dpkg-deb --info ${{ matrix.target }}.deb
|
||||||
|
dpkg-deb --info ${{ matrix.target }}-debug.deb
|
||||||
|
|
||||||
|
- name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||||
|
with:
|
||||||
|
name: static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||||
|
path: static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||||
|
if-no-files-found: error
|
||||||
|
|
||||||
|
- name: Upload static-${{ matrix.target }}-all-features to GitHub
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: static-${{ matrix.target }}
|
||||||
|
path: static-${{ matrix.target }}
|
||||||
|
if-no-files-found: error
|
||||||
|
|
||||||
|
- name: Upload static deb ${{ matrix.target }}-all-features to GitHub
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: deb-${{ matrix.target }}
|
||||||
|
path: ${{ matrix.target }}.deb
|
||||||
|
if-no-files-found: error
|
||||||
|
compression-level: 0
|
||||||
|
|
||||||
|
- name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver
|
||||||
|
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||||
|
run: |
|
||||||
|
if [ ! -z $SSH_WEBSITE ]; then
|
||||||
|
chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||||
|
scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-x86_64-linux-musl-x86_64-haswell-optimised
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Upload static-${{ matrix.target }}-all-features to webserver
|
||||||
|
run: |
|
||||||
|
if [ ! -z $SSH_WEBSITE ]; then
|
||||||
|
chmod +x static-${{ matrix.target }}
|
||||||
|
scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver
|
||||||
|
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||||
|
run: |
|
||||||
|
if [ ! -z $SSH_WEBSITE ]; then
|
||||||
|
scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/x86_64-linux-musl-x86_64-haswell-optimised.deb
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Upload static deb ${{ matrix.target }}-all-features to webserver
|
||||||
|
run: |
|
||||||
|
if [ ! -z $SSH_WEBSITE ]; then
|
||||||
|
scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}.deb
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Upload static-${{ matrix.target }}-debug-all-features to GitHub
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: static-${{ matrix.target }}-debug
|
||||||
|
path: static-${{ matrix.target }}-debug
|
||||||
|
if-no-files-found: error
|
||||||
|
|
||||||
|
- name: Upload static deb ${{ matrix.target }}-debug-all-features to GitHub
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: deb-${{ matrix.target }}-debug
|
||||||
|
path: ${{ matrix.target }}-debug.deb
|
||||||
|
if-no-files-found: error
|
||||||
|
compression-level: 0
|
||||||
|
|
||||||
|
- name: Upload static-${{ matrix.target }}-debug-all-features to webserver
|
||||||
|
run: |
|
||||||
|
if [ ! -z $SSH_WEBSITE ]; then
|
||||||
|
scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}-debug
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver
|
||||||
|
run: |
|
||||||
|
if [ ! -z $SSH_WEBSITE ]; then
|
||||||
|
scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}-debug.deb
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Build OCI image ${{ matrix.target }}-all-features
|
||||||
|
run: |
|
||||||
|
bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features
|
||||||
|
|
||||||
|
cp -v -f result oci-image-${{ matrix.target }}.tar.gz
|
||||||
|
|
||||||
|
- name: Build OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||||
|
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||||
|
run: |
|
||||||
|
bin/nix-build-and-cache just .#oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||||
|
|
||||||
|
cp -v -f result oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz
|
||||||
|
|
||||||
|
- name: Build debug OCI image ${{ matrix.target }}-all-features
|
||||||
|
run: |
|
||||||
|
bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features-debug
|
||||||
|
|
||||||
|
cp -v -f result oci-image-${{ matrix.target }}-debug.tar.gz
|
||||||
|
|
||||||
|
- name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub
|
||||||
|
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised
|
||||||
|
path: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz
|
||||||
|
if-no-files-found: error
|
||||||
|
compression-level: 0
|
||||||
|
- name: Upload OCI image ${{ matrix.target }}-all-features to GitHub
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: oci-image-${{ matrix.target }}
|
||||||
|
path: oci-image-${{ matrix.target }}.tar.gz
|
||||||
|
if-no-files-found: error
|
||||||
|
compression-level: 0
|
||||||
|
|
||||||
|
- name: Upload OCI image ${{ matrix.target }}-debug-all-features to GitHub
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: oci-image-${{ matrix.target }}-debug
|
||||||
|
path: oci-image-${{ matrix.target }}-debug.tar.gz
|
||||||
|
if-no-files-found: error
|
||||||
|
compression-level: 0
|
||||||
|
|
||||||
|
- name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz to webserver
|
||||||
|
if: ${{ matrix.target == 'x86_64-linux-musl' }}
|
||||||
|
run: |
|
||||||
|
if [ ! -z $SSH_WEBSITE ]; then
|
||||||
|
scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Upload OCI image ${{ matrix.target }}-all-features to webserver
|
||||||
|
run: |
|
||||||
|
if [ ! -z $SSH_WEBSITE ]; then
|
||||||
|
scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}.tar.gz
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver
|
||||||
|
run: |
|
||||||
|
if [ ! -z $SSH_WEBSITE ]; then
|
||||||
|
scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz
|
||||||
|
fi
|
||||||
|
|
||||||
|
variables:
|
||||||
|
outputs:
|
||||||
|
github_repository: ${{ steps.var.outputs.github_repository }}
|
||||||
|
runs-on: self-hosted
|
||||||
|
steps:
|
||||||
|
- name: Setting global variables
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
id: var
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase())
|
||||||
|
docker:
|
||||||
|
name: Docker publish
|
||||||
|
runs-on: self-hosted
|
||||||
|
needs: [build, variables, tests]
|
||||||
|
permissions:
|
||||||
|
packages: write
|
||||||
|
contents: read
|
||||||
|
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]'
|
||||||
|
env:
|
||||||
|
DOCKER_HUB_REPO: docker.io/${{ needs.variables.outputs.github_repository }}
|
||||||
|
GHCR_REPO: ghcr.io/${{ needs.variables.outputs.github_repository }}
|
||||||
|
GLCR_REPO: registry.gitlab.com/conduwuit/conduwuit
|
||||||
|
UNIQUE_TAG: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }}
|
||||||
|
BRANCH_TAG: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}
|
||||||
|
|
||||||
|
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
|
||||||
|
GHCR_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}"
|
||||||
|
steps:
|
||||||
|
- name: Login to GitHub Container Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }}
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: docker.io
|
||||||
|
username: ${{ vars.DOCKER_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login to GitLab Container Registry
|
||||||
|
if: ${{ (vars.GITLAB_USERNAME != '') && (env.GITLAB_TOKEN != '') }}
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: registry.gitlab.com
|
||||||
|
username: ${{ vars.GITLAB_USERNAME }}
|
||||||
|
password: ${{ secrets.GITLAB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Download artifacts
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
pattern: "oci*"
|
||||||
|
|
||||||
|
- name: Move OCI images into position
|
||||||
|
run: |
|
||||||
|
mv -v oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised/*.tar.gz oci-image-amd64-haswell-optimised.tar.gz
|
||||||
|
mv -v oci-image-x86_64-linux-musl/*.tar.gz oci-image-amd64.tar.gz
|
||||||
|
mv -v oci-image-aarch64-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz
|
||||||
|
mv -v oci-image-x86_64-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz
|
||||||
|
mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz
|
||||||
|
|
||||||
|
- name: Load and push amd64 haswell image
|
||||||
|
run: |
|
||||||
|
docker load -i oci-image-amd64-haswell-optimised.tar.gz
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
fi
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
docker push ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
fi
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
docker push ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Load and push amd64 image
|
||||||
|
run: |
|
||||||
|
docker load -i oci-image-amd64.tar.gz
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
fi
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
fi
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Load and push arm64 image
|
||||||
|
run: |
|
||||||
|
docker load -i oci-image-arm64v8.tar.gz
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8
|
||||||
|
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8
|
||||||
|
fi
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8
|
||||||
|
docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8
|
||||||
|
fi
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8
|
||||||
|
docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Load and push amd64 debug image
|
||||||
|
run: |
|
||||||
|
docker load -i oci-image-amd64-debug.tar.gz
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
fi
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
fi
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Load and push arm64 debug image
|
||||||
|
run: |
|
||||||
|
docker load -i oci-image-arm64v8-debug.tar.gz
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||||
|
docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||||
|
fi
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||||
|
docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||||
|
fi
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||||
|
docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Create Docker haswell manifests
|
||||||
|
run: |
|
||||||
|
# Dockerhub Container Registry
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
fi
|
||||||
|
# GitHub Container Registry
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
fi
|
||||||
|
# GitLab Container Registry
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Create Docker combined manifests
|
||||||
|
run: |
|
||||||
|
# Dockerhub Container Registry
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
fi
|
||||||
|
# GitHub Container Registry
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
docker manifest create ${GHCR_REPO}:${UNIQUE_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
docker manifest create ${GHCR_REPO}:${BRANCH_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
fi
|
||||||
|
# GitLab Container Registry
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
docker manifest create ${GLCR_REPO}:${UNIQUE_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
docker manifest create ${GLCR_REPO}:${BRANCH_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Create Docker combined debug manifests
|
||||||
|
run: |
|
||||||
|
# Dockerhub Container Registry
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
fi
|
||||||
|
# GitHub Container Registry
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
fi
|
||||||
|
# GitLab Container Registry
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Push manifests to Docker registries
|
||||||
|
run: |
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}
|
||||||
|
docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}
|
||||||
|
docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug
|
||||||
|
docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug
|
||||||
|
docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell
|
||||||
|
fi
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}
|
||||||
|
docker manifest push ${GHCR_REPO}:${BRANCH_TAG}
|
||||||
|
docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-debug
|
||||||
|
docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-debug
|
||||||
|
docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-haswell
|
||||||
|
fi
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}
|
||||||
|
docker manifest push ${GLCR_REPO}:${BRANCH_TAG}
|
||||||
|
docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-debug
|
||||||
|
docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-debug
|
||||||
|
docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-haswell
|
||||||
|
docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-haswell
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Add Image Links to Job Summary
|
||||||
|
run: |
|
||||||
|
if [ ! -z $DOCKERHUB_TOKEN ]; then
|
||||||
|
echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
if [ $GHCR_ENABLED = "true" ]; then
|
||||||
|
echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
if [ ! -z $GITLAB_TOKEN ]; then
|
||||||
|
echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
41
.github/workflows/docker-hub-description.yml
vendored
Normal file
41
.github/workflows/docker-hub-description.yml
vendored
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
name: Update Docker Hub Description
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
paths:
|
||||||
|
- README.md
|
||||||
|
- .github/workflows/docker-hub-description.yml
|
||||||
|
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
dockerHubDescription:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' && (vars.DOCKER_USERNAME != '') }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
|
- name: Setting variables
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
id: var
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
const githubRepo = '${{ github.repository }}'.toLowerCase()
|
||||||
|
const repoId = githubRepo.split('/')[1]
|
||||||
|
|
||||||
|
core.setOutput('github_repository', githubRepo)
|
||||||
|
const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId
|
||||||
|
core.setOutput('docker_repo', dockerRepo)
|
||||||
|
|
||||||
|
- name: Docker Hub Description
|
||||||
|
uses: peter-evans/dockerhub-description@v4
|
||||||
|
with:
|
||||||
|
username: ${{ vars.DOCKER_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
repository: ${{ steps.var.outputs.docker_repo }}
|
||||||
|
short-description: ${{ github.event.repository.description }}
|
||||||
|
enable-url-completion: true
|
104
.github/workflows/documentation.yml
vendored
Normal file
104
.github/workflows/documentation.yml
vendored
Normal file
|
@ -0,0 +1,104 @@
|
||||||
|
name: Documentation and GitHub Pages
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
tags:
|
||||||
|
- '*'
|
||||||
|
|
||||||
|
# Allows you to run this workflow manually from the Actions tab
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
env:
|
||||||
|
# Required to make some things output color
|
||||||
|
TERM: ansi
|
||||||
|
# Publishing to my nix binary cache
|
||||||
|
ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
|
||||||
|
# conduwuit.cachix.org
|
||||||
|
CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }}
|
||||||
|
# Custom nix binary cache if fork is being used
|
||||||
|
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
|
||||||
|
ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }}
|
||||||
|
# Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps
|
||||||
|
NIX_CONFIG: |
|
||||||
|
show-trace = true
|
||||||
|
extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org
|
||||||
|
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=
|
||||||
|
experimental-features = nix-command flakes
|
||||||
|
extra-experimental-features = nix-command flakes
|
||||||
|
accept-flake-config = true
|
||||||
|
|
||||||
|
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
|
||||||
|
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
|
||||||
|
concurrency:
|
||||||
|
group: "pages"
|
||||||
|
cancel-in-progress: false
|
||||||
|
|
||||||
|
permissions: {}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
docs:
|
||||||
|
name: Documentation and GitHub Pages
|
||||||
|
runs-on: self-hosted
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
pages: write
|
||||||
|
id-token: write
|
||||||
|
|
||||||
|
environment:
|
||||||
|
name: github-pages
|
||||||
|
url: ${{ steps.deployment.outputs.page_url }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Sync repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
|
|
||||||
|
- name: Setup GitHub Pages
|
||||||
|
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request')
|
||||||
|
uses: actions/configure-pages@v5
|
||||||
|
|
||||||
|
- name: Prepare build environment
|
||||||
|
run: |
|
||||||
|
echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc"
|
||||||
|
direnv allow
|
||||||
|
nix develop --command true
|
||||||
|
|
||||||
|
- name: Cache CI dependencies
|
||||||
|
run: |
|
||||||
|
bin/nix-build-and-cache ci
|
||||||
|
|
||||||
|
- name: Run lychee and markdownlint
|
||||||
|
run: |
|
||||||
|
direnv exec . engage just lints lychee
|
||||||
|
direnv exec . engage just lints markdownlint
|
||||||
|
|
||||||
|
- name: Build documentation (book)
|
||||||
|
run: |
|
||||||
|
bin/nix-build-and-cache just .#book
|
||||||
|
|
||||||
|
cp -r --dereference result public
|
||||||
|
chmod u+w -R public
|
||||||
|
|
||||||
|
- name: Upload generated documentation (book) as normal artifact
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: public
|
||||||
|
path: public
|
||||||
|
if-no-files-found: error
|
||||||
|
# don't compress again
|
||||||
|
compression-level: 0
|
||||||
|
|
||||||
|
- name: Upload generated documentation (book) as GitHub Pages artifact
|
||||||
|
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request')
|
||||||
|
uses: actions/upload-pages-artifact@v3
|
||||||
|
with:
|
||||||
|
path: public
|
||||||
|
|
||||||
|
- name: Deploy to GitHub Pages
|
||||||
|
if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request')
|
||||||
|
id: deployment
|
||||||
|
uses: actions/deploy-pages@v4
|
118
.github/workflows/release.yml
vendored
Normal file
118
.github/workflows/release.yml
vendored
Normal file
|
@ -0,0 +1,118 @@
|
||||||
|
name: Upload Release Assets
|
||||||
|
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types: [published]
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
tag:
|
||||||
|
description: 'Tag to release'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
action_id:
|
||||||
|
description: 'Action ID of the CI run'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
|
||||||
|
permissions: {}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
publish:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
env:
|
||||||
|
GH_EVENT_NAME: ${{ github.event_name }}
|
||||||
|
GH_EVENT_INPUTS_ACTION_ID: ${{ github.event.inputs.action_id }}
|
||||||
|
GH_EVENT_INPUTS_TAG: ${{ github.event.inputs.tag }}
|
||||||
|
GH_REPOSITORY: ${{ github.repository }}
|
||||||
|
GH_SHA: ${{ github.sha }}
|
||||||
|
GH_TAG: ${{ github.event.release.tag_name }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: get latest ci id
|
||||||
|
id: get_ci_id
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ github.token }}
|
||||||
|
run: |
|
||||||
|
if [ "${GH_EVENT_NAME}" == "workflow_dispatch" ]; then
|
||||||
|
id="${GH_EVENT_INPUTS_ACTION_ID}"
|
||||||
|
tag="${GH_EVENT_INPUTS_TAG}"
|
||||||
|
else
|
||||||
|
# get all runs of the ci workflow
|
||||||
|
json=$(gh api "repos/${GH_REPOSITORY}/actions/workflows/ci.yml/runs")
|
||||||
|
|
||||||
|
# find first run that is github sha and status is completed
|
||||||
|
id=$(echo "$json" | jq ".workflow_runs[] | select(.head_sha == \"${GH_SHA}\" and .status == \"completed\") | .id" | head -n 1)
|
||||||
|
|
||||||
|
if [ ! "$id" ]; then
|
||||||
|
echo "No completed runs found"
|
||||||
|
echo "ci_id=0" >> "$GITHUB_OUTPUT"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
tag="${GH_TAG}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "ci_id=$id" >> "$GITHUB_OUTPUT"
|
||||||
|
echo "tag=$tag" >> "$GITHUB_OUTPUT"
|
||||||
|
|
||||||
|
- name: get latest ci artifacts
|
||||||
|
if: steps.get_ci_id.outputs.ci_id != 0
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ github.token }}
|
||||||
|
with:
|
||||||
|
merge-multiple: true
|
||||||
|
run-id: ${{ steps.get_ci_id.outputs.ci_id }}
|
||||||
|
github-token: ${{ github.token }}
|
||||||
|
|
||||||
|
- run: |
|
||||||
|
ls
|
||||||
|
|
||||||
|
- name: upload release assets
|
||||||
|
if: steps.get_ci_id.outputs.ci_id != 0
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ github.token }}
|
||||||
|
TAG: ${{ steps.get_ci_id.outputs.tag }}
|
||||||
|
run: |
|
||||||
|
for file in $(find . -type f); do
|
||||||
|
case "$file" in
|
||||||
|
*json*) echo "Skipping $file...";;
|
||||||
|
*) echo "Uploading $file..."; gh release upload $TAG "$file" --clobber --repo="${GH_REPOSITORY}" || echo "Something went wrong, skipping.";;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: upload release assets to website
|
||||||
|
if: steps.get_ci_id.outputs.ci_id != 0
|
||||||
|
env:
|
||||||
|
TAG: ${{ steps.get_ci_id.outputs.tag }}
|
||||||
|
run: |
|
||||||
|
mkdir -p -v ~/.ssh
|
||||||
|
|
||||||
|
echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts
|
||||||
|
echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519
|
||||||
|
|
||||||
|
chmod 600 ~/.ssh/id_ed25519
|
||||||
|
|
||||||
|
cat >>~/.ssh/config <<END
|
||||||
|
Host website
|
||||||
|
HostName ${{ secrets.WEB_UPLOAD_SSH_HOSTNAME }}
|
||||||
|
User ${{ secrets.WEB_UPLOAD_SSH_USERNAME }}
|
||||||
|
IdentityFile ~/.ssh/id_ed25519
|
||||||
|
StrictHostKeyChecking yes
|
||||||
|
AddKeysToAgent no
|
||||||
|
ForwardX11 no
|
||||||
|
BatchMode yes
|
||||||
|
END
|
||||||
|
|
||||||
|
echo "Creating tag directory on web server"
|
||||||
|
ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/releases/$TAG/"
|
||||||
|
ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/releases/$TAG/"
|
||||||
|
|
||||||
|
for file in $(find . -type f); do
|
||||||
|
case "$file" in
|
||||||
|
*json*) echo "Skipping $file...";;
|
||||||
|
*) echo "Uploading $file to website"; scp $file website:/var/www/girlboss.ceo/~strawberry/conduwuit/releases/$TAG/$file;;
|
||||||
|
esac
|
||||||
|
done
|
152
.gitlab-ci.yml
Normal file
152
.gitlab-ci.yml
Normal file
|
@ -0,0 +1,152 @@
|
||||||
|
stages:
|
||||||
|
- ci
|
||||||
|
- artifacts
|
||||||
|
- publish
|
||||||
|
|
||||||
|
variables:
|
||||||
|
# Makes some things print in color
|
||||||
|
TERM: ansi
|
||||||
|
# Faster cache and artifact compression / decompression
|
||||||
|
FF_USE_FASTZIP: true
|
||||||
|
# Print progress reports for cache and artifact transfers
|
||||||
|
TRANSFER_METER_FREQUENCY: 5s
|
||||||
|
NIX_CONFIG: |
|
||||||
|
show-trace = true
|
||||||
|
extra-substituters = https://attic.kennel.juneis.dog/conduit https://attic.kennel.juneis.dog/conduwuit https://conduwuit.cachix.org
|
||||||
|
extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
|
||||||
|
experimental-features = nix-command flakes
|
||||||
|
extra-experimental-features = nix-command flakes
|
||||||
|
accept-flake-config = true
|
||||||
|
|
||||||
|
# Avoid duplicate pipelines
|
||||||
|
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
|
||||||
|
workflow:
|
||||||
|
rules:
|
||||||
|
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||||
|
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
|
||||||
|
when: never
|
||||||
|
- if: $CI
|
||||||
|
|
||||||
|
before_script:
|
||||||
|
# Enable nix-command and flakes
|
||||||
|
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
|
||||||
|
# Accept flake config from "untrusted" users
|
||||||
|
- if command -v nix > /dev/null; then echo "accept-flake-config = true" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Add conduwuit binary cache
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduwuit" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE=" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduit" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk=" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Add alternate binary cache
|
||||||
|
- if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Add crane binary cache
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Add nix-community binary cache
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
- if command -v nix > /dev/null; then echo "extra-substituters = https://aseipp-nix-cache.freetls.fastly.net" >> /etc/nix/nix.conf; fi
|
||||||
|
|
||||||
|
# Install direnv and nix-direnv
|
||||||
|
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
|
||||||
|
|
||||||
|
# Allow .envrc
|
||||||
|
- if command -v nix > /dev/null; then direnv allow; fi
|
||||||
|
|
||||||
|
# Set CARGO_HOME to a cacheable path
|
||||||
|
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
|
||||||
|
|
||||||
|
ci:
|
||||||
|
stage: ci
|
||||||
|
image: nixos/nix:2.24.9
|
||||||
|
script:
|
||||||
|
# Cache CI dependencies
|
||||||
|
- ./bin/nix-build-and-cache ci
|
||||||
|
|
||||||
|
- direnv exec . engage
|
||||||
|
cache:
|
||||||
|
key: nix
|
||||||
|
paths:
|
||||||
|
- target
|
||||||
|
- .gitlab-ci.d
|
||||||
|
rules:
|
||||||
|
# CI on upstream runners (only available for maintainers)
|
||||||
|
- if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true"
|
||||||
|
# Manual CI on unprotected branches that are not MRs
|
||||||
|
- if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false"
|
||||||
|
when: manual
|
||||||
|
# Manual CI on forks
|
||||||
|
- if: $IS_UPSTREAM_CI != "true"
|
||||||
|
when: manual
|
||||||
|
- if: $CI
|
||||||
|
interruptible: true
|
||||||
|
|
||||||
|
artifacts:
|
||||||
|
stage: artifacts
|
||||||
|
image: nixos/nix:2.24.9
|
||||||
|
script:
|
||||||
|
- ./bin/nix-build-and-cache just .#static-x86_64-linux-musl
|
||||||
|
- cp result/bin/conduit x86_64-linux-musl
|
||||||
|
|
||||||
|
- mkdir -p target/release
|
||||||
|
- cp result/bin/conduit target/release
|
||||||
|
- direnv exec . cargo deb --no-build --no-strip
|
||||||
|
- mv target/debian/*.deb x86_64-linux-musl.deb
|
||||||
|
|
||||||
|
# Since the OCI image package is based on the binary package, this has the
|
||||||
|
# fun side effect of uploading the normal binary too. Conduit users who are
|
||||||
|
# deploying with Nix can leverage this fact by adding our binary cache to
|
||||||
|
# their systems.
|
||||||
|
#
|
||||||
|
# Note that although we have an `oci-image-x86_64-linux-musl`
|
||||||
|
# output, we don't build it because it would be largely redundant to this
|
||||||
|
# one since it's all containerized anyway.
|
||||||
|
- ./bin/nix-build-and-cache just .#oci-image
|
||||||
|
- cp result oci-image-amd64.tar.gz
|
||||||
|
|
||||||
|
- ./bin/nix-build-and-cache just .#static-aarch64-linux-musl
|
||||||
|
- cp result/bin/conduit aarch64-linux-musl
|
||||||
|
|
||||||
|
- ./bin/nix-build-and-cache just .#oci-image-aarch64-linux-musl
|
||||||
|
- cp result oci-image-arm64v8.tar.gz
|
||||||
|
|
||||||
|
- ./bin/nix-build-and-cache just .#book
|
||||||
|
# We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746
|
||||||
|
- cp -r --dereference result public
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- x86_64-linux-musl
|
||||||
|
- aarch64-linux-musl
|
||||||
|
- x86_64-linux-musl.deb
|
||||||
|
- oci-image-amd64.tar.gz
|
||||||
|
- oci-image-arm64v8.tar.gz
|
||||||
|
- public
|
||||||
|
rules:
|
||||||
|
# CI required for all MRs
|
||||||
|
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||||
|
# Optional CI on forks
|
||||||
|
- if: $IS_UPSTREAM_CI != "true"
|
||||||
|
when: manual
|
||||||
|
allow_failure: true
|
||||||
|
- if: $CI
|
||||||
|
interruptible: true
|
||||||
|
|
||||||
|
pages:
|
||||||
|
stage: publish
|
||||||
|
dependencies:
|
||||||
|
- artifacts
|
||||||
|
only:
|
||||||
|
- next
|
||||||
|
script:
|
||||||
|
- "true"
|
||||||
|
artifacts:
|
||||||
|
paths:
|
||||||
|
- public
|
8
.gitlab/merge_request_templates/MR.md
Normal file
8
.gitlab/merge_request_templates/MR.md
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
|
||||||
|
<!-- Please describe your changes here -->
|
||||||
|
|
||||||
|
-----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test`
|
||||||
|
- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license
|
||||||
|
|
3
.gitlab/route-map.yml
Normal file
3
.gitlab/route-map.yml
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
# Docs: Map markdown to html files
|
||||||
|
- source: /docs/(.+)\.md/
|
||||||
|
public: '\1.html'
|
15
.mailmap
15
.mailmap
|
@ -1,15 +0,0 @@
|
||||||
AlexPewMaster <git@alex.unbox.at> <68469103+AlexPewMaster@users.noreply.github.com>
|
|
||||||
Daniel Wiesenberg <weasy@hotmail.de> <weasy666@gmail.com>
|
|
||||||
Devin Ragotzy <devin.ragotzy@gmail.com> <d6ragotzy@wmich.edu>
|
|
||||||
Devin Ragotzy <devin.ragotzy@gmail.com> <dragotzy7460@mail.kvcc.edu>
|
|
||||||
Jonas Platte <jplatte+git@posteo.de> <jplatte+gitlab@posteo.de>
|
|
||||||
Jonas Zohren <git-pbkyr@jzohren.de> <gitlab-jfowl-0ux98@sh14.de>
|
|
||||||
Jonathan de Jong <jonathan@automatia.nl> <jonathandejong02@gmail.com>
|
|
||||||
June Clementine Strawberry <june@3.dog> <june@girlboss.ceo>
|
|
||||||
June Clementine Strawberry <june@3.dog> <strawberry@pupbrain.dev>
|
|
||||||
June Clementine Strawberry <june@3.dog> <strawberry@puppygock.gay>
|
|
||||||
Olivia Lee <olivia@computer.surgery> <benjamin@computer.surgery>
|
|
||||||
Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
|
|
||||||
Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
|
|
||||||
Timo Kösters <timo@koesters.xyz>
|
|
||||||
x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
|
|
11
.vscode/settings.json
vendored
11
.vscode/settings.json
vendored
|
@ -1,11 +0,0 @@
|
||||||
{
|
|
||||||
"cSpell.words": [
|
|
||||||
"Forgejo",
|
|
||||||
"appservice",
|
|
||||||
"appservices",
|
|
||||||
"conduwuit",
|
|
||||||
"continuwuity",
|
|
||||||
"homeserver",
|
|
||||||
"homeservers"
|
|
||||||
]
|
|
||||||
}
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
|
||||||
# Contributor Covenant Code of Conduct
|
# Contributor Covenant Code of Conduct
|
||||||
|
|
||||||
## Our Pledge
|
## Our Pledge
|
||||||
|
@ -59,7 +60,8 @@ representative at an online or offline event.
|
||||||
## Enforcement
|
## Enforcement
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or email at <tom@tcpip.uk>, <jade@continuwuity.org> and <nex@continuwuity.org> respectively.
|
reported to the community leaders responsible for enforcement over email at
|
||||||
|
<strawberry@puppygock.gay> or over Matrix at @strawberry:puppygock.gay.
|
||||||
All complaints will be reviewed and investigated promptly and fairly.
|
All complaints will be reviewed and investigated promptly and fairly.
|
||||||
|
|
||||||
All community leaders are obligated to respect the privacy and security of the
|
All community leaders are obligated to respect the privacy and security of the
|
||||||
|
|
|
@ -4,7 +4,7 @@ This page is for about contributing to conduwuit. The
|
||||||
[development](./development.md) page may be of interest for you as well.
|
[development](./development.md) page may be of interest for you as well.
|
||||||
|
|
||||||
If you would like to work on an [issue][issues] that is not assigned, preferably
|
If you would like to work on an [issue][issues] that is not assigned, preferably
|
||||||
ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix],
|
ask in the Matrix room first at [#conduwuit:puppygock.gay][conduwuit-matrix],
|
||||||
and comment on it.
|
and comment on it.
|
||||||
|
|
||||||
### Linting and Formatting
|
### Linting and Formatting
|
||||||
|
@ -23,9 +23,9 @@ suggestion, allow the lint and mention that in a comment.
|
||||||
|
|
||||||
### Running CI tests locally
|
### Running CI tests locally
|
||||||
|
|
||||||
continuwuity's CI for tests, linting, formatting, audit, etc use
|
conduwuit's CI for tests, linting, formatting, audit, etc use
|
||||||
[`engage`][engage]. engage can be installed from nixpkgs or `cargo install
|
[`engage`][engage]. engage can be installed from nixpkgs or `cargo install
|
||||||
engage`. continuwuity's Nix flake devshell has the nixpkgs engage with `direnv`.
|
engage`. conduwuit's Nix flake devshell has the nixpkgs engage with `direnv`.
|
||||||
Use `engage --help` for more usage details.
|
Use `engage --help` for more usage details.
|
||||||
|
|
||||||
To test, format, lint, etc that CI would do, install engage, allow the `.envrc`
|
To test, format, lint, etc that CI would do, install engage, allow the `.envrc`
|
||||||
|
@ -111,28 +111,33 @@ applies here.
|
||||||
|
|
||||||
### Creating pull requests
|
### Creating pull requests
|
||||||
|
|
||||||
Please try to keep contributions to the Forgejo Instance. While the mirrors of continuwuity
|
Please try to keep contributions to the GitHub. While the mirrors of conduwuit
|
||||||
allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely
|
allow for pull/merge requests, there is no guarantee I will see them in a timely
|
||||||
manner. Additionally, please mark WIP or unfinished or incomplete PRs as drafts.
|
manner. Additionally, please mark WIP or unfinished or incomplete PRs as drafts.
|
||||||
This prevents us from having to ping once in a while to double check the status
|
This prevents me from having to ping once in a while to double check the status
|
||||||
of it, especially when the CI completed successfully and everything so it
|
of it, especially when the CI completed successfully and everything so it
|
||||||
*looks* done.
|
*looks* done.
|
||||||
|
|
||||||
|
If you open a pull request on one of the mirrors, it is your responsibility to
|
||||||
|
inform me about its existence. In the future I may try to solve this with more
|
||||||
|
repo bots in the conduwuit Matrix room. There is no mailing list or email-patch
|
||||||
|
support on the sr.ht mirror, but if you'd like to email me a git patch you can
|
||||||
|
do so at `strawberry@puppygock.gay`.
|
||||||
|
|
||||||
Direct all PRs/MRs to the `main` branch.
|
Direct all PRs/MRs to the `main` branch.
|
||||||
|
|
||||||
By sending a pull request or patch, you are agreeing that your changes are
|
By sending a pull request or patch, you are agreeing that your changes are
|
||||||
allowed to be licenced under the Apache-2.0 licence and all of your conduct is
|
allowed to be licenced under the Apache-2.0 licence and all of your conduct is
|
||||||
in line with the Contributor's Covenant, and continuwuity's Code of Conduct.
|
in line with the Contributor's Covenant, and conduwuit's Code of Conduct.
|
||||||
|
|
||||||
Contribution by users who violate either of these code of conducts will not have
|
Contribution by users who violate either of these code of conducts will not have
|
||||||
their contributions accepted. This includes users who have been banned from
|
their contributions accepted. This includes users who have been banned from
|
||||||
continuwuityMatrix rooms for Code of Conduct violations.
|
conduwuit Matrix rooms for Code of Conduct violations.
|
||||||
|
|
||||||
[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues
|
[issues]: https://github.com/girlbossceo/conduwuit/issues
|
||||||
[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org
|
[conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay
|
||||||
[complement]: https://github.com/matrix-org/complement/
|
[complement]: https://github.com/matrix-org/complement/
|
||||||
[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml
|
[engage.toml]: https://github.com/girlbossceo/conduwuit/blob/main/engage.toml
|
||||||
[engage]: https://charles.page.computer.surgery/engage/
|
[engage]: https://charles.page.computer.surgery/engage/
|
||||||
[sytest]: https://github.com/matrix-org/sytest/
|
[sytest]: https://github.com/matrix-org/sytest/
|
||||||
[cargo-deb]: https://github.com/kornelski/cargo-deb
|
[cargo-deb]: https://github.com/kornelski/cargo-deb
|
||||||
|
@ -141,4 +146,4 @@ continuwuityMatrix rooms for Code of Conduct violations.
|
||||||
[cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit
|
[cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit
|
||||||
[direnv]: https://direnv.net/
|
[direnv]: https://direnv.net/
|
||||||
[mdbook]: https://rust-lang.github.io/mdBook/
|
[mdbook]: https://rust-lang.github.io/mdBook/
|
||||||
[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml
|
[documentation.yml]: https://github.com/girlbossceo/conduwuit/blob/main/.github/workflows/documentation.yml
|
||||||
|
|
84
Cargo.lock
generated
84
Cargo.lock
generated
|
@ -118,7 +118,7 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002"
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "async-channel"
|
name = "async-channel"
|
||||||
version = "2.3.1"
|
version = "2.3.1"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280"
|
source = "git+https://github.com/girlbossceo/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"concurrent-queue",
|
"concurrent-queue",
|
||||||
"event-listener-strategy",
|
"event-listener-strategy",
|
||||||
|
@ -725,7 +725,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "conduwuit"
|
name = "conduwuit"
|
||||||
version = "0.5.0-rc.5"
|
version = "0.5.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"clap",
|
"clap",
|
||||||
"conduwuit_admin",
|
"conduwuit_admin",
|
||||||
|
@ -754,7 +754,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "conduwuit_admin"
|
name = "conduwuit_admin"
|
||||||
version = "0.5.0-rc.5"
|
version = "0.5.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"clap",
|
"clap",
|
||||||
"conduwuit_api",
|
"conduwuit_api",
|
||||||
|
@ -775,7 +775,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "conduwuit_api"
|
name = "conduwuit_api"
|
||||||
version = "0.5.0-rc.5"
|
version = "0.5.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"axum",
|
"axum",
|
||||||
|
@ -784,6 +784,7 @@ dependencies = [
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
"bytes",
|
"bytes",
|
||||||
"conduwuit_core",
|
"conduwuit_core",
|
||||||
|
"conduwuit_database",
|
||||||
"conduwuit_service",
|
"conduwuit_service",
|
||||||
"const-str",
|
"const-str",
|
||||||
"futures",
|
"futures",
|
||||||
|
@ -807,7 +808,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "conduwuit_core"
|
name = "conduwuit_core"
|
||||||
version = "0.5.0-rc.5"
|
version = "0.5.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"argon2",
|
"argon2",
|
||||||
"arrayvec",
|
"arrayvec",
|
||||||
|
@ -865,7 +866,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "conduwuit_database"
|
name = "conduwuit_database"
|
||||||
version = "0.5.0-rc.5"
|
version = "0.5.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-channel",
|
"async-channel",
|
||||||
"conduwuit_core",
|
"conduwuit_core",
|
||||||
|
@ -883,7 +884,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "conduwuit_macros"
|
name = "conduwuit_macros"
|
||||||
version = "0.5.0-rc.5"
|
version = "0.5.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"itertools 0.14.0",
|
"itertools 0.14.0",
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
|
@ -893,7 +894,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "conduwuit_router"
|
name = "conduwuit_router"
|
||||||
version = "0.5.0-rc.5"
|
version = "0.5.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"axum",
|
"axum",
|
||||||
"axum-client-ip",
|
"axum-client-ip",
|
||||||
|
@ -926,7 +927,7 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "conduwuit_service"
|
name = "conduwuit_service"
|
||||||
version = "0.5.0-rc.5"
|
version = "0.5.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
|
@ -1046,7 +1047,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "core_affinity"
|
name = "core_affinity"
|
||||||
version = "0.8.1"
|
version = "0.8.1"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da"
|
source = "git+https://github.com/girlbossceo/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"libc",
|
"libc",
|
||||||
"num_cpus",
|
"num_cpus",
|
||||||
|
@ -1118,9 +1119,9 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "crossbeam-channel"
|
name = "crossbeam-channel"
|
||||||
version = "0.5.15"
|
version = "0.5.14"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
|
checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"crossbeam-utils",
|
"crossbeam-utils",
|
||||||
]
|
]
|
||||||
|
@ -1278,9 +1279,9 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "deranged"
|
name = "deranged"
|
||||||
version = "0.4.0"
|
version = "0.4.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e"
|
checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"powerfmt",
|
"powerfmt",
|
||||||
]
|
]
|
||||||
|
@ -1378,7 +1379,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "event-listener"
|
name = "event-listener"
|
||||||
version = "5.3.1"
|
version = "5.3.1"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d"
|
source = "git+https://github.com/girlbossceo/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"concurrent-queue",
|
"concurrent-queue",
|
||||||
"parking",
|
"parking",
|
||||||
|
@ -2029,7 +2030,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "hyper-util"
|
name = "hyper-util"
|
||||||
version = "0.1.11"
|
version = "0.1.11"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941"
|
source = "git+https://github.com/girlbossceo/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bytes",
|
"bytes",
|
||||||
"futures-channel",
|
"futures-channel",
|
||||||
|
@ -3624,7 +3625,8 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "resolv-conf"
|
name = "resolv-conf"
|
||||||
version = "0.7.1"
|
version = "0.7.1"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "48375394603e3dd4b2d64371f7148fd8c7baa2680e28741f2cb8d23b59e3d4c4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"hostname",
|
"hostname",
|
||||||
]
|
]
|
||||||
|
@ -3652,7 +3654,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma"
|
name = "ruma"
|
||||||
version = "0.10.1"
|
version = "0.10.1"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"assign",
|
"assign",
|
||||||
"js_int",
|
"js_int",
|
||||||
|
@ -3672,7 +3674,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-appservice-api"
|
name = "ruma-appservice-api"
|
||||||
version = "0.10.0"
|
version = "0.10.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js_int",
|
"js_int",
|
||||||
"ruma-common",
|
"ruma-common",
|
||||||
|
@ -3684,7 +3686,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-client-api"
|
name = "ruma-client-api"
|
||||||
version = "0.18.0"
|
version = "0.18.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"as_variant",
|
"as_variant",
|
||||||
"assign",
|
"assign",
|
||||||
|
@ -3707,7 +3709,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-common"
|
name = "ruma-common"
|
||||||
version = "0.13.0"
|
version = "0.13.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"as_variant",
|
"as_variant",
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
|
@ -3739,7 +3741,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-events"
|
name = "ruma-events"
|
||||||
version = "0.28.1"
|
version = "0.28.1"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"as_variant",
|
"as_variant",
|
||||||
"indexmap 2.8.0",
|
"indexmap 2.8.0",
|
||||||
|
@ -3764,7 +3766,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-federation-api"
|
name = "ruma-federation-api"
|
||||||
version = "0.9.0"
|
version = "0.9.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bytes",
|
"bytes",
|
||||||
"headers",
|
"headers",
|
||||||
|
@ -3786,7 +3788,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-identifiers-validation"
|
name = "ruma-identifiers-validation"
|
||||||
version = "0.9.5"
|
version = "0.9.5"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js_int",
|
"js_int",
|
||||||
"thiserror 2.0.12",
|
"thiserror 2.0.12",
|
||||||
|
@ -3795,7 +3797,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-identity-service-api"
|
name = "ruma-identity-service-api"
|
||||||
version = "0.9.0"
|
version = "0.9.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js_int",
|
"js_int",
|
||||||
"ruma-common",
|
"ruma-common",
|
||||||
|
@ -3805,7 +3807,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-macros"
|
name = "ruma-macros"
|
||||||
version = "0.13.0"
|
version = "0.13.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"proc-macro-crate",
|
"proc-macro-crate",
|
||||||
|
@ -3820,7 +3822,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-push-gateway-api"
|
name = "ruma-push-gateway-api"
|
||||||
version = "0.9.0"
|
version = "0.9.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js_int",
|
"js_int",
|
||||||
"ruma-common",
|
"ruma-common",
|
||||||
|
@ -3832,7 +3834,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ruma-signatures"
|
name = "ruma-signatures"
|
||||||
version = "0.15.0"
|
version = "0.15.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"base64 0.22.1",
|
"base64 0.22.1",
|
||||||
"ed25519-dalek",
|
"ed25519-dalek",
|
||||||
|
@ -3848,7 +3850,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rust-librocksdb-sys"
|
name = "rust-librocksdb-sys"
|
||||||
version = "0.33.0+9.11.1"
|
version = "0.33.0+9.11.1"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd"
|
source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bindgen 0.71.1",
|
"bindgen 0.71.1",
|
||||||
"bzip2-sys",
|
"bzip2-sys",
|
||||||
|
@ -3865,7 +3867,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rust-rocksdb"
|
name = "rust-rocksdb"
|
||||||
version = "0.37.0"
|
version = "0.37.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd"
|
source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"libc",
|
"libc",
|
||||||
"rust-librocksdb-sys",
|
"rust-librocksdb-sys",
|
||||||
|
@ -3978,7 +3980,7 @@ checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2"
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rustyline-async"
|
name = "rustyline-async"
|
||||||
version = "0.4.3"
|
version = "0.4.3"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c"
|
source = "git+https://github.com/girlbossceo/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"crossterm",
|
"crossterm",
|
||||||
"futures-channel",
|
"futures-channel",
|
||||||
|
@ -4674,7 +4676,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tikv-jemalloc-ctl"
|
name = "tikv-jemalloc-ctl"
|
||||||
version = "0.6.0"
|
version = "0.6.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"libc",
|
"libc",
|
||||||
"paste",
|
"paste",
|
||||||
|
@ -4684,7 +4686,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tikv-jemalloc-sys"
|
name = "tikv-jemalloc-sys"
|
||||||
version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7"
|
version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cc",
|
"cc",
|
||||||
"libc",
|
"libc",
|
||||||
|
@ -4693,7 +4695,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tikv-jemallocator"
|
name = "tikv-jemallocator"
|
||||||
version = "0.6.0"
|
version = "0.6.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"libc",
|
"libc",
|
||||||
"tikv-jemalloc-sys",
|
"tikv-jemalloc-sys",
|
||||||
|
@ -4757,9 +4759,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tokio"
|
name = "tokio"
|
||||||
version = "1.44.2"
|
version = "1.44.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48"
|
checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"backtrace",
|
"backtrace",
|
||||||
"bytes",
|
"bytes",
|
||||||
|
@ -4979,7 +4981,7 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tracing"
|
name = "tracing"
|
||||||
version = "0.1.41"
|
version = "0.1.41"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"pin-project-lite",
|
"pin-project-lite",
|
||||||
"tracing-attributes",
|
"tracing-attributes",
|
||||||
|
@ -4989,7 +4991,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tracing-attributes"
|
name = "tracing-attributes"
|
||||||
version = "0.1.28"
|
version = "0.1.28"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
|
@ -4999,7 +5001,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tracing-core"
|
name = "tracing-core"
|
||||||
version = "0.1.33"
|
version = "0.1.33"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"valuable",
|
"valuable",
|
||||||
|
@ -5019,7 +5021,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tracing-log"
|
name = "tracing-log"
|
||||||
version = "0.2.0"
|
version = "0.2.0"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"log",
|
"log",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
|
@ -5047,7 +5049,7 @@ dependencies = [
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tracing-subscriber"
|
name = "tracing-subscriber"
|
||||||
version = "0.3.19"
|
version = "0.3.19"
|
||||||
source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"matchers",
|
"matchers",
|
||||||
"nu-ansi-term",
|
"nu-ansi-term",
|
||||||
|
|
57
Cargo.toml
57
Cargo.toml
|
@ -14,14 +14,14 @@ authors = [
|
||||||
categories = ["network-programming"]
|
categories = ["network-programming"]
|
||||||
description = "a very cool Matrix chat homeserver written in Rust"
|
description = "a very cool Matrix chat homeserver written in Rust"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
homepage = "https://continuwuity.org/"
|
homepage = "https://conduwuit.puppyirl.gay/"
|
||||||
keywords = ["chat", "matrix", "networking", "server", "uwu"]
|
keywords = ["chat", "matrix", "networking", "server", "uwu"]
|
||||||
license = "Apache-2.0"
|
license = "Apache-2.0"
|
||||||
# See also `rust-toolchain.toml`
|
# See also `rust-toolchain.toml`
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
repository = "https://github.com/girlbossceo/conduwuit"
|
||||||
rust-version = "1.86.0"
|
rust-version = "1.85.0"
|
||||||
version = "0.5.0-rc.5"
|
version = "0.5.0"
|
||||||
|
|
||||||
[workspace.metadata.crane]
|
[workspace.metadata.crane]
|
||||||
name = "conduwuit"
|
name = "conduwuit"
|
||||||
|
@ -77,7 +77,7 @@ default-features = false
|
||||||
version = "0.1.3"
|
version = "0.1.3"
|
||||||
|
|
||||||
[workspace.dependencies.rand]
|
[workspace.dependencies.rand]
|
||||||
version = "0.8.5"
|
version = "0.9.0"
|
||||||
|
|
||||||
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
# Used for the http request / response body type for Ruma endpoints used with reqwest
|
||||||
[workspace.dependencies.bytes]
|
[workspace.dependencies.bytes]
|
||||||
|
@ -242,7 +242,7 @@ default-features = false
|
||||||
features = ["std", "async-await"]
|
features = ["std", "async-await"]
|
||||||
|
|
||||||
[workspace.dependencies.tokio]
|
[workspace.dependencies.tokio]
|
||||||
version = "1.44.2"
|
version = "1.44.1"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = [
|
features = [
|
||||||
"fs",
|
"fs",
|
||||||
|
@ -348,9 +348,9 @@ version = "0.1.2"
|
||||||
|
|
||||||
# Used for matrix spec type definitions and helpers
|
# Used for matrix spec type definitions and helpers
|
||||||
[workspace.dependencies.ruma]
|
[workspace.dependencies.ruma]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
|
git = "https://github.com/girlbossceo/ruwuma"
|
||||||
#branch = "conduwuit-changes"
|
#branch = "conduwuit-changes"
|
||||||
rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
|
rev = "edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef"
|
||||||
features = [
|
features = [
|
||||||
"compat",
|
"compat",
|
||||||
"rand",
|
"rand",
|
||||||
|
@ -388,8 +388,8 @@ features = [
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.dependencies.rust-rocksdb]
|
[workspace.dependencies.rust-rocksdb]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1"
|
git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1"
|
||||||
rev = "fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd"
|
rev = "1c267e0bf0cc7b7702e9a329deccd89de79ef4c3"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = [
|
features = [
|
||||||
"multi-threaded-cf",
|
"multi-threaded-cf",
|
||||||
|
@ -449,7 +449,7 @@ version = "0.37.0"
|
||||||
|
|
||||||
# jemalloc usage
|
# jemalloc usage
|
||||||
[workspace.dependencies.tikv-jemalloc-sys]
|
[workspace.dependencies.tikv-jemalloc-sys]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/jemallocator"
|
git = "https://github.com/girlbossceo/jemallocator"
|
||||||
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = [
|
features = [
|
||||||
|
@ -457,7 +457,7 @@ features = [
|
||||||
"unprefixed_malloc_on_supported_platforms",
|
"unprefixed_malloc_on_supported_platforms",
|
||||||
]
|
]
|
||||||
[workspace.dependencies.tikv-jemallocator]
|
[workspace.dependencies.tikv-jemallocator]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/jemallocator"
|
git = "https://github.com/girlbossceo/jemallocator"
|
||||||
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = [
|
features = [
|
||||||
|
@ -465,7 +465,7 @@ features = [
|
||||||
"unprefixed_malloc_on_supported_platforms",
|
"unprefixed_malloc_on_supported_platforms",
|
||||||
]
|
]
|
||||||
[workspace.dependencies.tikv-jemalloc-ctl]
|
[workspace.dependencies.tikv-jemalloc-ctl]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/jemallocator"
|
git = "https://github.com/girlbossceo/jemallocator"
|
||||||
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8"
|
||||||
default-features = false
|
default-features = false
|
||||||
features = ["use_std"]
|
features = ["use_std"]
|
||||||
|
@ -542,51 +542,44 @@ version = "1.0.2"
|
||||||
|
|
||||||
# backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing.
|
# backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing.
|
||||||
# we can switch back to upstream if #2956 is merged and backported in the upstream repo.
|
# we can switch back to upstream if #2956 is merged and backported in the upstream repo.
|
||||||
# https://forgejo.ellis.link/continuwuation/tracing/commit/b348dca742af641c47bc390261f60711c2af573c
|
# https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c
|
||||||
[patch.crates-io.tracing-subscriber]
|
[patch.crates-io.tracing-subscriber]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/tracing"
|
git = "https://github.com/girlbossceo/tracing"
|
||||||
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||||
[patch.crates-io.tracing]
|
[patch.crates-io.tracing]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/tracing"
|
git = "https://github.com/girlbossceo/tracing"
|
||||||
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||||
[patch.crates-io.tracing-core]
|
[patch.crates-io.tracing-core]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/tracing"
|
git = "https://github.com/girlbossceo/tracing"
|
||||||
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||||
[patch.crates-io.tracing-log]
|
[patch.crates-io.tracing-log]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/tracing"
|
git = "https://github.com/girlbossceo/tracing"
|
||||||
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
|
||||||
|
|
||||||
# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50
|
# adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50
|
||||||
# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b
|
# adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b
|
||||||
[patch.crates-io.rustyline-async]
|
[patch.crates-io.rustyline-async]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/rustyline-async"
|
git = "https://github.com/girlbossceo/rustyline-async"
|
||||||
rev = "deaeb0694e2083f53d363b648da06e10fc13900c"
|
rev = "deaeb0694e2083f53d363b648da06e10fc13900c"
|
||||||
|
|
||||||
# adds LIFO queue scheduling; this should be updated with PR progress.
|
# adds LIFO queue scheduling; this should be updated with PR progress.
|
||||||
[patch.crates-io.event-listener]
|
[patch.crates-io.event-listener]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/event-listener"
|
git = "https://github.com/girlbossceo/event-listener"
|
||||||
rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d"
|
rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d"
|
||||||
[patch.crates-io.async-channel]
|
[patch.crates-io.async-channel]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/async-channel"
|
git = "https://github.com/girlbossceo/async-channel"
|
||||||
rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280"
|
rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280"
|
||||||
|
|
||||||
# adds affinity masks for selecting more than one core at a time
|
# adds affinity masks for selecting more than one core at a time
|
||||||
[patch.crates-io.core_affinity]
|
[patch.crates-io.core_affinity]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/core_affinity_rs"
|
git = "https://github.com/girlbossceo/core_affinity_rs"
|
||||||
rev = "9c8e51510c35077df888ee72a36b4b05637147da"
|
rev = "9c8e51510c35077df888ee72a36b4b05637147da"
|
||||||
|
|
||||||
# reverts hyperium#148 conflicting with our delicate federation resolver hooks
|
# reverts hyperium#148 conflicting with our delicate federation resolver hooks
|
||||||
[patch.crates-io.hyper-util]
|
[patch.crates-io.hyper-util]
|
||||||
git = "https://forgejo.ellis.link/continuwuation/hyper-util"
|
git = "https://github.com/girlbossceo/hyper-util"
|
||||||
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"
|
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"
|
||||||
|
|
||||||
# allows no-aaaa option in resolv.conf
|
|
||||||
# bumps rust edition and toolchain to 1.86.0 and 2024
|
|
||||||
# use sat_add on line number errors
|
|
||||||
[patch.crates-io.resolv-conf]
|
|
||||||
git = "https://forgejo.ellis.link/continuwuation/resolv-conf"
|
|
||||||
rev = "200e958941d522a70c5877e3d846f55b5586c68d"
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Our crates
|
# Our crates
|
||||||
#
|
#
|
||||||
|
|
225
README.md
225
README.md
|
@ -1,115 +1,178 @@
|
||||||
# continuwuity
|
# conduwuit
|
||||||
|
|
||||||
|
[](https://matrix.to/#/#conduwuit:puppygock.gay) [](https://matrix.to/#/#conduwuit-space:puppygock.gay)
|
||||||
|
|
||||||
|
[](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml)
|
||||||
|
|
||||||
|
    
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
&link=https%3A%2F%2Fhub.docker.com%2Frepository%2Fdocker%2Fgirlbossceo%2Fconduwuit%2Ftags%3Fname%3Dlatest) &link=https%3A%2F%2Fhub.docker.com%2Frepository%2Fdocker%2Fgirlbossceo%2Fconduwuit%2Ftags%3Fname%3Dmain)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<!-- ANCHOR: catchphrase -->
|
<!-- ANCHOR: catchphrase -->
|
||||||
|
|
||||||
## A community-driven [Matrix](https://matrix.org/) homeserver in Rust
|
### a very cool [Matrix](https://matrix.org/) chat homeserver written in Rust
|
||||||
|
|
||||||
<!-- ANCHOR_END: catchphrase -->
|
<!-- ANCHOR_END: catchphrase -->
|
||||||
|
|
||||||
[continuwuity] is a Matrix homeserver written in Rust.
|
Visit the [conduwuit documentation](https://conduwuit.puppyirl.gay/) for more
|
||||||
It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver.
|
information and how to deploy/setup conduwuit.
|
||||||
|
|
||||||
<!-- ANCHOR: body -->
|
<!-- ANCHOR: body -->
|
||||||
|
|
||||||
|
#### What is Matrix?
|
||||||
### Why does this exist?
|
|
||||||
|
|
||||||
The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features.
|
|
||||||
|
|
||||||
We aim to provide a stable, well-maintained alternative for current Conduit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver.
|
|
||||||
|
|
||||||
### Who are we?
|
|
||||||
|
|
||||||
We are a group of Matrix enthusiasts, developers and system administrators who have used conduwuit and believe in its potential. Our team includes both previous
|
|
||||||
contributors to the original project and new developers who want to help maintain and improve this important piece of Matrix infrastructure.
|
|
||||||
|
|
||||||
We operate as an open community project, welcoming contributions from anyone interested in improving continuwuity.
|
|
||||||
|
|
||||||
### What is Matrix?
|
|
||||||
|
|
||||||
[Matrix](https://matrix.org) is an open, federated, and extensible network for
|
[Matrix](https://matrix.org) is an open, federated, and extensible network for
|
||||||
decentralized communication. Users from any Matrix homeserver can chat with users from all
|
decentralised communication. Users from any Matrix homeserver can chat with users from all
|
||||||
other homeservers over federation. Matrix is designed to be extensible and built on top of.
|
other homeservers over federation. Matrix is designed to be extensible and built on top of.
|
||||||
You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord.
|
You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord.
|
||||||
|
|
||||||
### What are the project's goals?
|
#### What is the goal?
|
||||||
|
|
||||||
Continuwuity aims to:
|
A high-performance, efficient, low-cost, and featureful Matrix homeserver that's
|
||||||
|
easy to set up and just works with minimal configuration needed.
|
||||||
|
|
||||||
- Maintain a stable, reliable Matrix homeserver implementation in Rust
|
#### Can I try it out?
|
||||||
- Improve compatibility and specification compliance with the Matrix protocol
|
|
||||||
- Fix bugs and performance issues from the original conduwuit
|
|
||||||
- Add missing features needed by homeserver administrators
|
|
||||||
- Provide comprehensive documentation and easy deployment options
|
|
||||||
- Create a sustainable development model for long-term maintenance
|
|
||||||
- Keep a lightweight, efficient codebase that can run on modest hardware
|
|
||||||
|
|
||||||
### Can I try it out?
|
An official conduwuit server ran by me is available at transfem.dev
|
||||||
|
([element.transfem.dev](https://element.transfem.dev) /
|
||||||
|
[cinny.transfem.dev](https://cinny.transfem.dev))
|
||||||
|
|
||||||
Check out the [documentation](introduction) for installation instructions.
|
transfem.dev is a public homeserver that can be used, it is not a "test only
|
||||||
|
homeserver". This means there are rules, so please read the rules:
|
||||||
|
[https://transfem.dev/homeserver_rules.txt](https://transfem.dev/homeserver_rules.txt)
|
||||||
|
|
||||||
There are currently no open registration Continuwuity instances available.
|
transfem.dev is also listed at
|
||||||
|
[servers.joinmatrix.org](https://servers.joinmatrix.org/), which is a list of
|
||||||
|
popular public Matrix homeservers, including some others that run conduwuit.
|
||||||
|
|
||||||
### What are we working on?
|
#### What is the current status?
|
||||||
|
|
||||||
We're working our way through all of the issues in the [Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues).
|
conduwuit is technically a hard fork of [Conduit](https://conduit.rs/), which is in beta.
|
||||||
|
The beta status initially was inherited from Conduit, however the huge amount of
|
||||||
|
codebase divergance, changes, fixes, and improvements have effectively made this
|
||||||
|
beta status not entirely applicable to us anymore.
|
||||||
|
|
||||||
- [Replacing old conduwuit links with working continuwuity links](https://forgejo.ellis.link/continuwuation/continuwuity/issues/742)
|
conduwuit is very stable based on our rapidly growing userbase, has lots of features that users
|
||||||
- [Getting CI and docs deployment working on the new Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues/740)
|
expect, and very usable as a daily driver for small, medium, and upper-end medium sized homeservers.
|
||||||
- [Packaging & availability in more places](https://forgejo.ellis.link/continuwuation/continuwuity/issues/747)
|
|
||||||
- [Appservices bugs & features](https://forgejo.ellis.link/continuwuation/continuwuity/issues?q=&type=all&state=open&labels=178&milestone=0&assignee=0&poster=0)
|
|
||||||
- [Improving compatibility and spec compliance](https://forgejo.ellis.link/continuwuation/continuwuity/issues?labels=119)
|
|
||||||
- Automated testing
|
|
||||||
- [Admin API](https://forgejo.ellis.link/continuwuation/continuwuity/issues/748)
|
|
||||||
- [Policy-list controlled moderation](https://forgejo.ellis.link/continuwuation/continuwuity/issues/750)
|
|
||||||
|
|
||||||
### Can I migrate my data from x?
|
A lot of critical stability and performance issues have been fixed, and a lot of
|
||||||
|
necessary groundwork has finished; making this project way better than it was
|
||||||
|
back in the start at ~early 2024.
|
||||||
|
|
||||||
- Conduwuit: Yes
|
#### Where is the differences page?
|
||||||
- Conduit: No, database is now incompatible
|
|
||||||
- Grapevine: No, database is now incompatible
|
conduwuit historically had a "differences" page that listed each and every single
|
||||||
- Dendrite: No
|
different thing about conduwuit from Conduit, as a way to promote and advertise
|
||||||
- Synapse: No
|
conduwuit by showing significant amounts of work done. While this was feasible to
|
||||||
|
maintain back when the project was new in early-2024, this became impossible
|
||||||
|
very quickly and has unfortunately became heavily outdated, missing tons of things, etc.
|
||||||
|
|
||||||
|
It's difficult to list out what we do differently, what are our notable features, etc
|
||||||
|
when there's so many things and features and bug fixes and performance optimisations,
|
||||||
|
the list goes on. We simply recommend folks to just try out conduwuit, or ask us
|
||||||
|
what features you are looking for and if they're implemented in conduwuit.
|
||||||
|
|
||||||
|
#### How is conduwuit funded? Is conduwuit sustainable?
|
||||||
|
|
||||||
|
conduwuit has no external funding. This is made possible purely in my freetime with
|
||||||
|
contributors, also in their free time, and only by user-curated donations.
|
||||||
|
|
||||||
|
conduwuit has existed since around November 2023, but [only became more publicly known
|
||||||
|
in March/April 2024](https://matrix.org/blog/2024/04/26/this-week-in-matrix-2024-04-26/#conduwuit-website)
|
||||||
|
and we have no plans in stopping or slowing down any time soon!
|
||||||
|
|
||||||
|
#### Can I migrate or switch from Conduit?
|
||||||
|
|
||||||
|
conduwuit had drop-in migration/replacement support for Conduit for about 12 months before
|
||||||
|
bugs somewhere along the line broke it. Maintaining this has been difficult and
|
||||||
|
the majority of Conduit users have already migrated, additionally debugging Conduit
|
||||||
|
is not one of our interests, and so Conduit migration no longer works. We also
|
||||||
|
feel that 12 months has been plenty of time for people to seamlessly migrate.
|
||||||
|
|
||||||
|
If you are a Conduit user looking to migrate, you will have to wipe and reset
|
||||||
|
your database. We may fix seamless migration support at some point, but it's not an interest
|
||||||
|
from us.
|
||||||
|
|
||||||
|
#### Can I migrate from Synapse or Dendrite?
|
||||||
|
|
||||||
|
Currently there is no known way to seamlessly migrate all user data from the old
|
||||||
|
homeserver to conduwuit. However it is perfectly acceptable to replace the old
|
||||||
|
homeserver software with conduwuit using the same server name and there will not
|
||||||
|
be any issues with federation.
|
||||||
|
|
||||||
|
There is an interest in developing a built-in seamless user data migration
|
||||||
|
method into conduwuit, however there is no concrete ETA or timeline for this.
|
||||||
|
|
||||||
We haven't written up a guide on migrating from incompatible homeservers yet. Reach out to us if you need to do this!
|
|
||||||
|
|
||||||
<!-- ANCHOR_END: body -->
|
<!-- ANCHOR_END: body -->
|
||||||
|
|
||||||
## Contribution
|
|
||||||
|
|
||||||
### Development flow
|
|
||||||
|
|
||||||
- Features / changes must developed in a separate branch
|
|
||||||
- For each change, create a descriptive PR
|
|
||||||
- Your code will be reviewed by one or more of the continuwuity developers
|
|
||||||
- The branch will be deployed live on multiple tester's matrix servers to shake out bugs
|
|
||||||
- Once all testers and reviewers have agreed, the PR will be merged to the main branch
|
|
||||||
- The main branch will have nightly builds deployed to users on the cutting edge
|
|
||||||
- Every week or two, a new release is cut.
|
|
||||||
|
|
||||||
The main branch is always green!
|
|
||||||
|
|
||||||
|
|
||||||
### Policy on pulling from other forks
|
|
||||||
|
|
||||||
We welcome contributions from other forks of conduwuit, subject to our review process.
|
|
||||||
When incorporating code from other forks:
|
|
||||||
|
|
||||||
- All external contributions must go through our standard PR process
|
|
||||||
- Code must meet our quality standards and pass tests
|
|
||||||
- Code changes will require testing on multiple test servers before merging
|
|
||||||
- Attribution will be given to original authors and forks
|
|
||||||
- We prioritize stability and compatibility when evaluating external contributions
|
|
||||||
- Features that align with our project goals will be given priority consideration
|
|
||||||
|
|
||||||
<!-- ANCHOR: footer -->
|
<!-- ANCHOR: footer -->
|
||||||
|
|
||||||
#### Contact
|
#### Contact
|
||||||
|
|
||||||
Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [space](https://matrix.to/#/#space:continuwuity.org) to chat with us about the project!
|
[`#conduwuit:puppygock.gay`](https://matrix.to/#/#conduwuit:puppygock.gay)
|
||||||
|
is the official project Matrix room. You can get support here, ask questions or
|
||||||
|
concerns, get assistance setting up conduwuit, etc.
|
||||||
|
|
||||||
|
This room should stay relevant and focused on conduwuit. An offtopic general
|
||||||
|
chatter room can be found in the room topic there as well.
|
||||||
|
|
||||||
|
Please keep the issue trackers focused on *actual* bug reports and enhancement requests.
|
||||||
|
|
||||||
|
General support is extremely difficult to be offered over an issue tracker, and
|
||||||
|
simple questions should be asked directly in an interactive platform like our
|
||||||
|
Matrix room above as they can turn into a relevant discussion and/or may not be
|
||||||
|
simple to answer. If you're not sure, just ask in the Matrix room.
|
||||||
|
|
||||||
|
If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new)
|
||||||
|
|
||||||
|
If you need to contact the primary maintainer, my contact methods are on my website: https://girlboss.ceo
|
||||||
|
|
||||||
|
#### Donate
|
||||||
|
|
||||||
|
conduwuit development is purely made possible by myself and contributors. I do
|
||||||
|
not get paid to work on this, and I work on it in my free time. Donations are
|
||||||
|
heavily appreciated! 💜🥺
|
||||||
|
|
||||||
|
- Liberapay: <https://liberapay.com/girlbossceo>
|
||||||
|
- GitHub Sponsors: <https://github.com/sponsors/girlbossceo>
|
||||||
|
- Ko-fi: <https://ko-fi.com/puppygock>
|
||||||
|
|
||||||
|
I do not and will not accept cryptocurrency donations, including things related.
|
||||||
|
|
||||||
|
Note that donations will NOT guarantee you or give you any kind of tangible product,
|
||||||
|
feature prioritisation, etc. By donating, you are agreeing that conduwuit is NOT
|
||||||
|
going to provide you any goods or services as part of your donation, and this
|
||||||
|
donation is purely a generous donation. We will not provide things like paid
|
||||||
|
personal/direct support, feature request priority, merchandise, etc.
|
||||||
|
|
||||||
|
#### Logo
|
||||||
|
|
||||||
|
Original repo and Matrix room picture was from bran (<3). Current banner image
|
||||||
|
and logo is directly from [this cohost
|
||||||
|
post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for).
|
||||||
|
|
||||||
|
An SVG logo made by [@nktnet1](https://github.com/nktnet1) is available here: <https://github.com/girlbossceo/conduwuit/blob/main/docs/assets/>
|
||||||
|
|
||||||
|
#### Is it conduwuit or Conduwuit?
|
||||||
|
|
||||||
|
Both, but I prefer conduwuit.
|
||||||
|
|
||||||
|
#### Mirrors of conduwuit
|
||||||
|
|
||||||
|
If GitHub is unavailable in your country, or has poor connectivity, conduwuit's
|
||||||
|
source code is mirrored onto the following additional platforms I maintain:
|
||||||
|
|
||||||
|
- GitHub: <https://github.com/girlbossceo/conduwuit>
|
||||||
|
- GitLab: <https://gitlab.com/conduwuit/conduwuit>
|
||||||
|
- git.girlcock.ceo: <https://git.girlcock.ceo/strawberry/conduwuit>
|
||||||
|
- git.gay: <https://git.gay/june/conduwuit>
|
||||||
|
- mau.dev: <https://mau.dev/june/conduwuit>
|
||||||
|
- Codeberg: <https://codeberg.org/arf/conduwuit>
|
||||||
|
- sourcehut: <https://git.sr.ht/~girlbossceo/conduwuit>
|
||||||
|
|
||||||
<!-- ANCHOR_END: footer -->
|
<!-- ANCHOR_END: footer -->
|
||||||
|
|
||||||
|
|
||||||
[continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity
|
|
||||||
|
|
||||||
|
|
|
@ -1,63 +0,0 @@
|
||||||
# Contributor: magmaus3 <maia@magmaus3.eu.org>
|
|
||||||
# Maintainer: magmaus3 <maia@magmaus3.eu.org>
|
|
||||||
pkgname=continuwuity
|
|
||||||
|
|
||||||
# abuild doesn't like the format of v0.5.0-rc.5, so i had to change it
|
|
||||||
# see https://wiki.alpinelinux.org/wiki/Package_policies
|
|
||||||
pkgver=0.5.0_rc5
|
|
||||||
pkgrel=0
|
|
||||||
pkgdesc="a continuwuation of a very cool, featureful fork of conduit"
|
|
||||||
url="https://continuwuity.org/"
|
|
||||||
arch="all"
|
|
||||||
license="Apache-2.0"
|
|
||||||
depends="liburing"
|
|
||||||
|
|
||||||
# cargo version on alpine v3.21 is too old to use the 2024 edition
|
|
||||||
# i recommend either building everything on edge, or adding
|
|
||||||
# the edge repo as a tag
|
|
||||||
makedepends="cargo liburing-dev clang-dev linux-headers"
|
|
||||||
checkdepends=""
|
|
||||||
install="$pkgname.pre-install"
|
|
||||||
subpackages="$pkgname-openrc"
|
|
||||||
source="https://forgejo.ellis.link/continuwuation/continuwuity/archive/v0.5.0-rc.5.tar.gz
|
|
||||||
continuwuity.initd
|
|
||||||
continuwuity.confd
|
|
||||||
"
|
|
||||||
builddir="$srcdir/continuwuity"
|
|
||||||
options="net !check"
|
|
||||||
|
|
||||||
prepare() {
|
|
||||||
default_prepare
|
|
||||||
cd $srcdir/continuwuity
|
|
||||||
|
|
||||||
# add the default database path to the config (commented out)
|
|
||||||
cat conduwuit-example.toml \
|
|
||||||
| sed '/#database_path/ s:$: "/var/lib/continuwuity":' \
|
|
||||||
> "$srcdir"/continuwuity.toml
|
|
||||||
|
|
||||||
cargo fetch --target="$CTARGET" --locked
|
|
||||||
}
|
|
||||||
|
|
||||||
build() {
|
|
||||||
cargo build --frozen --release --all-features
|
|
||||||
}
|
|
||||||
|
|
||||||
check() {
|
|
||||||
# TODO: make sure the tests work
|
|
||||||
#cargo test --frozen
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
package() {
|
|
||||||
cd $srcdir
|
|
||||||
install -Dm755 continuwuity/target/release/conduwuit "$pkgdir"/usr/bin/continuwuity
|
|
||||||
install -Dm644 "$srcdir"/continuwuity.toml -t "$pkgdir"/etc/continuwuity
|
|
||||||
install -Dm755 "$srcdir"/continuwuity.initd "$pkgdir"/etc/init.d/continuwuity
|
|
||||||
install -Dm644 "$srcdir"/continuwuity.confd "$pkgdir"/etc/conf.d/continuwuity
|
|
||||||
}
|
|
||||||
|
|
||||||
sha512sums="
|
|
||||||
66f6da5e98b6f7bb8c1082500101d5c87b1b79955c139b44c6ef5123919fb05feb0dffc669a3af1bc8d571ddb9f3576660f08dc10a6b19eab6db9e391175436a v0.5.0-rc.5.tar.gz
|
|
||||||
0482674be24740496d70da256d4121c5a5e3b749f2445d2bbe0e8991f1449de052724f8427da21a6f55574bc53eac9ca1e47e5012b4c13049b2b39044734d80d continuwuity.initd
|
|
||||||
38e2576278b450d16ba804dd8f4a128f18cd793e6c3ce55aedee1e186905755b31ee23baaa6586b1ab0e25a1f29bf1ea86bfaae4185b0cb1a29203726a199426 continuwuity.confd
|
|
||||||
"
|
|
|
@ -1,7 +0,0 @@
|
||||||
# building
|
|
||||||
|
|
||||||
1. [set up your build
|
|
||||||
environment](https://wiki.alpinelinux.org/wiki/Include:Setup_your_system_and_account_for_building_packages)
|
|
||||||
|
|
||||||
2. run `abuild` (or `abuild -K` if you want to keep the source directory to make
|
|
||||||
rebuilding faster)
|
|
|
@ -1,3 +0,0 @@
|
||||||
supervisor=supervise-daemon
|
|
||||||
export CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml
|
|
||||||
|
|
|
@ -1,19 +0,0 @@
|
||||||
#!/sbin/openrc-run
|
|
||||||
|
|
||||||
command="/usr/bin/continuwuity"
|
|
||||||
command_user="continuwuity:continuwuity"
|
|
||||||
command_args="--config ${CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml}"
|
|
||||||
command_background=true
|
|
||||||
pidfile="/run/$RC_SVCNAME.pid"
|
|
||||||
|
|
||||||
output_log="/var/log/continuwuity.log"
|
|
||||||
error_log="/var/log/continuwuity.log"
|
|
||||||
|
|
||||||
depend() {
|
|
||||||
need net
|
|
||||||
}
|
|
||||||
|
|
||||||
start_pre() {
|
|
||||||
checkpath -d -m 0755 -o "$command_user" /var/lib/continuwuity
|
|
||||||
checkpath -f -m 0644 -o "$command_user" "$output_log"
|
|
||||||
}
|
|
|
@ -1,4 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
addgroup -S continuwuity 2>/dev/null
|
|
||||||
adduser -S -D -H -h /var/lib/continuwuity -s /sbin/nologin -G continuwuity -g continuwuity continuwuity 2>/dev/null
|
|
||||||
exit 0
|
|
15
book.toml
15
book.toml
|
@ -1,8 +1,8 @@
|
||||||
[book]
|
[book]
|
||||||
title = "continuwuity"
|
title = "conduwuit 🏳️⚧️ 💜 🦴"
|
||||||
description = "continuwuity is a community continuation of the conduwuit Matrix homeserver, written in Rust."
|
description = "conduwuit, which is a well-maintained fork of Conduit, is a simple, fast and reliable chat server for the Matrix protocol"
|
||||||
language = "en"
|
language = "en"
|
||||||
authors = ["The continuwuity Community"]
|
authors = ["strawberry (June)"]
|
||||||
text-direction = "ltr"
|
text-direction = "ltr"
|
||||||
multilingual = false
|
multilingual = false
|
||||||
src = "docs"
|
src = "docs"
|
||||||
|
@ -16,9 +16,12 @@ extra-watch-dirs = ["debian", "docs"]
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
|
|
||||||
[output.html]
|
[output.html]
|
||||||
edit-url-template = "https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/{path}"
|
git-repository-url = "https://github.com/girlbossceo/conduwuit"
|
||||||
git-repository-url = "https://forgejo.ellis.link/continuwuation/continuwuity"
|
edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}"
|
||||||
git-repository-icon = "fa-git-alt"
|
git-repository-icon = "fa-github-square"
|
||||||
|
|
||||||
|
[output.html.redirect]
|
||||||
|
"/differences.html" = "https://conduwuit.puppyirl.gay/#where-is-the-differences-page"
|
||||||
|
|
||||||
[output.html.search]
|
[output.html.search]
|
||||||
limit-results = 15
|
limit-results = 15
|
||||||
|
|
|
@ -113,10 +113,14 @@
|
||||||
#new_user_displayname_suffix = "🏳️⚧️"
|
#new_user_displayname_suffix = "🏳️⚧️"
|
||||||
|
|
||||||
# If enabled, conduwuit will send a simple GET request periodically to
|
# If enabled, conduwuit will send a simple GET request periodically to
|
||||||
# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new
|
# `https://pupbrain.dev/check-for-updates/stable` for any new
|
||||||
# announcements or major updates. This is not an update check endpoint.
|
# announcements made. Despite the name, this is not an update check
|
||||||
|
# endpoint, it is simply an announcement check endpoint.
|
||||||
#
|
#
|
||||||
#allow_announcements_check = true
|
# This is disabled by default as this is rarely used except for security
|
||||||
|
# updates or major updates.
|
||||||
|
#
|
||||||
|
#allow_check_for_updates = false
|
||||||
|
|
||||||
# Set this to any float value to multiply conduwuit's in-memory LRU caches
|
# Set this to any float value to multiply conduwuit's in-memory LRU caches
|
||||||
# with such as "auth_chain_cache_capacity".
|
# with such as "auth_chain_cache_capacity".
|
||||||
|
@ -523,9 +527,9 @@
|
||||||
|
|
||||||
# Default room version conduwuit will create rooms with.
|
# Default room version conduwuit will create rooms with.
|
||||||
#
|
#
|
||||||
# Per spec, room version 11 is the default.
|
# Per spec, room version 10 is the default.
|
||||||
#
|
#
|
||||||
#default_room_version = 11
|
#default_room_version = 10
|
||||||
|
|
||||||
# This item is undocumented. Please contribute documentation for it.
|
# This item is undocumented. Please contribute documentation for it.
|
||||||
#
|
#
|
||||||
|
@ -590,7 +594,7 @@
|
||||||
# Currently, conduwuit doesn't support inbound batched key requests, so
|
# Currently, conduwuit doesn't support inbound batched key requests, so
|
||||||
# this list should only contain other Synapse servers.
|
# this list should only contain other Synapse servers.
|
||||||
#
|
#
|
||||||
# example: ["matrix.org", "tchncs.de"]
|
# example: ["matrix.org", "envs.net", "tchncs.de"]
|
||||||
#
|
#
|
||||||
#trusted_servers = ["matrix.org"]
|
#trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
|
@ -966,10 +970,10 @@
|
||||||
#
|
#
|
||||||
#rocksdb_compaction_ioprio_idle = true
|
#rocksdb_compaction_ioprio_idle = true
|
||||||
|
|
||||||
# Enables RocksDB compaction. You should never ever have to set this
|
# Disables RocksDB compaction. You should never ever have to set this
|
||||||
# option to false. If you for some reason find yourself needing to use
|
# option to true. If you for some reason find yourself needing to use this
|
||||||
# this option as part of troubleshooting or a bug, please reach out to us
|
# option as part of troubleshooting or a bug, please reach out to us in
|
||||||
# in the conduwuit Matrix room with information and details.
|
# the conduwuit Matrix room with information and details.
|
||||||
#
|
#
|
||||||
# Disabling compaction will lead to a significantly bloated and
|
# Disabling compaction will lead to a significantly bloated and
|
||||||
# explosively large database, gradually poor performance, unnecessarily
|
# explosively large database, gradually poor performance, unnecessarily
|
||||||
|
@ -1182,72 +1186,28 @@
|
||||||
#
|
#
|
||||||
#prune_missing_media = false
|
#prune_missing_media = false
|
||||||
|
|
||||||
# List of forbidden server names via regex patterns that we will block
|
# Vector list of servers that conduwuit will refuse to download remote
|
||||||
# incoming AND outgoing federation with, and block client room joins /
|
# media from.
|
||||||
# remote user invites.
|
|
||||||
#
|
#
|
||||||
# Note that your messages can still make it to forbidden servers through
|
#prevent_media_downloads_from = []
|
||||||
# backfilling. Events we receive from forbidden servers via backfill
|
|
||||||
# from servers we *do* federate with will be stored in the database.
|
# List of forbidden server names that we will block incoming AND outgoing
|
||||||
|
# federation with, and block client room joins / remote user invites.
|
||||||
#
|
#
|
||||||
# This check is applied on the room ID, room alias, sender server name,
|
# This check is applied on the room ID, room alias, sender server name,
|
||||||
# sender user's server name, inbound federation X-Matrix origin, and
|
# sender user's server name, inbound federation X-Matrix origin, and
|
||||||
# outbound federation handler.
|
# outbound federation handler.
|
||||||
#
|
#
|
||||||
# You can set this to ["*"] to block all servers by default, and then
|
# Basically "global" ACLs.
|
||||||
# use `allowed_remote_server_names` to allow only specific servers.
|
|
||||||
#
|
|
||||||
# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"]
|
|
||||||
#
|
#
|
||||||
#forbidden_remote_server_names = []
|
#forbidden_remote_server_names = []
|
||||||
|
|
||||||
# List of allowed server names via regex patterns that we will allow,
|
# List of forbidden server names that we will block all outgoing federated
|
||||||
# regardless of if they match `forbidden_remote_server_names`.
|
# room directory requests for. Useful for preventing our users from
|
||||||
#
|
# wandering into bad servers or spaces.
|
||||||
# This option has no effect if `forbidden_remote_server_names` is empty.
|
|
||||||
#
|
|
||||||
# example: ["goodserver\\.tld$", "goodphrase"]
|
|
||||||
#
|
|
||||||
#allowed_remote_server_names = []
|
|
||||||
|
|
||||||
# Vector list of regex patterns of server names that conduwuit will refuse
|
|
||||||
# to download remote media from.
|
|
||||||
#
|
|
||||||
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
|
|
||||||
#
|
|
||||||
#prevent_media_downloads_from = []
|
|
||||||
|
|
||||||
# List of forbidden server names via regex patterns that we will block all
|
|
||||||
# outgoing federated room directory requests for. Useful for preventing
|
|
||||||
# our users from wandering into bad servers or spaces.
|
|
||||||
#
|
|
||||||
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
|
|
||||||
#
|
#
|
||||||
#forbidden_remote_room_directory_server_names = []
|
#forbidden_remote_room_directory_server_names = []
|
||||||
|
|
||||||
# Vector list of regex patterns of server names that conduwuit will not
|
|
||||||
# send messages to the client from.
|
|
||||||
#
|
|
||||||
# Note that there is no way for clients to receive messages once a server
|
|
||||||
# has become unignored without doing a full sync. This is a protocol
|
|
||||||
# limitation with the current sync protocols. This means this is somewhat
|
|
||||||
# of a nuclear option.
|
|
||||||
#
|
|
||||||
# example: ["reallybadserver\.tld$", "reallybadphrase",
|
|
||||||
# "69dollarfortnitecards"]
|
|
||||||
#
|
|
||||||
#ignore_messages_from_server_names = []
|
|
||||||
|
|
||||||
# Send messages from users that the user has ignored to the client.
|
|
||||||
#
|
|
||||||
# There is no way for clients to receive messages sent while a user was
|
|
||||||
# ignored without doing a full sync. This is a protocol limitation with
|
|
||||||
# the current sync protocols. Disabling this option will move
|
|
||||||
# responsibility of ignoring messages to the client, which can avoid this
|
|
||||||
# limitation.
|
|
||||||
#
|
|
||||||
#send_messages_from_ignored_users_to_client = false
|
|
||||||
|
|
||||||
# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
|
# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
|
||||||
# do not want conduwuit to send outbound requests to. Defaults to
|
# do not want conduwuit to send outbound requests to. Defaults to
|
||||||
# RFC1918, unroutable, loopback, multicast, and testnet addresses for
|
# RFC1918, unroutable, loopback, multicast, and testnet addresses for
|
||||||
|
@ -1355,7 +1315,7 @@
|
||||||
# used, and startup as warnings if any room aliases in your database have
|
# used, and startup as warnings if any room aliases in your database have
|
||||||
# a forbidden room alias/ID.
|
# a forbidden room alias/ID.
|
||||||
#
|
#
|
||||||
# example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"]
|
# example: ["19dollarfortnitecards", "b[4a]droom"]
|
||||||
#
|
#
|
||||||
#forbidden_alias_names = []
|
#forbidden_alias_names = []
|
||||||
|
|
||||||
|
@ -1368,7 +1328,7 @@
|
||||||
# startup as warnings if any local users in your database have a forbidden
|
# startup as warnings if any local users in your database have a forbidden
|
||||||
# username.
|
# username.
|
||||||
#
|
#
|
||||||
# example: ["administrator", "b[a4]dusernam[3e]", "badphrase"]
|
# example: ["administrator", "b[a4]dusernam[3e]"]
|
||||||
#
|
#
|
||||||
#forbidden_usernames = []
|
#forbidden_usernames = []
|
||||||
|
|
||||||
|
@ -1461,7 +1421,7 @@
|
||||||
|
|
||||||
# Sentry reporting URL, if a custom one is desired.
|
# Sentry reporting URL, if a custom one is desired.
|
||||||
#
|
#
|
||||||
#sentry_endpoint = ""
|
#sentry_endpoint = "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536"
|
||||||
|
|
||||||
# Report your conduwuit server_name in Sentry.io crash reports and
|
# Report your conduwuit server_name in Sentry.io crash reports and
|
||||||
# metrics.
|
# metrics.
|
||||||
|
|
2
debian/conduwuit.service
vendored
2
debian/conduwuit.service
vendored
|
@ -3,7 +3,7 @@ Description=conduwuit Matrix homeserver
|
||||||
Wants=network-online.target
|
Wants=network-online.target
|
||||||
After=network-online.target
|
After=network-online.target
|
||||||
Alias=matrix-conduwuit.service
|
Alias=matrix-conduwuit.service
|
||||||
Documentation=https://continuwuity.org/
|
Documentation=https://conduwuit.puppyirl.gay/
|
||||||
|
|
||||||
[Service]
|
[Service]
|
||||||
DynamicUser=yes
|
DynamicUser=yes
|
||||||
|
|
|
@ -1,208 +0,0 @@
|
||||||
ARG RUST_VERSION=1
|
|
||||||
|
|
||||||
FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx
|
|
||||||
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS base
|
|
||||||
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS toolchain
|
|
||||||
|
|
||||||
# Prevent deletion of apt cache
|
|
||||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean
|
|
||||||
|
|
||||||
# Match Rustc version as close as possible
|
|
||||||
# rustc -vV
|
|
||||||
ARG LLVM_VERSION=19
|
|
||||||
# ENV RUSTUP_TOOLCHAIN=${RUST_VERSION}
|
|
||||||
|
|
||||||
# Install repo tools
|
|
||||||
# Line one: compiler tools
|
|
||||||
# Line two: curl, for downloading binaries
|
|
||||||
# Line three: for xx-verify
|
|
||||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt-get update && apt-get install -y \
|
|
||||||
clang-${LLVM_VERSION} lld-${LLVM_VERSION} pkg-config make jq \
|
|
||||||
curl git \
|
|
||||||
file
|
|
||||||
|
|
||||||
# Create symlinks for LLVM tools
|
|
||||||
RUN <<EOF
|
|
||||||
# clang
|
|
||||||
ln -s /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang
|
|
||||||
ln -s "/usr/bin/clang++-${LLVM_VERSION}" "/usr/bin/clang++"
|
|
||||||
# lld
|
|
||||||
ln -s /usr/bin/ld64.lld-${LLVM_VERSION} /usr/bin/ld64.lld
|
|
||||||
ln -s /usr/bin/ld.lld-${LLVM_VERSION} /usr/bin/ld.lld
|
|
||||||
ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/lld
|
|
||||||
ln -s /usr/bin/lld-link-${LLVM_VERSION} /usr/bin/lld-link
|
|
||||||
ln -s /usr/bin/wasm-ld-${LLVM_VERSION} /usr/bin/wasm-ld
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Developer tool versions
|
|
||||||
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
|
|
||||||
ENV BINSTALL_VERSION=1.12.3
|
|
||||||
# renovate: datasource=github-releases depName=psastras/sbom-rs
|
|
||||||
ENV CARGO_SBOM_VERSION=0.9.1
|
|
||||||
# renovate: datasource=crate depName=lddtree
|
|
||||||
ENV LDDTREE_VERSION=0.3.7
|
|
||||||
|
|
||||||
# Install unpackaged tools
|
|
||||||
RUN <<EOF
|
|
||||||
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
|
|
||||||
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
|
|
||||||
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Set up xx (cross-compilation scripts)
|
|
||||||
COPY --from=xx / /
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
|
|
||||||
# Install libraries linked by the binary
|
|
||||||
# xx-* are xx-specific meta-packages
|
|
||||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
xx-apt-get install -y \
|
|
||||||
xx-c-essentials xx-cxx-essentials pkg-config \
|
|
||||||
liburing-dev
|
|
||||||
|
|
||||||
# Set up Rust toolchain
|
|
||||||
WORKDIR /app
|
|
||||||
COPY ./rust-toolchain.toml .
|
|
||||||
RUN rustc --version \
|
|
||||||
&& rustup target add $(xx-cargo --print-target-triple)
|
|
||||||
|
|
||||||
# Build binary
|
|
||||||
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
|
|
||||||
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
|
|
||||||
|
|
||||||
# Configure pkg-config
|
|
||||||
RUN <<EOF
|
|
||||||
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
|
|
||||||
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
|
|
||||||
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Configure cc to use clang version
|
|
||||||
RUN <<EOF
|
|
||||||
echo "CC=clang" >> /etc/environment
|
|
||||||
echo "CXX=clang++" >> /etc/environment
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Cross-language LTO
|
|
||||||
RUN <<EOF
|
|
||||||
echo "CFLAGS=-flto" >> /etc/environment
|
|
||||||
echo "CXXFLAGS=-flto" >> /etc/environment
|
|
||||||
# Linker is set to target-compatible clang by xx
|
|
||||||
echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Apply CPU-specific optimizations if TARGET_CPU is provided
|
|
||||||
ARG TARGET_CPU=
|
|
||||||
RUN <<EOF
|
|
||||||
set -o allexport
|
|
||||||
. /etc/environment
|
|
||||||
if [ -n "${TARGET_CPU}" ]; then
|
|
||||||
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
|
|
||||||
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
|
|
||||||
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
|
|
||||||
fi
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Prepare output directories
|
|
||||||
RUN mkdir /out
|
|
||||||
|
|
||||||
FROM toolchain AS builder
|
|
||||||
|
|
||||||
# Conduwuit version info
|
|
||||||
ARG COMMIT_SHA=
|
|
||||||
ARG CONDUWUIT_VERSION_EXTRA=
|
|
||||||
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
|
|
||||||
RUN <<EOF
|
|
||||||
if [ -z "${CONDUWUIT_VERSION_EXTRA}" ]; then
|
|
||||||
echo "CONDUWUIT_VERSION_EXTRA='$(set -e; git rev-parse --short ${COMMIT_SHA:-HEAD} || echo unknown revision)'" >> /etc/environment
|
|
||||||
fi
|
|
||||||
EOF
|
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
|
|
||||||
# Verify environment configuration
|
|
||||||
RUN cat /etc/environment
|
|
||||||
RUN xx-cargo --print-target-triple
|
|
||||||
|
|
||||||
# Get source
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
# Build the binary
|
|
||||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
|
||||||
--mount=type=cache,target=/usr/local/cargo/git/db \
|
|
||||||
--mount=type=cache,target=/app/target \
|
|
||||||
bash <<'EOF'
|
|
||||||
set -o allexport
|
|
||||||
. /etc/environment
|
|
||||||
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
|
|
||||||
jq -r ".target_directory"))
|
|
||||||
mkdir /out/sbin
|
|
||||||
PACKAGE=conduwuit
|
|
||||||
xx-cargo build --locked --release \
|
|
||||||
-p $PACKAGE;
|
|
||||||
BINARIES=($(cargo metadata --no-deps --format-version 1 | \
|
|
||||||
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
|
|
||||||
for BINARY in "${BINARIES[@]}"; do
|
|
||||||
echo $BINARY
|
|
||||||
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
|
|
||||||
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
|
|
||||||
done
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Generate Software Bill of Materials (SBOM)
|
|
||||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
|
||||||
--mount=type=cache,target=/usr/local/cargo/git/db \
|
|
||||||
bash <<'EOF'
|
|
||||||
mkdir /out/sbom
|
|
||||||
typeset -A PACKAGES
|
|
||||||
for BINARY in /out/sbin/*; do
|
|
||||||
BINARY_BASE=$(basename ${BINARY})
|
|
||||||
package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. == \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name")
|
|
||||||
if [ -z "$package" ]; then
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
PACKAGES[$package]=1
|
|
||||||
done
|
|
||||||
for PACKAGE in $(echo ${!PACKAGES[@]}); do
|
|
||||||
echo $PACKAGE
|
|
||||||
cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json
|
|
||||||
done
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# Extract dynamically linked dependencies
|
|
||||||
RUN <<EOF
|
|
||||||
mkdir /out/libs
|
|
||||||
mkdir /out/libs-root
|
|
||||||
for BINARY in /out/sbin/*; do
|
|
||||||
lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
|
|
||||||
done
|
|
||||||
EOF
|
|
||||||
|
|
||||||
FROM scratch
|
|
||||||
|
|
||||||
WORKDIR /
|
|
||||||
|
|
||||||
# Copy root certs for tls into image
|
|
||||||
# You can also mount the certs from the host
|
|
||||||
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
|
|
||||||
COPY --from=base /etc/ssl/certs /etc/ssl/certs
|
|
||||||
|
|
||||||
# Copy our build
|
|
||||||
COPY --from=builder /out/sbin/ /sbin/
|
|
||||||
# Copy SBOM
|
|
||||||
COPY --from=builder /out/sbom/ /sbom/
|
|
||||||
|
|
||||||
# Copy dynamic libraries to root
|
|
||||||
COPY --from=builder /out/libs-root/ /
|
|
||||||
COPY --from=builder /out/libs/ /usr/lib/
|
|
||||||
|
|
||||||
# Inform linker where to find libraries
|
|
||||||
ENV LD_LIBRARY_PATH=/usr/lib
|
|
||||||
|
|
||||||
# Continuwuity default port
|
|
||||||
EXPOSE 8008
|
|
||||||
|
|
||||||
CMD ["/sbin/conduwuit"]
|
|
|
@ -19,4 +19,4 @@
|
||||||
- [Contributing](contributing.md)
|
- [Contributing](contributing.md)
|
||||||
- [Testing](development/testing.md)
|
- [Testing](development/testing.md)
|
||||||
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
|
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
|
||||||
- [Community (and Guidelines)](community.md)
|
- [conduwuit Community Code of Conduct](conduwuit_coc.md)
|
||||||
|
|
|
@ -3,8 +3,8 @@
|
||||||
## Getting help
|
## Getting help
|
||||||
|
|
||||||
If you run into any problems while setting up an Appservice: ask us in
|
If you run into any problems while setting up an Appservice: ask us in
|
||||||
[#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or
|
[#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay) or
|
||||||
[open an issue on Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new).
|
[open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new).
|
||||||
|
|
||||||
## Set up the appservice - general instructions
|
## Set up the appservice - general instructions
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@ later starting it.
|
||||||
|
|
||||||
At some point the appservice guide should ask you to add a registration yaml
|
At some point the appservice guide should ask you to add a registration yaml
|
||||||
file to the homeserver. In Synapse you would do this by adding the path to the
|
file to the homeserver. In Synapse you would do this by adding the path to the
|
||||||
homeserver.yaml, but in Continuwuity you can do this from within Matrix:
|
homeserver.yaml, but in conduwuit you can do this from within Matrix:
|
||||||
|
|
||||||
First, go into the `#admins` room of your homeserver. The first person that
|
First, go into the `#admins` room of your homeserver. The first person that
|
||||||
registered on the homeserver automatically joins it. Then send a message into
|
registered on the homeserver automatically joins it. Then send a message into
|
||||||
|
@ -37,9 +37,9 @@ You can confirm it worked by sending a message like this:
|
||||||
|
|
||||||
The server bot should answer with `Appservices (1): your-bridge`
|
The server bot should answer with `Appservices (1): your-bridge`
|
||||||
|
|
||||||
Then you are done. Continuwuity will send messages to the appservices and the
|
Then you are done. conduwuit will send messages to the appservices and the
|
||||||
appservice can send requests to the homeserver. You don't need to restart
|
appservice can send requests to the homeserver. You don't need to restart
|
||||||
Continuwuity, but if it doesn't work, restarting while the appservice is running
|
conduwuit, but if it doesn't work, restarting while the appservice is running
|
||||||
could help.
|
could help.
|
||||||
|
|
||||||
## Appservice-specific instructions
|
## Appservice-specific instructions
|
||||||
|
|
|
@ -1,139 +0,0 @@
|
||||||
# Continuwuity Community Guidelines
|
|
||||||
|
|
||||||
Welcome to the Continuwuity commuwunity! We're excited to have you here. Continuwuity is a
|
|
||||||
continuation of the conduwuit homeserver, which in turn is a hard-fork of the Conduit homeserver,
|
|
||||||
aimed at making Matrix more accessible and inclusive for everyone.
|
|
||||||
|
|
||||||
This space is dedicated to fostering a positive, supportive, and welcoming environment for everyone.
|
|
||||||
These guidelines apply to all Continuwuity spaces, including our Matrix rooms and any other
|
|
||||||
community channels that reference them. We've written these guidelines to help us all create an
|
|
||||||
environment where everyone feels safe and respected.
|
|
||||||
|
|
||||||
For code and contribution guidelines, please refer to the
|
|
||||||
[Contributor's Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md).
|
|
||||||
Below are additional guidelines specific to the Continuwuity community.
|
|
||||||
|
|
||||||
## Our Values and Expected Behaviors
|
|
||||||
|
|
||||||
We strive to create a community based on mutual respect, collaboration, and inclusivity. We expect
|
|
||||||
all members to:
|
|
||||||
|
|
||||||
1. **Be Respectful and Inclusive**: Treat everyone with respect. We're committed to a community
|
|
||||||
where everyone feels safe, regardless of background, identity, or experience. Discrimination,
|
|
||||||
harassment, or hate speech won't be tolerated. Remember that each person experiences the world
|
|
||||||
differently; share your own perspective and be open to learning about others'.
|
|
||||||
|
|
||||||
2. **Be Positive and Constructive**: Engage in discussions constructively and support each other.
|
|
||||||
If you feel angry or frustrated, take a break before participating. Approach disagreements with
|
|
||||||
the goal of understanding, not winning. Focus on the issue, not the person.
|
|
||||||
|
|
||||||
3. **Communicate Clearly and Kindly**: Our community includes neurodivergent individuals and those
|
|
||||||
who may not appreciate sarcasm or subtlety. Communicate clearly and kindly. Avoid ambiguity and
|
|
||||||
ensure your messages can be easily understood by all. Avoid placing the burden of education on
|
|
||||||
marginalized groups; please make an effort to look into your questions before asking others for
|
|
||||||
detailed explanations.
|
|
||||||
|
|
||||||
4. **Be Open to Improving Inclusivity**: Actively participate in making our community more inclusive.
|
|
||||||
Report behaviour that contradicts these guidelines (see Reporting and Enforcement below) and be
|
|
||||||
open to constructive feedback aimed at improving our community. Understand that discussing
|
|
||||||
negative experiences can be emotionally taxing; focus on the message, not the tone.
|
|
||||||
|
|
||||||
5. **Commit to Our Values**: Building an inclusive community requires ongoing effort from everyone.
|
|
||||||
Recognise that addressing bias and discrimination is a continuous process that needs commitment
|
|
||||||
and action from all members.
|
|
||||||
|
|
||||||
## Unacceptable Behaviors
|
|
||||||
|
|
||||||
To ensure everyone feels safe and welcome, the following behaviors are considered unacceptable
|
|
||||||
within the Continuwuity community:
|
|
||||||
|
|
||||||
* **Harassment and Discrimination**: Avoid offensive comments related to background, family status,
|
|
||||||
gender, gender identity or expression, marital status, sex, sexual orientation, native language,
|
|
||||||
age, ability, race and/or ethnicity, caste, national origin, socioeconomic status, religion,
|
|
||||||
geographic location, or any other dimension of diversity. Don't deliberately misgender someone or
|
|
||||||
question the legitimacy of their gender identity.
|
|
||||||
|
|
||||||
* **Violence and Threats**: Do not engage in any form of violence or threats, including inciting
|
|
||||||
violence towards anyone or encouraging self-harm. Posting or threatening to post someone else's
|
|
||||||
personally identifying information ("doxxing") is also forbidden.
|
|
||||||
|
|
||||||
* **Personal Attacks**: Disagreements happen, but they should never turn into personal attacks.
|
|
||||||
Don't insult, demean, or belittle others.
|
|
||||||
|
|
||||||
* **Unwelcome Attention or Contact**: Avoid unwelcome sexual attention, inappropriate physical
|
|
||||||
contact (or simulation thereof), sexualized comments, jokes, or imagery.
|
|
||||||
|
|
||||||
* **Disruption**: Do not engage in sustained disruption of discussions, events, or other
|
|
||||||
community activities.
|
|
||||||
|
|
||||||
* **Bad Faith Actions**: Do not intentionally make false reports or otherwise abuse the reporting
|
|
||||||
process.
|
|
||||||
|
|
||||||
This is not an exhaustive list. Any behaviour that makes others feel unsafe or unwelcome may be
|
|
||||||
subject to enforcement action.
|
|
||||||
|
|
||||||
## Matrix Community
|
|
||||||
|
|
||||||
These Community Guidelines apply to the entire
|
|
||||||
[Continuwuity Matrix Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, including:
|
|
||||||
|
|
||||||
### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org)
|
|
||||||
|
|
||||||
This room is for support and discussions about Continuwuity. Ask questions, share insights, and help
|
|
||||||
each other out while adhering to these guidelines.
|
|
||||||
|
|
||||||
We ask that this room remain focused on the Continuwuity software specifically: the team are
|
|
||||||
typically happy to engage in conversations about related subjects in the off-topic room.
|
|
||||||
|
|
||||||
### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org)
|
|
||||||
|
|
||||||
For off-topic community conversations about any subject. While this room allows for a wide range of
|
|
||||||
topics, the same guidelines apply. Please keep discussions respectful and inclusive, and avoid
|
|
||||||
divisive or stressful subjects like specific country/world politics unless handled with exceptional
|
|
||||||
care and respect for diverse viewpoints.
|
|
||||||
|
|
||||||
General topics, such as world events, are welcome as long as they follow the guidelines. If a member
|
|
||||||
of the team asks for the conversation to end, please respect their decision.
|
|
||||||
|
|
||||||
### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org)
|
|
||||||
|
|
||||||
This room is dedicated to discussing active development of Continuwuity, including ongoing issues or
|
|
||||||
code development. Collaboration here must follow these guidelines, and please consider raising
|
|
||||||
[an issue](https://forgejo.ellis.link/continuwuation/continuwuity/issues) on the repository to help
|
|
||||||
track progress.
|
|
||||||
|
|
||||||
## Reporting and Enforcement
|
|
||||||
|
|
||||||
We take these Community Guidelines seriously to protect our community members. If you witness or
|
|
||||||
experience unacceptable behaviour, or have any other concerns, please report it.
|
|
||||||
|
|
||||||
**How to Report:**
|
|
||||||
|
|
||||||
* **Alert Moderators in the Room:** If you feel comfortable doing so, you can address the issue
|
|
||||||
publicly in the relevant room by mentioning the moderation bot, `@rock:continuwuity.org`, which
|
|
||||||
will immediately alert all available moderators.
|
|
||||||
* **Direct Message:** If you're not comfortable raising the issue publicly, please send a direct
|
|
||||||
message (DM) to one of the room moderators.
|
|
||||||
|
|
||||||
Reports will be handled with discretion. We will investigate promptly and thoroughly.
|
|
||||||
|
|
||||||
**Enforcement Actions:**
|
|
||||||
|
|
||||||
Anyone asked to stop unacceptable behaviour is expected to comply immediately. Failure to do so, or
|
|
||||||
engaging in prohibited behaviour, may result in enforcement action. Moderators may take actions they
|
|
||||||
deem appropriate, including but not limited to:
|
|
||||||
|
|
||||||
1. **Warning**: A direct message or public warning identifying the violation and requesting
|
|
||||||
corrective action.
|
|
||||||
2. **Temporary Mute**: Temporary restriction from participating in discussions for a specified
|
|
||||||
period.
|
|
||||||
3. **Kick or Ban**: Removal from a room (kick) or the entire community space (ban). Egregious or
|
|
||||||
repeated violations may result in an immediate ban. Bans are typically permanent and reviewed
|
|
||||||
only in exceptional circumstances.
|
|
||||||
|
|
||||||
Retaliation against those who report concerns in good faith will not be tolerated and will be
|
|
||||||
subject to the same enforcement actions.
|
|
||||||
|
|
||||||
Together, let's build and maintain a community where everyone feels valued, safe, and respected.
|
|
||||||
|
|
||||||
— The Continuwuity Moderation Team
|
|
93
docs/conduwuit_coc.md
Normal file
93
docs/conduwuit_coc.md
Normal file
|
@ -0,0 +1,93 @@
|
||||||
|
# conduwuit Community Code of Conduct
|
||||||
|
|
||||||
|
Welcome to the conduwuit community! We’re excited to have you here. conduwuit is
|
||||||
|
a hard-fork of the Conduit homeserver, aimed at making Matrix more accessible
|
||||||
|
and inclusive for everyone.
|
||||||
|
|
||||||
|
This space is dedicated to fostering a positive, supportive, and inclusive
|
||||||
|
environment for everyone. This Code of Conduct applies to all conduwuit spaces,
|
||||||
|
including any further community rooms that reference this CoC. Here are our
|
||||||
|
guidelines to help maintain the welcoming atmosphere that sets conduwuit apart.
|
||||||
|
|
||||||
|
For the general foundational rules, please refer to the [Contributor's
|
||||||
|
Covenant](https://github.com/girlbossceo/conduwuit/blob/main/CODE_OF_CONDUCT.md).
|
||||||
|
Below are additional guidelines specific to the conduwuit community.
|
||||||
|
|
||||||
|
## Our Values and Guidelines
|
||||||
|
|
||||||
|
1. **Respect and Inclusivity**: We are committed to maintaining a community
|
||||||
|
where everyone feels safe and respected. Discrimination, harassment, or hate
|
||||||
|
speech of any kind will not be tolerated. Recognise that each community member
|
||||||
|
experiences the world differently based on their past experiences, background,
|
||||||
|
and identity. Share your own experiences and be open to learning about others'
|
||||||
|
diverse perspectives.
|
||||||
|
|
||||||
|
2. **Positivity and Constructiveness**: Engage in constructive discussions and
|
||||||
|
support each other. If you feel angry, negative, or aggressive, take a break
|
||||||
|
until you can participate in a positive and constructive manner. Process intense
|
||||||
|
feelings with a friend or in a private setting before engaging in community
|
||||||
|
conversations to help maintain a supportive and focused environment.
|
||||||
|
|
||||||
|
3. **Clarity and Understanding**: Our community includes neurodivergent
|
||||||
|
individuals and those who may not appreciate sarcasm or subtlety. Communicate
|
||||||
|
clearly and kindly, avoiding sarcasm and ensuring your messages are easily
|
||||||
|
understood by all. Additionally, avoid putting the burden of education on
|
||||||
|
marginalized groups by doing your own research before asking for explanations.
|
||||||
|
|
||||||
|
4. **Be Open to Inclusivity**: Actively engage in conversations about making our
|
||||||
|
community more inclusive. Report discriminatory behavior to the moderators
|
||||||
|
and be open to constructive feedback that aims to improve our community.
|
||||||
|
Understand that discussing discrimination and negative experiences can be
|
||||||
|
emotionally taxing, so focus on the message rather than critiquing the tone
|
||||||
|
used.
|
||||||
|
|
||||||
|
5. **Commit to Inclusivity**: Building an inclusive community requires time,
|
||||||
|
energy, and resources. Recognise that addressing discrimination and bias is
|
||||||
|
an ongoing process that necessitates commitment and action from all community
|
||||||
|
members.
|
||||||
|
|
||||||
|
## Matrix Community
|
||||||
|
|
||||||
|
This Code of Conduct applies to the entire [conduwuit Matrix
|
||||||
|
Space](https://matrix.to/#/#conduwuit-space:puppygock.gay) and its rooms,
|
||||||
|
including:
|
||||||
|
|
||||||
|
### [#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay)
|
||||||
|
|
||||||
|
This room is for support and discussions about conduwuit. Ask questions, share
|
||||||
|
insights, and help each other out.
|
||||||
|
|
||||||
|
### [#conduwuit-offtopic:girlboss.ceo](https://matrix.to/#/#conduwuit-offtopic:girlboss.ceo)
|
||||||
|
|
||||||
|
For off-topic community conversations about any subject. While this room allows
|
||||||
|
for a wide range of topics, the same CoC applies. Keep discussions respectful
|
||||||
|
and inclusive, and avoid divisive subjects like country/world politics. General
|
||||||
|
topics, such as world events, are welcome as long as they follow the CoC.
|
||||||
|
|
||||||
|
### [#conduwuit-dev:puppygock.gay](https://matrix.to/#/#conduwuit-dev:puppygock.gay)
|
||||||
|
|
||||||
|
This room is dedicated to discussing active development of conduwuit. Posting
|
||||||
|
requires an elevated power level, which can be requested in one of the other
|
||||||
|
rooms. Use this space to collaborate and innovate.
|
||||||
|
|
||||||
|
## Enforcement
|
||||||
|
|
||||||
|
We have a zero-tolerance policy for violations of this Code of Conduct. If
|
||||||
|
someone’s behavior makes you uncomfortable, please report it to the moderators.
|
||||||
|
Actions we may take include:
|
||||||
|
|
||||||
|
1. **Warning**: A warning given directly in the room or via a private message
|
||||||
|
from the moderators, identifying the violation and requesting corrective
|
||||||
|
action.
|
||||||
|
2. **Temporary Mute**: Temporary restriction from participating in discussions
|
||||||
|
for a specified period to allow for reflection and cooling off.
|
||||||
|
3. **Kick or Ban**: Egregious behavior may result in an immediate kick or ban to
|
||||||
|
protect other community members. Bans are considered permanent and will only
|
||||||
|
be reversed in exceptional circumstances after proven good behavior.
|
||||||
|
|
||||||
|
Please highlight issues directly in rooms when possible, but if you don't feel
|
||||||
|
comfortable doing that, then please send a DM to one of the moderators directly.
|
||||||
|
|
||||||
|
Together, let’s build a community where everyone feels valued and respected.
|
||||||
|
|
||||||
|
— The conduwuit Moderation Team
|
|
@ -1,10 +1,10 @@
|
||||||
# Configuration
|
# Configuration
|
||||||
|
|
||||||
This chapter describes various ways to configure Continuwuity.
|
This chapter describes various ways to configure conduwuit.
|
||||||
|
|
||||||
## Basics
|
## Basics
|
||||||
|
|
||||||
Continuwuity uses a config file for the majority of the settings, but also supports
|
conduwuit uses a config file for the majority of the settings, but also supports
|
||||||
setting individual config options via commandline.
|
setting individual config options via commandline.
|
||||||
|
|
||||||
Please refer to the [example config
|
Please refer to the [example config
|
||||||
|
@ -12,13 +12,13 @@ file](./configuration/examples.md#example-configuration) for all of those
|
||||||
settings.
|
settings.
|
||||||
|
|
||||||
The config file to use can be specified on the commandline when running
|
The config file to use can be specified on the commandline when running
|
||||||
Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use
|
conduwuit by specifying the `-c`, `--config` flag. Alternatively, you can use
|
||||||
the environment variable `CONDUWUIT_CONFIG` to specify the config file to used.
|
the environment variable `CONDUWUIT_CONFIG` to specify the config file to used.
|
||||||
Conduit's environment variables are supported for backwards compatibility.
|
Conduit's environment variables are supported for backwards compatibility.
|
||||||
|
|
||||||
## Option commandline flag
|
## Option commandline flag
|
||||||
|
|
||||||
Continuwuity supports setting individual config options in TOML format from the
|
conduwuit supports setting individual config options in TOML format from the
|
||||||
`-O` / `--option` flag. For example, you can set your server name via `-O
|
`-O` / `--option` flag. For example, you can set your server name via `-O
|
||||||
server_name=\"example.com\"`.
|
server_name=\"example.com\"`.
|
||||||
|
|
||||||
|
@ -33,7 +33,7 @@ string. This does not apply to options that take booleans or numbers:
|
||||||
|
|
||||||
## Execute commandline flag
|
## Execute commandline flag
|
||||||
|
|
||||||
Continuwuity supports running admin commands on startup using the commandline
|
conduwuit supports running admin commands on startup using the commandline
|
||||||
argument `--execute`. The most notable use for this is to create an admin user
|
argument `--execute`. The most notable use for this is to create an admin user
|
||||||
on first startup.
|
on first startup.
|
||||||
|
|
||||||
|
|
|
@ -1,3 +1,3 @@
|
||||||
# Deploying
|
# Deploying
|
||||||
|
|
||||||
This chapter describes various ways to deploy Continuwuity.
|
This chapter describes various ways to deploy conduwuit.
|
||||||
|
|
|
@ -1,3 +1,15 @@
|
||||||
# Continuwuity for Arch Linux
|
# conduwuit for Arch Linux
|
||||||
|
|
||||||
Continuwuity does not have any Arch Linux packages at this time.
|
Currently conduwuit is only on the Arch User Repository (AUR).
|
||||||
|
|
||||||
|
The conduwuit AUR packages are community maintained and are not maintained by
|
||||||
|
conduwuit development team, but the AUR package maintainers are in the Matrix
|
||||||
|
room. Please attempt to verify your AUR package's PKGBUILD file looks fine
|
||||||
|
before asking for support.
|
||||||
|
|
||||||
|
- [conduwuit](https://aur.archlinux.org/packages/conduwuit) - latest tagged
|
||||||
|
conduwuit
|
||||||
|
- [conduwuit-git](https://aur.archlinux.org/packages/conduwuit-git) - latest git
|
||||||
|
conduwuit from `main` branch
|
||||||
|
- [conduwuit-bin](https://aur.archlinux.org/packages/conduwuit-bin) - latest
|
||||||
|
tagged conduwuit static binary
|
||||||
|
|
|
@ -1,14 +1,13 @@
|
||||||
# Continuwuity - Behind Traefik Reverse Proxy
|
# conduwuit - Behind Traefik Reverse Proxy
|
||||||
|
|
||||||
services:
|
services:
|
||||||
homeserver:
|
homeserver:
|
||||||
### If you already built the conduduwit image with 'docker build' or want to use the Docker Hub image,
|
### If you already built the conduduwit image with 'docker build' or want to use the Docker Hub image,
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: girlbossceo/conduwuit:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/conduwuit
|
- db:/var/lib/conduwuit
|
||||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
|
||||||
#- ./conduwuit.toml:/etc/conduwuit.toml
|
#- ./conduwuit.toml:/etc/conduwuit.toml
|
||||||
networks:
|
networks:
|
||||||
- proxy
|
- proxy
|
||||||
|
@ -36,14 +35,14 @@ services:
|
||||||
server=your.server.name.example:443
|
server=your.server.name.example:443
|
||||||
}
|
}
|
||||||
#cpuset: "0-4" # Uncomment to limit to specific CPU cores
|
#cpuset: "0-4" # Uncomment to limit to specific CPU cores
|
||||||
ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
|
ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
|
||||||
nofile:
|
nofile:
|
||||||
soft: 1048567
|
soft: 1048567
|
||||||
hard: 1048567
|
hard: 1048567
|
||||||
|
|
||||||
### Uncomment if you want to use your own Element-Web App.
|
### Uncomment if you want to use your own Element-Web App.
|
||||||
### Note: You need to provide a config.json for Element and you also need a second
|
### Note: You need to provide a config.json for Element and you also need a second
|
||||||
### Domain or Subdomain for the communication between Element and Continuwuity
|
### Domain or Subdomain for the communication between Element and conduwuit
|
||||||
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
|
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
|
||||||
# element-web:
|
# element-web:
|
||||||
# image: vectorim/element-web:latest
|
# image: vectorim/element-web:latest
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# Continuwuity - Traefik Reverse Proxy Labels
|
# conduwuit - Traefik Reverse Proxy Labels
|
||||||
|
|
||||||
services:
|
services:
|
||||||
homeserver:
|
homeserver:
|
||||||
|
@ -6,7 +6,7 @@ services:
|
||||||
- "traefik.enable=true"
|
- "traefik.enable=true"
|
||||||
- "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
|
- "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
|
||||||
|
|
||||||
- "traefik.http.routers.to-conduwuit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Continuwuity is hosted
|
- "traefik.http.routers.to-conduwuit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which conduwuit is hosted
|
||||||
- "traefik.http.routers.to-conduwuit.tls=true"
|
- "traefik.http.routers.to-conduwuit.tls=true"
|
||||||
- "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt"
|
- "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt"
|
||||||
- "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker"
|
- "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker"
|
||||||
|
@ -16,7 +16,7 @@ services:
|
||||||
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
|
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
|
||||||
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"
|
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"
|
||||||
|
|
||||||
# If you want to have your account on <DOMAIN>, but host Continuwuity on a subdomain,
|
# If you want to have your account on <DOMAIN>, but host conduwuit on a subdomain,
|
||||||
# you can let it only handle the well known file on that domain instead
|
# you can let it only handle the well known file on that domain instead
|
||||||
#- "traefik.http.routers.to-matrix-wellknown.rule=Host(`<DOMAIN>`) && PathPrefix(`/.well-known/matrix`)"
|
#- "traefik.http.routers.to-matrix-wellknown.rule=Host(`<DOMAIN>`) && PathPrefix(`/.well-known/matrix`)"
|
||||||
#- "traefik.http.routers.to-matrix-wellknown.tls=true"
|
#- "traefik.http.routers.to-matrix-wellknown.tls=true"
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
services:
|
services:
|
||||||
caddy:
|
caddy:
|
||||||
# This compose file uses caddy-docker-proxy as the reverse proxy for Continuwuity!
|
# This compose file uses caddy-docker-proxy as the reverse proxy for conduwuit!
|
||||||
# For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy
|
# For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy
|
||||||
image: lucaslorentz/caddy-docker-proxy:ci-alpine
|
image: lucaslorentz/caddy-docker-proxy:ci-alpine
|
||||||
ports:
|
ports:
|
||||||
|
@ -20,13 +20,12 @@ services:
|
||||||
caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}}
|
caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}}
|
||||||
|
|
||||||
homeserver:
|
homeserver:
|
||||||
### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
|
### If you already built the conduwuit image with 'docker build' or want to use a registry image,
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: girlbossceo/conduwuit:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/conduwuit
|
- db:/var/lib/conduwuit
|
||||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
|
||||||
#- ./conduwuit.toml:/etc/conduwuit.toml
|
#- ./conduwuit.toml:/etc/conduwuit.toml
|
||||||
environment:
|
environment:
|
||||||
CONDUWUIT_SERVER_NAME: example.com # EDIT THIS
|
CONDUWUIT_SERVER_NAME: example.com # EDIT THIS
|
||||||
|
|
|
@ -1,14 +1,13 @@
|
||||||
# Continuwuity - Behind Traefik Reverse Proxy
|
# conduwuit - Behind Traefik Reverse Proxy
|
||||||
|
|
||||||
services:
|
services:
|
||||||
homeserver:
|
homeserver:
|
||||||
### If you already built the Continuwuity image with 'docker build' or want to use the Docker Hub image,
|
### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image,
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: girlbossceo/conduwuit:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/conduwuit
|
- db:/var/lib/conduwuit
|
||||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
|
||||||
#- ./conduwuit.toml:/etc/conduwuit.toml
|
#- ./conduwuit.toml:/etc/conduwuit.toml
|
||||||
networks:
|
networks:
|
||||||
- proxy
|
- proxy
|
||||||
|
@ -22,7 +21,7 @@ services:
|
||||||
CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it
|
CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it
|
||||||
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
|
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
|
||||||
#CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above
|
#CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above
|
||||||
### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example example config too
|
### Uncomment and change values as desired, note that conduwuit has plenty of config options, so you should check out the example example config too
|
||||||
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
||||||
# CONDUWUIT_LOG: info # default is: "warn,state_res=warn"
|
# CONDUWUIT_LOG: info # default is: "warn,state_res=warn"
|
||||||
# CONDUWUIT_ALLOW_ENCRYPTION: 'true'
|
# CONDUWUIT_ALLOW_ENCRYPTION: 'true'
|
||||||
|
@ -44,14 +43,14 @@ services:
|
||||||
server=your.server.name.example:443
|
server=your.server.name.example:443
|
||||||
}
|
}
|
||||||
#cpuset: "0-4" # Uncomment to limit to specific CPU cores
|
#cpuset: "0-4" # Uncomment to limit to specific CPU cores
|
||||||
ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
|
ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it
|
||||||
nofile:
|
nofile:
|
||||||
soft: 1048567
|
soft: 1048567
|
||||||
hard: 1048567
|
hard: 1048567
|
||||||
|
|
||||||
### Uncomment if you want to use your own Element-Web App.
|
### Uncomment if you want to use your own Element-Web App.
|
||||||
### Note: You need to provide a config.json for Element and you also need a second
|
### Note: You need to provide a config.json for Element and you also need a second
|
||||||
### Domain or Subdomain for the communication between Element and Continuwuity
|
### Domain or Subdomain for the communication between Element and conduwuit
|
||||||
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
|
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
|
||||||
# element-web:
|
# element-web:
|
||||||
# image: vectorim/element-web:latest
|
# image: vectorim/element-web:latest
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
# Continuwuity
|
# conduwuit
|
||||||
|
|
||||||
services:
|
services:
|
||||||
homeserver:
|
homeserver:
|
||||||
### If you already built the Continuwuity image with 'docker build' or want to use a registry image,
|
### If you already built the conduwuit image with 'docker build' or want to use a registry image,
|
||||||
### then you are ready to go.
|
### then you are ready to go.
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: girlbossceo/conduwuit:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
ports:
|
ports:
|
||||||
- 8448:6167
|
- 8448:6167
|
||||||
|
@ -28,7 +28,7 @@ services:
|
||||||
#
|
#
|
||||||
### Uncomment if you want to use your own Element-Web App.
|
### Uncomment if you want to use your own Element-Web App.
|
||||||
### Note: You need to provide a config.json for Element and you also need a second
|
### Note: You need to provide a config.json for Element and you also need a second
|
||||||
### Domain or Subdomain for the communication between Element and Continuwuity
|
### Domain or Subdomain for the communication between Element and conduwuit
|
||||||
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
|
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
|
||||||
# element-web:
|
# element-web:
|
||||||
# image: vectorim/element-web:latest
|
# image: vectorim/element-web:latest
|
||||||
|
|
|
@ -1,20 +1,31 @@
|
||||||
# Continuwuity for Docker
|
# conduwuit for Docker
|
||||||
|
|
||||||
## Docker
|
## Docker
|
||||||
|
|
||||||
To run Continuwuity with Docker you can either build the image yourself or pull it
|
To run conduwuit with Docker you can either build the image yourself or pull it
|
||||||
from a registry.
|
from a registry.
|
||||||
|
|
||||||
### Use a registry
|
### Use a registry
|
||||||
|
|
||||||
OCI images for Continuwuity are available in the registries listed below.
|
OCI images for conduwuit are available in the registries listed below.
|
||||||
|
|
||||||
| Registry | Image | Notes |
|
| Registry | Image | Size | Notes |
|
||||||
| --------------- | --------------------------------------------------------------- | -----------------------|
|
| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- |
|
||||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest][fj] | Latest tagged image. |
|
| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable latest tagged image. |
|
||||||
| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main][fj] | Main branch image. |
|
| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable latest tagged image. |
|
||||||
|
| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable latest tagged image. |
|
||||||
|
| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh] | ![Image Size][shield-main] | Stable main branch. |
|
||||||
|
| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:main][gl] | ![Image Size][shield-main] | Stable main branch. |
|
||||||
|
| Docker Hub | [docker.io/girlbossceo/conduwuit:main][dh] | ![Image Size][shield-main] | Stable main branch. |
|
||||||
|
|
||||||
[fj]: https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity
|
[dh]: https://hub.docker.com/r/girlbossceo/conduwuit
|
||||||
|
[gh]: https://github.com/girlbossceo/conduwuit/pkgs/container/conduwuit
|
||||||
|
[gl]: https://gitlab.com/conduwuit/conduwuit/container_registry/6369729
|
||||||
|
[shield-latest]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest
|
||||||
|
[shield-main]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main
|
||||||
|
|
||||||
|
OCI image `.tar.gz` files are also hosted directly at when uploaded by CI with a
|
||||||
|
commit hash/revision or a tagged release: <https://pup.systems/~strawberry/conduwuit/>
|
||||||
|
|
||||||
Use
|
Use
|
||||||
|
|
||||||
|
@ -41,11 +52,11 @@ or you can use [docker compose](#docker-compose).
|
||||||
The `-d` flag lets the container run in detached mode. You may supply an
|
The `-d` flag lets the container run in detached mode. You may supply an
|
||||||
optional `conduwuit.toml` config file, the example config can be found
|
optional `conduwuit.toml` config file, the example config can be found
|
||||||
[here](../configuration/examples.md). You can pass in different env vars to
|
[here](../configuration/examples.md). You can pass in different env vars to
|
||||||
change config values on the fly. You can even configure Continuwuity completely by
|
change config values on the fly. You can even configure conduwuit completely by
|
||||||
using env vars. For an overview of possible values, please take a look at the
|
using env vars. For an overview of possible values, please take a look at the
|
||||||
[`docker-compose.yml`](docker-compose.yml) file.
|
[`docker-compose.yml`](docker-compose.yml) file.
|
||||||
|
|
||||||
If you just want to test Continuwuity for a short time, you can use the `--rm`
|
If you just want to test conduwuit for a short time, you can use the `--rm`
|
||||||
flag, which will clean up everything related to your container after you stop
|
flag, which will clean up everything related to your container after you stop
|
||||||
it.
|
it.
|
||||||
|
|
||||||
|
@ -80,32 +91,32 @@ docker network create caddy
|
||||||
After that, you can rename it so it matches `docker-compose.yml` and spin up the
|
After that, you can rename it so it matches `docker-compose.yml` and spin up the
|
||||||
containers!
|
containers!
|
||||||
|
|
||||||
Additional info about deploying Continuwuity can be found [here](generic.md).
|
Additional info about deploying conduwuit can be found [here](generic.md).
|
||||||
|
|
||||||
### Build
|
### Build
|
||||||
|
|
||||||
Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables multi-platform builds efficiently.
|
Official conduwuit images are built using Nix's
|
||||||
|
[`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are
|
||||||
|
repeatable and reproducible by anyone, keeps the images lightweight, and can be
|
||||||
|
built offline.
|
||||||
|
|
||||||
The resulting images are broadly compatible with Docker and other container runtimes like Podman or containerd.
|
This also ensures portability of our images because `buildLayeredImage` builds
|
||||||
|
OCI images, not Docker images, and works with other container software.
|
||||||
|
|
||||||
The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition.
|
The OCI images are OS-less with only a very minimal environment of the `tini`
|
||||||
|
init system, CA certificates, and the conduwuit binary. This does mean there is
|
||||||
|
not a shell, but in theory you can get a shell by adding the necessary layers
|
||||||
|
to the layered image. However it's very unlikely you will need a shell for any
|
||||||
|
real troubleshooting.
|
||||||
|
|
||||||
To build an image locally using Docker Buildx, you can typically run a command like:
|
The flake file for the OCI image definition is at [`nix/pkgs/oci-image/default.nix`][oci-image-def].
|
||||||
|
|
||||||
```bash
|
To build an OCI image using Nix, the following outputs can be built:
|
||||||
# Build for the current platform and load into the local Docker daemon
|
- `nix build -L .#oci-image` (default features, x86_64 glibc)
|
||||||
docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile .
|
- `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl)
|
||||||
|
- `nix build -L .#oci-image-aarch64-linux-musl` (default features, aarch64 musl)
|
||||||
# Example: Build for specific platforms and push to a registry.
|
- `nix build -L .#oci-image-x86_64-linux-musl-all-features` (all features, x86_64 musl)
|
||||||
# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push
|
- `nix build -L .#oci-image-aarch64-linux-musl-all-features` (all features, aarch64 musl)
|
||||||
|
|
||||||
# Example: Build binary optimized for the current CPU
|
|
||||||
# docker buildx build --load --tag continuwuity:latest --build-arg TARGET_CPU=native -f docker/Dockerfile .
|
|
||||||
```
|
|
||||||
|
|
||||||
Refer to the Docker Buildx documentation for more advanced build options.
|
|
||||||
|
|
||||||
[dockerfile-path]: ../../docker/Dockerfile
|
|
||||||
|
|
||||||
### Run
|
### Run
|
||||||
|
|
||||||
|
@ -127,10 +138,10 @@ web. With the two provided files,
|
||||||
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
|
||||||
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
|
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
|
||||||
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy
|
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy
|
||||||
to deploy and use Continuwuity, with a little caveat. If you already took a look at
|
to deploy and use conduwuit, with a little caveat. If you already took a look at
|
||||||
the files, then you should have seen the `well-known` service, and that is the
|
the files, then you should have seen the `well-known` service, and that is the
|
||||||
little caveat. Traefik is simply a proxy and loadbalancer and is not able to
|
little caveat. Traefik is simply a proxy and loadbalancer and is not able to
|
||||||
serve any kind of content, but for Continuwuity to federate, we need to either
|
serve any kind of content, but for conduwuit to federate, we need to either
|
||||||
expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client`
|
expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client`
|
||||||
and `.well-known/matrix/server`.
|
and `.well-known/matrix/server`.
|
||||||
|
|
||||||
|
@ -142,3 +153,4 @@ those two files.
|
||||||
See the [TURN](../turn.md) page.
|
See the [TURN](../turn.md) page.
|
||||||
|
|
||||||
[nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage
|
[nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage
|
||||||
|
[oci-image-def]: https://github.com/girlbossceo/conduwuit/blob/main/nix/pkgs/oci-image/default.nix
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
# Continuwuity for FreeBSD
|
# conduwuit for FreeBSD
|
||||||
|
|
||||||
Continuwuity at the moment does not provide FreeBSD builds or have FreeBSD packaging, however Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
|
conduwuit at the moment does not provide FreeBSD builds or have FreeBSD packaging, however conduwuit does build and work on FreeBSD using the system-provided RocksDB.
|
||||||
|
|
||||||
Contributions for getting Continuwuity packaged are welcome.
|
Contributions for getting conduwuit packaged are welcome.
|
||||||
|
|
|
@ -2,11 +2,11 @@
|
||||||
|
|
||||||
> ### Getting help
|
> ### Getting help
|
||||||
>
|
>
|
||||||
> If you run into any problems while setting up Continuwuity, ask us in
|
> If you run into any problems while setting up conduwuit, ask us in
|
||||||
> `#continuwuity:continuwuity.org` or [open an issue on
|
> `#conduwuit:puppygock.gay` or [open an issue on
|
||||||
> Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new).
|
> GitHub](https://github.com/girlbossceo/conduwuit/issues/new).
|
||||||
|
|
||||||
## Installing Continuwuity
|
## Installing conduwuit
|
||||||
|
|
||||||
### Static prebuilt binary
|
### Static prebuilt binary
|
||||||
|
|
||||||
|
@ -14,10 +14,12 @@ You may simply download the binary that fits your machine architecture (x86_64
|
||||||
or aarch64). Run `uname -m` to see what you need.
|
or aarch64). Run `uname -m` to see what you need.
|
||||||
|
|
||||||
Prebuilt fully static musl binaries can be downloaded from the latest tagged
|
Prebuilt fully static musl binaries can be downloaded from the latest tagged
|
||||||
release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or
|
release [here](https://github.com/girlbossceo/conduwuit/releases/latest) or
|
||||||
`main` CI branch workflow artifact output. These also include Debian/Ubuntu
|
`main` CI branch workflow artifact output. These also include Debian/Ubuntu
|
||||||
packages.
|
packages.
|
||||||
|
|
||||||
|
Binaries are also available on my website directly at: <https://pup.systems/~strawberry/conduwuit/>
|
||||||
|
|
||||||
These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit
|
These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit
|
||||||
hash/revision, and `releases` are tagged releases. Sort by descending last
|
hash/revision, and `releases` are tagged releases. Sort by descending last
|
||||||
modified for the latest.
|
modified for the latest.
|
||||||
|
@ -35,7 +37,7 @@ for performance.
|
||||||
### Compiling
|
### Compiling
|
||||||
|
|
||||||
Alternatively, you may compile the binary yourself. We recommend using
|
Alternatively, you may compile the binary yourself. We recommend using
|
||||||
Nix (or [Lix](https://lix.systems)) to build Continuwuity as this has the most
|
Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most
|
||||||
guaranteed reproducibiltiy and easiest to get a build environment and output
|
guaranteed reproducibiltiy and easiest to get a build environment and output
|
||||||
going. This also allows easy cross-compilation.
|
going. This also allows easy cross-compilation.
|
||||||
|
|
||||||
|
@ -49,35 +51,35 @@ If wanting to build using standard Rust toolchains, make sure you install:
|
||||||
- `liburing-dev` on the compiling machine, and `liburing` on the target host
|
- `liburing-dev` on the compiling machine, and `liburing` on the target host
|
||||||
- LLVM and libclang for RocksDB
|
- LLVM and libclang for RocksDB
|
||||||
|
|
||||||
You can build Continuwuity using `cargo build --release --all-features`
|
You can build conduwuit using `cargo build --release --all-features`
|
||||||
|
|
||||||
## Adding a Continuwuity user
|
## Adding a conduwuit user
|
||||||
|
|
||||||
While Continuwuity can run as any user it is better to use dedicated users for
|
While conduwuit can run as any user it is better to use dedicated users for
|
||||||
different services. This also allows you to make sure that the file permissions
|
different services. This also allows you to make sure that the file permissions
|
||||||
are correctly set up.
|
are correctly set up.
|
||||||
|
|
||||||
In Debian, you can use this command to create a Continuwuity user:
|
In Debian, you can use this command to create a conduwuit user:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo adduser --system continuwuity --group --disabled-login --no-create-home
|
sudo adduser --system conduwuit --group --disabled-login --no-create-home
|
||||||
```
|
```
|
||||||
|
|
||||||
For distros without `adduser` (or where it's a symlink to `useradd`):
|
For distros without `adduser` (or where it's a symlink to `useradd`):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity
|
sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit
|
||||||
```
|
```
|
||||||
|
|
||||||
## Forwarding ports in the firewall or the router
|
## Forwarding ports in the firewall or the router
|
||||||
|
|
||||||
Matrix's default federation port is port 8448, and clients must be using port 443.
|
Matrix's default federation port is port 8448, and clients must be using port 443.
|
||||||
If you would like to use only port 443, or a different port, you will need to setup
|
If you would like to use only port 443, or a different port, you will need to setup
|
||||||
delegation. Continuwuity has config options for doing delegation, or you can configure
|
delegation. conduwuit has config options for doing delegation, or you can configure
|
||||||
your reverse proxy to manually serve the necessary JSON files to do delegation
|
your reverse proxy to manually serve the necessary JSON files to do delegation
|
||||||
(see the `[global.well_known]` config section).
|
(see the `[global.well_known]` config section).
|
||||||
|
|
||||||
If Continuwuity runs behind a router or in a container and has a different public
|
If conduwuit runs behind a router or in a container and has a different public
|
||||||
IP address than the host system these public ports need to be forwarded directly
|
IP address than the host system these public ports need to be forwarded directly
|
||||||
or indirectly to the port mentioned in the config.
|
or indirectly to the port mentioned in the config.
|
||||||
|
|
||||||
|
@ -92,9 +94,9 @@ on the network level, consider something like NextDNS or Pi-Hole.
|
||||||
|
|
||||||
## Setting up a systemd service
|
## Setting up a systemd service
|
||||||
|
|
||||||
Two example systemd units for Continuwuity can be found
|
Two example systemd units for conduwuit can be found
|
||||||
[on the configuration page](../configuration/examples.md#debian-systemd-unit-file).
|
[on the configuration page](../configuration/examples.md#debian-systemd-unit-file).
|
||||||
You may need to change the `ExecStart=` path to where you placed the Continuwuity
|
You may need to change the `ExecStart=` path to where you placed the conduwuit
|
||||||
binary if it is not `/usr/bin/conduwuit`.
|
binary if it is not `/usr/bin/conduwuit`.
|
||||||
|
|
||||||
On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros
|
On systems where rsyslog is used alongside journald (i.e. Red Hat-based distros
|
||||||
|
@ -112,9 +114,9 @@ and entering the following:
|
||||||
ReadWritePaths=/path/to/custom/database/path
|
ReadWritePaths=/path/to/custom/database/path
|
||||||
```
|
```
|
||||||
|
|
||||||
## Creating the Continuwuity configuration file
|
## Creating the conduwuit configuration file
|
||||||
|
|
||||||
Now we need to create the Continuwuity's config file in
|
Now we need to create the conduwuit's config file in
|
||||||
`/etc/conduwuit/conduwuit.toml`. The example config can be found at
|
`/etc/conduwuit/conduwuit.toml`. The example config can be found at
|
||||||
[conduwuit-example.toml](../configuration/examples.md).
|
[conduwuit-example.toml](../configuration/examples.md).
|
||||||
|
|
||||||
|
@ -125,7 +127,7 @@ RocksDB is the only supported database backend.
|
||||||
|
|
||||||
## Setting the correct file permissions
|
## Setting the correct file permissions
|
||||||
|
|
||||||
If you are using a dedicated user for Continuwuity, you will need to allow it to
|
If you are using a dedicated user for conduwuit, you will need to allow it to
|
||||||
read the config. To do that you can run this:
|
read the config. To do that you can run this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
@ -137,7 +139,7 @@ If you use the default database path you also need to run this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo mkdir -p /var/lib/conduwuit/
|
sudo mkdir -p /var/lib/conduwuit/
|
||||||
sudo chown -R continuwuity:continuwuity /var/lib/conduwuit/
|
sudo chown -R conduwuit:conduwuit /var/lib/conduwuit/
|
||||||
sudo chmod 700 /var/lib/conduwuit/
|
sudo chmod 700 /var/lib/conduwuit/
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -172,13 +174,13 @@ As we would prefer our users to use Caddy, we will not provide configuration fil
|
||||||
|
|
||||||
You will need to reverse proxy everything under following routes:
|
You will need to reverse proxy everything under following routes:
|
||||||
- `/_matrix/` - core Matrix C-S and S-S APIs
|
- `/_matrix/` - core Matrix C-S and S-S APIs
|
||||||
- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and
|
- `/_conduwuit/` - ad-hoc conduwuit routes such as `/local_user_count` and
|
||||||
`/server_version`
|
`/server_version`
|
||||||
|
|
||||||
You can optionally reverse proxy the following individual routes:
|
You can optionally reverse proxy the following individual routes:
|
||||||
- `/.well-known/matrix/client` and `/.well-known/matrix/server` if using
|
- `/.well-known/matrix/client` and `/.well-known/matrix/server` if using
|
||||||
Continuwuity to perform delegation (see the `[global.well_known]` config section)
|
conduwuit to perform delegation (see the `[global.well_known]` config section)
|
||||||
- `/.well-known/matrix/support` if using Continuwuity to send the homeserver admin
|
- `/.well-known/matrix/support` if using conduwuit to send the homeserver admin
|
||||||
contact and support page (formerly known as MSC1929)
|
contact and support page (formerly known as MSC1929)
|
||||||
- `/` if you would like to see `hewwo from conduwuit woof!` at the root
|
- `/` if you would like to see `hewwo from conduwuit woof!` at the root
|
||||||
|
|
||||||
|
@ -198,7 +200,7 @@ header, making federation non-functional. If a workaround is found, feel free to
|
||||||
|
|
||||||
If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can).
|
If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can).
|
||||||
|
|
||||||
If using Nginx, you need to give Continuwuity the request URI using `$request_uri`, or like so:
|
If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so:
|
||||||
- `proxy_pass http://127.0.0.1:6167$request_uri;`
|
- `proxy_pass http://127.0.0.1:6167$request_uri;`
|
||||||
- `proxy_pass http://127.0.0.1:6167;`
|
- `proxy_pass http://127.0.0.1:6167;`
|
||||||
|
|
||||||
|
@ -207,7 +209,7 @@ Nginx users need to increase `client_max_body_size` (default is 1M) to match
|
||||||
|
|
||||||
## You're done
|
## You're done
|
||||||
|
|
||||||
Now you can start Continuwuity with:
|
Now you can start conduwuit with:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo systemctl start conduwuit
|
sudo systemctl start conduwuit
|
||||||
|
|
|
@ -1,9 +1,8 @@
|
||||||
# Continuwuity for Kubernetes
|
# conduwuit for Kubernetes
|
||||||
|
|
||||||
Continuwuity doesn't support horizontal scalability or distributed loading
|
conduwuit doesn't support horizontal scalability or distributed loading
|
||||||
natively, however a community maintained Helm Chart is available here to run
|
natively, however a community maintained Helm Chart is available here to run
|
||||||
conduwuit on Kubernetes: <https://gitlab.cronce.io/charts/conduwuit>
|
conduwuit on Kubernetes: <https://gitlab.cronce.io/charts/conduwuit>
|
||||||
|
|
||||||
This should be compatible with continuwuity, but you will need to change the image reference.
|
Should changes need to be made, please reach out to the maintainer in our
|
||||||
|
Matrix room as this is not maintained/controlled by the conduwuit maintainers.
|
||||||
Should changes need to be made, please reach out to the maintainer as this is not maintained/controlled by the Continuwuity maintainers.
|
|
||||||
|
|
|
@ -1,33 +1,66 @@
|
||||||
# Continuwuity for NixOS
|
# conduwuit for NixOS
|
||||||
|
|
||||||
Continuwuity can be acquired by Nix (or [Lix][lix]) from various places:
|
conduwuit can be acquired by Nix (or [Lix][lix]) from various places:
|
||||||
|
|
||||||
* The `flake.nix` at the root of the repo
|
* The `flake.nix` at the root of the repo
|
||||||
* The `default.nix` at the root of the repo
|
* The `default.nix` at the root of the repo
|
||||||
* From Continuwuity's binary cache
|
* From conduwuit's binary cache
|
||||||
|
|
||||||
|
A community maintained NixOS package is available at [`conduwuit`](https://search.nixos.org/packages?channel=unstable&show=conduwuit&from=0&size=50&sort=relevance&type=packages&query=conduwuit)
|
||||||
|
|
||||||
|
### Binary cache
|
||||||
|
|
||||||
|
A binary cache for conduwuit that the CI/CD publishes to is available at the
|
||||||
|
following places (both are the same just different names):
|
||||||
|
|
||||||
|
```
|
||||||
|
https://attic.kennel.juneis.dog/conduit
|
||||||
|
conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk=
|
||||||
|
|
||||||
|
https://attic.kennel.juneis.dog/conduwuit
|
||||||
|
conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE=
|
||||||
|
```
|
||||||
|
|
||||||
|
The binary caches were recreated some months ago due to attic issues. The old public
|
||||||
|
keys were:
|
||||||
|
|
||||||
|
```
|
||||||
|
conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg=
|
||||||
|
conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw=
|
||||||
|
```
|
||||||
|
|
||||||
|
If needed, we have a binary cache on Cachix but it is only limited to 5GB:
|
||||||
|
|
||||||
|
```
|
||||||
|
https://conduwuit.cachix.org
|
||||||
|
conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg=
|
||||||
|
```
|
||||||
|
|
||||||
|
If specifying a Git remote URL in your flake, you can use any remotes that
|
||||||
|
are specified on the README (the mirrors), such as the GitHub: `github:girlbossceo/conduwuit`
|
||||||
|
|
||||||
### NixOS module
|
### NixOS module
|
||||||
|
|
||||||
The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions
|
The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions
|
||||||
welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure
|
welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure
|
||||||
Continuwuity.
|
conduwuit.
|
||||||
|
|
||||||
### Conduit NixOS Config Module and SQLite
|
### Conduit NixOS Config Module and SQLite
|
||||||
|
|
||||||
Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend.
|
Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend.
|
||||||
Continuwuity dropped SQLite support in favor of exclusively supporting the much faster RocksDB.
|
Conduwuit dropped SQLite support in favor of exclusively supporting the much faster RocksDB.
|
||||||
Make sure that you are using the RocksDB backend before migrating!
|
Make sure that you are using the RocksDB backend before migrating!
|
||||||
|
|
||||||
There is a [tool to migrate a Conduit SQLite database to
|
There is a [tool to migrate a Conduit SQLite database to
|
||||||
RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/).
|
RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/).
|
||||||
|
|
||||||
If you want to run the latest code, you should get Continuwuity from the `flake.nix`
|
If you want to run the latest code, you should get conduwuit from the `flake.nix`
|
||||||
or `default.nix` and set [`services.matrix-conduit.package`][package]
|
or `default.nix` and set [`services.matrix-conduit.package`][package]
|
||||||
appropriately to use Continuwuity instead of Conduit.
|
appropriately to use conduwuit instead of Conduit.
|
||||||
|
|
||||||
### UNIX sockets
|
### UNIX sockets
|
||||||
|
|
||||||
Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module
|
Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module
|
||||||
a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX
|
a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX
|
||||||
socket option does not exist in Conduit, and the module forcibly sets the `address` and
|
socket option does not exist in Conduit, and the module forcibly sets the `address` and
|
||||||
`port` config options.
|
`port` config options.
|
||||||
|
@ -51,13 +84,13 @@ disallows the namespace from accessing or creating UNIX sockets and has to be en
|
||||||
systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ];
|
systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ];
|
||||||
```
|
```
|
||||||
|
|
||||||
Even though those workarounds are feasible a Continuwuity NixOS configuration module, developed and
|
Even though those workarounds are feasible a conduwuit NixOS configuration module, developed and
|
||||||
published by the community, would be appreciated.
|
published by the community, would be appreciated.
|
||||||
|
|
||||||
### jemalloc and hardened profile
|
### jemalloc and hardened profile
|
||||||
|
|
||||||
Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix]
|
conduwuit uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix]
|
||||||
due to them using `scudo` by default. You must either disable/hide `scudo` from Continuwuity, or
|
due to them using `scudo` by default. You must either disable/hide `scudo` from conduwuit, or
|
||||||
disable jemalloc like so:
|
disable jemalloc like so:
|
||||||
|
|
||||||
```nix
|
```nix
|
||||||
|
|
|
@ -4,9 +4,9 @@ Information about developing the project. If you are only interested in using
|
||||||
it, you can safely ignore this page. If you plan on contributing, see the
|
it, you can safely ignore this page. If you plan on contributing, see the
|
||||||
[contributor's guide](./contributing.md).
|
[contributor's guide](./contributing.md).
|
||||||
|
|
||||||
## Continuwuity project layout
|
## conduwuit project layout
|
||||||
|
|
||||||
Continuwuity uses a collection of sub-crates, packages, or workspace members
|
conduwuit uses a collection of sub-crates, packages, or workspace members
|
||||||
that indicate what each general area of code is for. All of the workspace
|
that indicate what each general area of code is for. All of the workspace
|
||||||
members are under `src/`. The workspace definition is at the top level / root
|
members are under `src/`. The workspace definition is at the top level / root
|
||||||
`Cargo.toml`.
|
`Cargo.toml`.
|
||||||
|
@ -14,11 +14,11 @@ members are under `src/`. The workspace definition is at the top level / root
|
||||||
The crate names are generally self-explanatory:
|
The crate names are generally self-explanatory:
|
||||||
- `admin` is the admin room
|
- `admin` is the admin room
|
||||||
- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc
|
- `api` is the HTTP API, Matrix C-S and S-S endpoints, etc
|
||||||
- `core` is core Continuwuity functionality like config loading, error definitions,
|
- `core` is core conduwuit functionality like config loading, error definitions,
|
||||||
global utilities, logging infrastructure, etc
|
global utilities, logging infrastructure, etc
|
||||||
- `database` is RocksDB methods, helpers, RocksDB config, and general database definitions,
|
- `database` is RocksDB methods, helpers, RocksDB config, and general database definitions,
|
||||||
utilities, or functions
|
utilities, or functions
|
||||||
- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging
|
- `macros` are conduwuit Rust [macros][macros] like general helper macros, logging
|
||||||
and error handling macros, and [syn][syn] and [procedural macros][proc-macro]
|
and error handling macros, and [syn][syn] and [procedural macros][proc-macro]
|
||||||
used for admin room commands and others
|
used for admin room commands and others
|
||||||
- `main` is the "primary" sub-crate. This is where the `main()` function lives,
|
- `main` is the "primary" sub-crate. This is where the `main()` function lives,
|
||||||
|
@ -35,7 +35,7 @@ if you truly find yourself needing to, we recommend reaching out to us in
|
||||||
the Matrix room for discussions about it beforehand.
|
the Matrix room for discussions about it beforehand.
|
||||||
|
|
||||||
The primary inspiration for this design was apart of hot reloadable development,
|
The primary inspiration for this design was apart of hot reloadable development,
|
||||||
to support "Continuwuity as a library" where specific parts can simply be swapped out.
|
to support "conduwuit as a library" where specific parts can simply be swapped out.
|
||||||
There is evidence Conduit wanted to go this route too as `axum` is technically an
|
There is evidence Conduit wanted to go this route too as `axum` is technically an
|
||||||
optional feature in Conduit, and can be compiled without the binary or axum library
|
optional feature in Conduit, and can be compiled without the binary or axum library
|
||||||
for handling inbound web requests; but it was never completed or worked.
|
for handling inbound web requests; but it was never completed or worked.
|
||||||
|
@ -68,10 +68,10 @@ do this if Rust supported workspace-level features to begin with.
|
||||||
|
|
||||||
## List of forked dependencies
|
## List of forked dependencies
|
||||||
|
|
||||||
During Continuwuity development, we have had to fork
|
During conduwuit development, we have had to fork
|
||||||
some dependencies to support our use-cases in some areas. This ranges from
|
some dependencies to support our use-cases in some areas. This ranges from
|
||||||
things said upstream project won't accept for any reason, faster-paced
|
things said upstream project won't accept for any reason, faster-paced
|
||||||
development (unresponsive or slow upstream), Continuwuity-specific usecases, or
|
development (unresponsive or slow upstream), conduwuit-specific usecases, or
|
||||||
lack of time to upstream some things.
|
lack of time to upstream some things.
|
||||||
|
|
||||||
- [ruma/ruma][1]: <https://github.com/girlbossceo/ruwuma> - various performance
|
- [ruma/ruma][1]: <https://github.com/girlbossceo/ruwuma> - various performance
|
||||||
|
@ -84,7 +84,7 @@ builds seem to be broken on upstream, fixes some broken/suspicious code in
|
||||||
places, additional safety measures, and support redzones for Valgrind
|
places, additional safety measures, and support redzones for Valgrind
|
||||||
- [zyansheep/rustyline-async][4]:
|
- [zyansheep/rustyline-async][4]:
|
||||||
<https://github.com/girlbossceo/rustyline-async> - tab completion callback and
|
<https://github.com/girlbossceo/rustyline-async> - tab completion callback and
|
||||||
`CTRL+\` signal quit event for Continuwuity console CLI
|
`CTRL+\` signal quit event for conduwuit console CLI
|
||||||
- [rust-rocksdb/rust-rocksdb][5]:
|
- [rust-rocksdb/rust-rocksdb][5]:
|
||||||
<https://github.com/girlbossceo/rust-rocksdb-zaidoon1> - [`@zaidoon1`][8]'s fork
|
<https://github.com/girlbossceo/rust-rocksdb-zaidoon1> - [`@zaidoon1`][8]'s fork
|
||||||
has quicker updates, more up to date dependencies, etc. Our fork fixes musl build
|
has quicker updates, more up to date dependencies, etc. Our fork fixes musl build
|
||||||
|
@ -97,7 +97,7 @@ alongside other logging/metrics things
|
||||||
## Debugging with `tokio-console`
|
## Debugging with `tokio-console`
|
||||||
|
|
||||||
[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a
|
[`tokio-console`][7] can be a useful tool for debugging and profiling. To make a
|
||||||
`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature,
|
`tokio-console`-enabled build of conduwuit, enable the `tokio_console` feature,
|
||||||
disable the default `release_max_log_level` feature, and set the `--cfg
|
disable the default `release_max_log_level` feature, and set the `--cfg
|
||||||
tokio_unstable` flag to enable experimental tokio APIs. A build might look like
|
tokio_unstable` flag to enable experimental tokio APIs. A build might look like
|
||||||
this:
|
this:
|
||||||
|
@ -109,7 +109,7 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \
|
||||||
--features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
|
--features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console
|
||||||
```
|
```
|
||||||
|
|
||||||
You will also need to enable the `tokio_console` config option in Continuwuity when
|
You will also need to enable the `tokio_console` config option in conduwuit when
|
||||||
starting it. This was due to tokio-console causing gradual memory leak/usage
|
starting it. This was due to tokio-console causing gradual memory leak/usage
|
||||||
if left enabled.
|
if left enabled.
|
||||||
|
|
||||||
|
|
|
@ -5,7 +5,7 @@ guaranteed to work at this time.
|
||||||
|
|
||||||
### Summary
|
### Summary
|
||||||
|
|
||||||
When developing in debug-builds with the nightly toolchain, Continuwuity is modular
|
When developing in debug-builds with the nightly toolchain, conduwuit is modular
|
||||||
using dynamic libraries and various parts of the application are hot-reloadable
|
using dynamic libraries and various parts of the application are hot-reloadable
|
||||||
while the server is running: http api handlers, admin commands, services,
|
while the server is running: http api handlers, admin commands, services,
|
||||||
database, etc. These are all split up into individual workspace crates as seen
|
database, etc. These are all split up into individual workspace crates as seen
|
||||||
|
@ -42,7 +42,7 @@ library, macOS, and likely other host architectures are not supported (if other
|
||||||
architectures work, feel free to let us know and/or make a PR updating this).
|
architectures work, feel free to let us know and/or make a PR updating this).
|
||||||
This should work on GNU ld and lld (rust-lld) and gcc/clang, however if you
|
This should work on GNU ld and lld (rust-lld) and gcc/clang, however if you
|
||||||
happen to have linker issues it's recommended to try using `mold` or `gold`
|
happen to have linker issues it's recommended to try using `mold` or `gold`
|
||||||
linkers, and please let us know in the [Continuwuity Matrix room][7] the linker
|
linkers, and please let us know in the [conduwuit Matrix room][7] the linker
|
||||||
error and what linker solved this issue so we can figure out a solution. Ideally
|
error and what linker solved this issue so we can figure out a solution. Ideally
|
||||||
there should be minimal friction to using this, and in the future a build script
|
there should be minimal friction to using this, and in the future a build script
|
||||||
(`build.rs`) may be suitable to making this easier to use if the capabilities
|
(`build.rs`) may be suitable to making this easier to use if the capabilities
|
||||||
|
@ -52,13 +52,13 @@ allow us.
|
||||||
|
|
||||||
As of 19 May 2024, the instructions for using this are:
|
As of 19 May 2024, the instructions for using this are:
|
||||||
|
|
||||||
0. Have patience. Don't hesitate to join the [Continuwuity Matrix room][7] to
|
0. Have patience. Don't hesitate to join the [conduwuit Matrix room][7] to
|
||||||
receive help using this. As indicated by the various rustflags used and some
|
receive help using this. As indicated by the various rustflags used and some
|
||||||
of the interesting issues linked at the bottom, this is definitely not something
|
of the interesting issues linked at the bottom, this is definitely not something
|
||||||
the Rust ecosystem or toolchain is used to doing.
|
the Rust ecosystem or toolchain is used to doing.
|
||||||
|
|
||||||
1. Install the nightly toolchain using rustup. You may need to use `rustup
|
1. Install the nightly toolchain using rustup. You may need to use `rustup
|
||||||
override set nightly` in your local Continuwuity directory, or use `cargo
|
override set nightly` in your local conduwuit directory, or use `cargo
|
||||||
+nightly` for all actions.
|
+nightly` for all actions.
|
||||||
|
|
||||||
2. Uncomment `cargo-features` at the top level / root Cargo.toml
|
2. Uncomment `cargo-features` at the top level / root Cargo.toml
|
||||||
|
@ -85,14 +85,14 @@ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.rustup/toolchains/nightly-x86_64-unknown
|
||||||
Cargo should only rebuild what was changed / what's necessary, so it should
|
Cargo should only rebuild what was changed / what's necessary, so it should
|
||||||
not be rebuilding all the crates.
|
not be rebuilding all the crates.
|
||||||
|
|
||||||
9. In your Continuwuity server terminal, hit/send `CTRL+C` signal. This will tell
|
9. In your conduwuit server terminal, hit/send `CTRL+C` signal. This will tell
|
||||||
Continuwuity to find which libraries need to be reloaded, and reloads them as
|
conduwuit to find which libraries need to be reloaded, and reloads them as
|
||||||
necessary.
|
necessary.
|
||||||
|
|
||||||
10. If there were no errors, it will tell you it successfully reloaded `#`
|
10. If there were no errors, it will tell you it successfully reloaded `#`
|
||||||
modules, and your changes should now be visible. Repeat 7 - 9 as needed.
|
modules, and your changes should now be visible. Repeat 7 - 9 as needed.
|
||||||
|
|
||||||
To shutdown Continuwuity in this setup, hit/send `CTRL+\`. Normal builds still
|
To shutdown conduwuit in this setup, hit/send `CTRL+\`. Normal builds still
|
||||||
shutdown with `CTRL+C` as usual.
|
shutdown with `CTRL+C` as usual.
|
||||||
|
|
||||||
Steps 1 - 5 are the initial first-time steps for using this. To remove the hot
|
Steps 1 - 5 are the initial first-time steps for using this. To remove the hot
|
||||||
|
@ -101,7 +101,7 @@ reload setup, revert/comment all the Cargo.toml changes.
|
||||||
As mentioned in the requirements section, if you happen to have some linker
|
As mentioned in the requirements section, if you happen to have some linker
|
||||||
issues, try using the `-fuse-ld=` rustflag and specify mold or gold in all the
|
issues, try using the `-fuse-ld=` rustflag and specify mold or gold in all the
|
||||||
`rustflags` definitions in the top level Cargo.toml, and please let us know in
|
`rustflags` definitions in the top level Cargo.toml, and please let us know in
|
||||||
the [Continuwuity Matrix room][7] the problem. mold can be installed typically
|
the [conduwuit Matrix room][7] the problem. mold can be installed typically
|
||||||
through your distro, and gold is provided by the binutils package.
|
through your distro, and gold is provided by the binutils package.
|
||||||
|
|
||||||
It's possible a helper script can be made to do all of this, or most preferably
|
It's possible a helper script can be made to do all of this, or most preferably
|
||||||
|
@ -136,7 +136,7 @@ acyclic graph. The primary rule is simple and illustrated in the figure below:
|
||||||
**no crate is allowed to call a function or use a variable from a crate below
|
**no crate is allowed to call a function or use a variable from a crate below
|
||||||
it.**
|
it.**
|
||||||
|
|
||||||

|
Volk](assets/libraries.png)
|
||||||
|
|
||||||
When a symbol is referenced between crates they become bound: **crates cannot be
|
When a symbol is referenced between crates they become bound: **crates cannot be
|
||||||
|
@ -147,7 +147,7 @@ by using an `RTLD_LOCAL` binding for just one link between the main executable
|
||||||
and the first crate, freeing the executable from all modules as no global
|
and the first crate, freeing the executable from all modules as no global
|
||||||
binding ever occurs between them.
|
binding ever occurs between them.
|
||||||
|
|
||||||

|
Volk](assets/reload_order.png)
|
||||||
|
|
||||||
Proper resource management is essential for reliable reloading to occur. This is
|
Proper resource management is essential for reliable reloading to occur. This is
|
||||||
|
@ -196,5 +196,5 @@ The initial implementation PR is available [here][1].
|
||||||
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049
|
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049
|
||||||
[5]: https://github.com/rust-lang/cargo/issues/12746
|
[5]: https://github.com/rust-lang/cargo/issues/12746
|
||||||
[6]: https://crates.io/crates/hot-lib-reloader/
|
[6]: https://crates.io/crates/hot-lib-reloader/
|
||||||
[7]: https://matrix.to/#/#continuwuity:continuwuity.org
|
[7]: https://matrix.to/#/#conduwuit:puppygock.gay
|
||||||
[8]: https://crates.io/crates/libloading
|
[8]: https://crates.io/crates/libloading
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# Continuwuity
|
# conduwuit
|
||||||
|
|
||||||
{{#include ../README.md:catchphrase}}
|
{{#include ../README.md:catchphrase}}
|
||||||
|
|
||||||
|
@ -8,7 +8,7 @@
|
||||||
|
|
||||||
- [Deployment options](deploying.md)
|
- [Deployment options](deploying.md)
|
||||||
|
|
||||||
If you want to connect an appservice to Continuwuity, take a look at the
|
If you want to connect an appservice to conduwuit, take a look at the
|
||||||
[appservices documentation](appservices.md).
|
[appservices documentation](appservices.md).
|
||||||
|
|
||||||
#### How can I contribute?
|
#### How can I contribute?
|
||||||
|
|
|
@ -1,14 +1,14 @@
|
||||||
# Maintaining your Continuwuity setup
|
# Maintaining your conduwuit setup
|
||||||
|
|
||||||
## Moderation
|
## Moderation
|
||||||
|
|
||||||
Continuwuity has moderation through admin room commands. "binary commands" (medium
|
conduwuit has moderation through admin room commands. "binary commands" (medium
|
||||||
priority) and an admin API (low priority) is planned. Some moderation-related
|
priority) and an admin API (low priority) is planned. Some moderation-related
|
||||||
config options are available in the example config such as "global ACLs" and
|
config options are available in the example config such as "global ACLs" and
|
||||||
blocking media requests to certain servers. See the example config for the
|
blocking media requests to certain servers. See the example config for the
|
||||||
moderation config options under the "Moderation / Privacy / Security" section.
|
moderation config options under the "Moderation / Privacy / Security" section.
|
||||||
|
|
||||||
Continuwuity has moderation admin commands for:
|
conduwuit has moderation admin commands for:
|
||||||
|
|
||||||
- managing room aliases (`!admin rooms alias`)
|
- managing room aliases (`!admin rooms alias`)
|
||||||
- managing room directory (`!admin rooms directory`)
|
- managing room directory (`!admin rooms directory`)
|
||||||
|
@ -36,7 +36,7 @@ each object being newline delimited. An example of doing this is:
|
||||||
## Database (RocksDB)
|
## Database (RocksDB)
|
||||||
|
|
||||||
Generally there is very little you need to do. [Compaction][rocksdb-compaction]
|
Generally there is very little you need to do. [Compaction][rocksdb-compaction]
|
||||||
is run automatically based on various defined thresholds tuned for Continuwuity to
|
is run automatically based on various defined thresholds tuned for conduwuit to
|
||||||
be high performance with the least I/O amplification or overhead. Manually
|
be high performance with the least I/O amplification or overhead. Manually
|
||||||
running compaction is not recommended, or compaction via a timer, due to
|
running compaction is not recommended, or compaction via a timer, due to
|
||||||
creating unnecessary I/O amplification. RocksDB is built with io_uring support
|
creating unnecessary I/O amplification. RocksDB is built with io_uring support
|
||||||
|
@ -50,7 +50,7 @@ Some RocksDB settings can be adjusted such as the compression method chosen. See
|
||||||
the RocksDB section in the [example config](configuration/examples.md).
|
the RocksDB section in the [example config](configuration/examples.md).
|
||||||
|
|
||||||
btrfs users have reported that database compression does not need to be disabled
|
btrfs users have reported that database compression does not need to be disabled
|
||||||
on Continuwuity as the filesystem already does not attempt to compress. This can be
|
on conduwuit as the filesystem already does not attempt to compress. This can be
|
||||||
validated by using `filefrag -v` on a `.SST` file in your database, and ensure
|
validated by using `filefrag -v` on a `.SST` file in your database, and ensure
|
||||||
the `physical_offset` matches (no filesystem compression). It is very important
|
the `physical_offset` matches (no filesystem compression). It is very important
|
||||||
to ensure no additional filesystem compression takes place as this can render
|
to ensure no additional filesystem compression takes place as this can render
|
||||||
|
@ -70,7 +70,7 @@ they're server logs or database logs, however they are critical RocksDB files
|
||||||
related to WAL tracking.
|
related to WAL tracking.
|
||||||
|
|
||||||
The only safe files that can be deleted are the `LOG` files (all caps). These
|
The only safe files that can be deleted are the `LOG` files (all caps). These
|
||||||
are the real RocksDB telemetry/log files, however Continuwuity has already
|
are the real RocksDB telemetry/log files, however conduwuit has already
|
||||||
configured to only store up to 3 RocksDB `LOG` files due to generally being
|
configured to only store up to 3 RocksDB `LOG` files due to generally being
|
||||||
useless for average users unless troubleshooting something low-level. If you
|
useless for average users unless troubleshooting something low-level. If you
|
||||||
would like to store nearly none at all, see the `rocksdb_max_log_files`
|
would like to store nearly none at all, see the `rocksdb_max_log_files`
|
||||||
|
@ -88,7 +88,7 @@ still be joined together.
|
||||||
|
|
||||||
To restore a backup from an online RocksDB backup:
|
To restore a backup from an online RocksDB backup:
|
||||||
|
|
||||||
- shutdown Continuwuity
|
- shutdown conduwuit
|
||||||
- create a new directory for merging together the data
|
- create a new directory for merging together the data
|
||||||
- in the online backup created, copy all `.sst` files in
|
- in the online backup created, copy all `.sst` files in
|
||||||
`$DATABASE_BACKUP_PATH/shared_checksum` to your new directory
|
`$DATABASE_BACKUP_PATH/shared_checksum` to your new directory
|
||||||
|
@ -99,9 +99,9 @@ To restore a backup from an online RocksDB backup:
|
||||||
if you have multiple) to your new directory
|
if you have multiple) to your new directory
|
||||||
- set your `database_path` config option to your new directory, or replace your
|
- set your `database_path` config option to your new directory, or replace your
|
||||||
old one with the new one you crafted
|
old one with the new one you crafted
|
||||||
- start up Continuwuity again and it should open as normal
|
- start up conduwuit again and it should open as normal
|
||||||
|
|
||||||
If you'd like to do an offline backup, shutdown Continuwuity and copy your
|
If you'd like to do an offline backup, shutdown conduwuit and copy your
|
||||||
`database_path` directory elsewhere. This can be restored with no modifications
|
`database_path` directory elsewhere. This can be restored with no modifications
|
||||||
needed.
|
needed.
|
||||||
|
|
||||||
|
@ -110,7 +110,7 @@ directory.
|
||||||
|
|
||||||
## Media
|
## Media
|
||||||
|
|
||||||
Media still needs various work, however Continuwuity implements media deletion via:
|
Media still needs various work, however conduwuit implements media deletion via:
|
||||||
|
|
||||||
- MXC URI or Event ID (unencrypted and attempts to find the MXC URI in the
|
- MXC URI or Event ID (unencrypted and attempts to find the MXC URI in the
|
||||||
event)
|
event)
|
||||||
|
@ -118,17 +118,17 @@ event)
|
||||||
- Delete remote media in the past `N` seconds/minutes via filesystem metadata on
|
- Delete remote media in the past `N` seconds/minutes via filesystem metadata on
|
||||||
the file created time (`btime`) or file modified time (`mtime`)
|
the file created time (`btime`) or file modified time (`mtime`)
|
||||||
|
|
||||||
See the `!admin media` command for further information. All media in Continuwuity
|
See the `!admin media` command for further information. All media in conduwuit
|
||||||
is stored at `$DATABASE_DIR/media`. This will be configurable soon.
|
is stored at `$DATABASE_DIR/media`. This will be configurable soon.
|
||||||
|
|
||||||
If you are finding yourself needing extensive granular control over media, we
|
If you are finding yourself needing extensive granular control over media, we
|
||||||
recommend looking into [Matrix Media
|
recommend looking into [Matrix Media
|
||||||
Repo](https://github.com/t2bot/matrix-media-repo). Continuwuity intends to
|
Repo](https://github.com/t2bot/matrix-media-repo). conduwuit intends to
|
||||||
implement various utilities for media, but MMR is dedicated to extensive media
|
implement various utilities for media, but MMR is dedicated to extensive media
|
||||||
management.
|
management.
|
||||||
|
|
||||||
Built-in S3 support is also planned, but for now using a "S3 filesystem" on
|
Built-in S3 support is also planned, but for now using a "S3 filesystem" on
|
||||||
`media/` works. Continuwuity also sends a `Cache-Control` header of 1 year and
|
`media/` works. conduwuit also sends a `Cache-Control` header of 1 year and
|
||||||
immutable for all media requests (download and thumbnail) to reduce unnecessary
|
immutable for all media requests (download and thumbnail) to reduce unnecessary
|
||||||
media requests from browsers, reduce bandwidth usage, and reduce load.
|
media requests from browsers, reduce bandwidth usage, and reduce load.
|
||||||
|
|
||||||
|
|
6
docs/static/_headers
vendored
6
docs/static/_headers
vendored
|
@ -1,6 +0,0 @@
|
||||||
/.well-known/matrix/*
|
|
||||||
Access-Control-Allow-Origin: *
|
|
||||||
Content-Type: application/json
|
|
||||||
/.well-known/continuwuity/*
|
|
||||||
Access-Control-Allow-Origin: *
|
|
||||||
Content-Type: application/json
|
|
9
docs/static/announcements.json
vendored
9
docs/static/announcements.json
vendored
|
@ -1,9 +0,0 @@
|
||||||
{
|
|
||||||
"$schema": "https://continuwuity.org/schema/announcements.schema.json",
|
|
||||||
"announcements": [
|
|
||||||
{
|
|
||||||
"id": 1,
|
|
||||||
"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
31
docs/static/announcements.schema.json
vendored
31
docs/static/announcements.schema.json
vendored
|
@ -1,31 +0,0 @@
|
||||||
{
|
|
||||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
|
||||||
"$id": "https://continwuity.org/schema/announcements.schema.json",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"updates": {
|
|
||||||
"type": "array",
|
|
||||||
"items": {
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"id": {
|
|
||||||
"type": "integer"
|
|
||||||
},
|
|
||||||
"message": {
|
|
||||||
"type": "string"
|
|
||||||
},
|
|
||||||
"date": {
|
|
||||||
"type": "string"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": [
|
|
||||||
"id",
|
|
||||||
"message"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"required": [
|
|
||||||
"updates"
|
|
||||||
]
|
|
||||||
}
|
|
1
docs/static/client
vendored
1
docs/static/client
vendored
|
@ -1 +0,0 @@
|
||||||
{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"}}
|
|
1
docs/static/server
vendored
1
docs/static/server
vendored
|
@ -1 +0,0 @@
|
||||||
{"m.server":"matrix.continuwuity.org:443"}
|
|
24
docs/static/support
vendored
24
docs/static/support
vendored
|
@ -1,24 +0,0 @@
|
||||||
{
|
|
||||||
"contacts": [
|
|
||||||
{
|
|
||||||
"email_address": "security@continuwuity.org",
|
|
||||||
"role": "m.role.security"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"matrix_id": "@tom:continuwuity.org",
|
|
||||||
"email_address": "tom@tcpip.uk",
|
|
||||||
"role": "m.role.admin"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"matrix_id": "@jade:continuwuity.org",
|
|
||||||
"email_address": "jade@continuwuity.org",
|
|
||||||
"role": "m.role.admin"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"matrix_id": "@nex:continuwuity.org",
|
|
||||||
"email_address": "nex@continuwuity.org",
|
|
||||||
"role": "m.role.admin"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"support_page": "https://continuwuity.org/introduction#contact"
|
|
||||||
}
|
|
|
@ -1,48 +1,47 @@
|
||||||
# Troubleshooting Continuwuity
|
# Troubleshooting conduwuit
|
||||||
|
|
||||||
> **Docker users ⚠️**
|
> ## Docker users ⚠️
|
||||||
>
|
>
|
||||||
> Docker can be difficult to use and debug. It's common for Docker
|
> Docker is extremely UX unfriendly. Because of this, a ton of issues or support
|
||||||
> misconfigurations to cause issues, particularly with networking and permissions.
|
> is actually Docker support, not conduwuit support. We also cannot document the
|
||||||
> Please check that your issues are not due to problems with your Docker setup.
|
> ever-growing list of Docker issues here.
|
||||||
|
>
|
||||||
|
> If you intend on asking for support and you are using Docker, **PLEASE**
|
||||||
|
> triple validate your issues are **NOT** because you have a misconfiguration in
|
||||||
|
> your Docker setup.
|
||||||
|
>
|
||||||
|
> If there are things like Compose file issues or Dockerhub image issues, those
|
||||||
|
> can still be mentioned as long as they're something we can fix.
|
||||||
|
|
||||||
## Continuwuity and Matrix issues
|
## conduwuit and Matrix issues
|
||||||
|
|
||||||
### Lost access to admin room
|
#### Lost access to admin room
|
||||||
|
|
||||||
You can reinvite yourself to the admin room through the following methods:
|
You can reinvite yourself to the admin room through the following methods:
|
||||||
|
- Use the `--execute "users make_user_admin <username>"` conduwuit binary
|
||||||
- Use the `--execute "users make_user_admin <username>"` Continuwuity binary
|
|
||||||
argument once to invite yourself to the admin room on startup
|
argument once to invite yourself to the admin room on startup
|
||||||
- Use the Continuwuity console/CLI to run the `users make_user_admin` command
|
- Use the conduwuit console/CLI to run the `users make_user_admin` command
|
||||||
- Or specify the `emergency_password` config option to allow you to temporarily
|
- Or specify the `emergency_password` config option to allow you to temporarily
|
||||||
log into the server account (`@conduit`) from a web client
|
log into the server account (`@conduit`) from a web client
|
||||||
|
|
||||||
## General potential issues
|
## General potential issues
|
||||||
|
|
||||||
### Potential DNS issues when using Docker
|
#### Potential DNS issues when using Docker
|
||||||
|
|
||||||
Docker's DNS setup for containers in a non-default network intercepts queries to
|
Docker has issues with its default DNS setup that may cause DNS to not be
|
||||||
enable resolving of container hostnames to IP addresses. However, due to
|
properly functional when running conduwuit, resulting in federation issues. The
|
||||||
performance issues with Docker's built-in resolver, this can cause DNS queries
|
symptoms of this have shown in excessively long room joins (30+ minutes) from
|
||||||
to take a long time to resolve, resulting in federation issues.
|
very long DNS timeouts, log entries of "mismatching responding nameservers",
|
||||||
|
|
||||||
This is particularly common with Docker Compose, as custom networks are easily
|
|
||||||
created and configured.
|
|
||||||
|
|
||||||
Symptoms of this include excessively long room joins (30+ minutes) from very
|
|
||||||
long DNS timeouts, log entries of "mismatching responding nameservers",
|
|
||||||
and/or partial or non-functional inbound/outbound federation.
|
and/or partial or non-functional inbound/outbound federation.
|
||||||
|
|
||||||
This is not a bug in continuwuity. Docker's default DNS resolver is not suitable
|
This is **not** a conduwuit issue, and is purely a Docker issue. It is not
|
||||||
for heavy DNS activity, which is normal for federated protocols like Matrix.
|
sustainable for heavy DNS activity which is normal for Matrix federation. The
|
||||||
|
workarounds for this are:
|
||||||
Workarounds:
|
|
||||||
|
|
||||||
- Use DNS over TCP via the config option `query_over_tcp_only = true`
|
- Use DNS over TCP via the config option `query_over_tcp_only = true`
|
||||||
- Bypass Docker's default DNS setup and instead allow the container to use and communicate with your host's DNS servers. Typically, this can be done by mounting the host's `/etc/resolv.conf`.
|
- Don't use Docker's default DNS setup and instead allow the container to use
|
||||||
|
and communicate with your host's DNS servers (host's `/etc/resolv.conf`)
|
||||||
|
|
||||||
### DNS No connections available error message
|
#### DNS No connections available error message
|
||||||
|
|
||||||
If you receive spurious amounts of error logs saying "DNS No connections
|
If you receive spurious amounts of error logs saying "DNS No connections
|
||||||
available", this is due to your DNS server (servers from `/etc/resolv.conf`)
|
available", this is due to your DNS server (servers from `/etc/resolv.conf`)
|
||||||
|
@ -65,7 +64,7 @@ very computationally expensive, and is extremely susceptible to denial of
|
||||||
service, especially on Matrix. Many servers also strangely have broken DNSSEC
|
service, especially on Matrix. Many servers also strangely have broken DNSSEC
|
||||||
setups and will result in non-functional federation.
|
setups and will result in non-functional federation.
|
||||||
|
|
||||||
Continuwuity cannot provide a "works-for-everyone" Unbound DNS setup guide, but
|
conduwuit cannot provide a "works-for-everyone" Unbound DNS setup guide, but
|
||||||
the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch]
|
the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch]
|
||||||
may be of interest. Disabling DNSSEC on Unbound is commenting out trust-anchors
|
may be of interest. Disabling DNSSEC on Unbound is commenting out trust-anchors
|
||||||
config options and removing the `validator` module.
|
config options and removing the `validator` module.
|
||||||
|
@ -76,9 +75,9 @@ high load, and we have identified its DNS caching to not be very effective.
|
||||||
dnsmasq can possibly work, but it does **not** support TCP fallback which can be
|
dnsmasq can possibly work, but it does **not** support TCP fallback which can be
|
||||||
problematic when receiving large DNS responses such as from large SRV records.
|
problematic when receiving large DNS responses such as from large SRV records.
|
||||||
If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback`
|
If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback`
|
||||||
in Continuwuity config.
|
in conduwuit config.
|
||||||
|
|
||||||
Raising `dns_cache_entries` in Continuwuity config from the default can also assist
|
Raising `dns_cache_entries` in conduwuit config from the default can also assist
|
||||||
in DNS caching, but a full-fledged external caching resolver is better and more
|
in DNS caching, but a full-fledged external caching resolver is better and more
|
||||||
reliable.
|
reliable.
|
||||||
|
|
||||||
|
@ -92,13 +91,13 @@ reliability at a slight performance cost due to TCP overhead.
|
||||||
|
|
||||||
## RocksDB / database issues
|
## RocksDB / database issues
|
||||||
|
|
||||||
### Database corruption
|
#### Database corruption
|
||||||
|
|
||||||
If your database is corrupted *and* is failing to start (e.g. checksum
|
If your database is corrupted *and* is failing to start (e.g. checksum
|
||||||
mismatch), it may be recoverable but careful steps must be taken, and there is
|
mismatch), it may be recoverable but careful steps must be taken, and there is
|
||||||
no guarantee it may be recoverable.
|
no guarantee it may be recoverable.
|
||||||
|
|
||||||
The first thing that can be done is launching Continuwuity with the
|
The first thing that can be done is launching conduwuit with the
|
||||||
`rocksdb_repair` config option set to true. This will tell RocksDB to attempt to
|
`rocksdb_repair` config option set to true. This will tell RocksDB to attempt to
|
||||||
repair itself at launch. If this does not work, disable the option and continue
|
repair itself at launch. If this does not work, disable the option and continue
|
||||||
reading.
|
reading.
|
||||||
|
@ -110,7 +109,7 @@ RocksDB has the following recovery modes:
|
||||||
- `PointInTime`
|
- `PointInTime`
|
||||||
- `SkipAnyCorruptedRecord`
|
- `SkipAnyCorruptedRecord`
|
||||||
|
|
||||||
By default, Continuwuity uses `TolerateCorruptedTailRecords` as generally these may
|
By default, conduwuit uses `TolerateCorruptedTailRecords` as generally these may
|
||||||
be due to bad federation and we can re-fetch the correct data over federation.
|
be due to bad federation and we can re-fetch the correct data over federation.
|
||||||
The RocksDB default is `PointInTime` which will attempt to restore a "snapshot"
|
The RocksDB default is `PointInTime` which will attempt to restore a "snapshot"
|
||||||
of the data when it was last known to be good. This data can be either a few
|
of the data when it was last known to be good. This data can be either a few
|
||||||
|
@ -127,12 +126,12 @@ if `PointInTime` does not work as a last ditch effort.
|
||||||
|
|
||||||
With this in mind:
|
With this in mind:
|
||||||
|
|
||||||
- First start Continuwuity with the `PointInTime` recovery method. See the [example
|
- First start conduwuit with the `PointInTime` recovery method. See the [example
|
||||||
config](configuration/examples.md) for how to do this using
|
config](configuration/examples.md) for how to do this using
|
||||||
`rocksdb_recovery_mode`
|
`rocksdb_recovery_mode`
|
||||||
- If your database successfully opens, clients are recommended to clear their
|
- If your database successfully opens, clients are recommended to clear their
|
||||||
client cache to account for the rollback
|
client cache to account for the rollback
|
||||||
- Leave your Continuwuity running in `PointInTime` for at least 30-60 minutes so as
|
- Leave your conduwuit running in `PointInTime` for at least 30-60 minutes so as
|
||||||
much possible corruption is restored
|
much possible corruption is restored
|
||||||
- If all goes will, you should be able to restore back to using
|
- If all goes will, you should be able to restore back to using
|
||||||
`TolerateCorruptedTailRecords` and you have successfully recovered your database
|
`TolerateCorruptedTailRecords` and you have successfully recovered your database
|
||||||
|
@ -143,16 +142,16 @@ Note that users should not really be debugging things. If you find yourself
|
||||||
debugging and find the issue, please let us know and/or how we can fix it.
|
debugging and find the issue, please let us know and/or how we can fix it.
|
||||||
Various debug commands can be found in `!admin debug`.
|
Various debug commands can be found in `!admin debug`.
|
||||||
|
|
||||||
### Debug/Trace log level
|
#### Debug/Trace log level
|
||||||
|
|
||||||
Continuwuity builds without debug or trace log levels at compile time by default
|
conduwuit builds without debug or trace log levels at compile time by default
|
||||||
for substantial performance gains in CPU usage and improved compile times. If
|
for substantial performance gains in CPU usage and improved compile times. If
|
||||||
you need to access debug/trace log levels, you will need to build without the
|
you need to access debug/trace log levels, you will need to build without the
|
||||||
`release_max_log_level` feature or use our provided static debug binaries.
|
`release_max_log_level` feature or use our provided static debug binaries.
|
||||||
|
|
||||||
### Changing log level dynamically
|
#### Changing log level dynamically
|
||||||
|
|
||||||
Continuwuity supports changing the tracing log environment filter on-the-fly using
|
conduwuit supports changing the tracing log environment filter on-the-fly using
|
||||||
the admin command `!admin debug change-log-level <log env filter>`. This accepts
|
the admin command `!admin debug change-log-level <log env filter>`. This accepts
|
||||||
a string **without quotes** the same format as the `log` config option.
|
a string **without quotes** the same format as the `log` config option.
|
||||||
|
|
||||||
|
@ -167,9 +166,9 @@ load, simply pass the `--reset` flag.
|
||||||
|
|
||||||
`!admin debug change-log-level --reset`
|
`!admin debug change-log-level --reset`
|
||||||
|
|
||||||
### Pinging servers
|
#### Pinging servers
|
||||||
|
|
||||||
Continuwuity can ping other servers using `!admin debug ping <server>`. This takes
|
conduwuit can ping other servers using `!admin debug ping <server>`. This takes
|
||||||
a server name and goes through the server discovery process and queries
|
a server name and goes through the server discovery process and queries
|
||||||
`/_matrix/federation/v1/version`. Errors are outputted.
|
`/_matrix/federation/v1/version`. Errors are outputted.
|
||||||
|
|
||||||
|
@ -178,15 +177,15 @@ server performance on either side as that endpoint is completely unauthenticated
|
||||||
and simply fetches a string on a static JSON endpoint. It is very low cost both
|
and simply fetches a string on a static JSON endpoint. It is very low cost both
|
||||||
bandwidth and computationally.
|
bandwidth and computationally.
|
||||||
|
|
||||||
### Allocator memory stats
|
#### Allocator memory stats
|
||||||
|
|
||||||
When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you
|
When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you
|
||||||
can see Continuwuity's high-level allocator stats by using
|
can see conduwuit's high-level allocator stats by using
|
||||||
`!admin server memory-usage` at the bottom.
|
`!admin server memory-usage` at the bottom.
|
||||||
|
|
||||||
If you are a developer, you can also view the raw jemalloc statistics with
|
If you are a developer, you can also view the raw jemalloc statistics with
|
||||||
`!admin debug memory-stats`. Please note that this output is extremely large
|
`!admin debug memory-stats`. Please note that this output is extremely large
|
||||||
which may only be visible in the Continuwuity console CLI due to PDU size limits,
|
which may only be visible in the conduwuit console CLI due to PDU size limits,
|
||||||
and is not easy for non-developers to understand.
|
and is not easy for non-developers to understand.
|
||||||
|
|
||||||
[unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html
|
[unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
# Setting up TURN/STURN
|
# Setting up TURN/STURN
|
||||||
|
|
||||||
In order to make or receive calls, a TURN server is required. Continuwuity suggests
|
In order to make or receive calls, a TURN server is required. conduwuit suggests
|
||||||
using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also
|
using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also
|
||||||
available as a Docker image.
|
available as a Docker image.
|
||||||
|
|
||||||
|
@ -17,9 +17,9 @@ realm=<your server domain>
|
||||||
A common way to generate a suitable alphanumeric secret key is by using `pwgen
|
A common way to generate a suitable alphanumeric secret key is by using `pwgen
|
||||||
-s 64 1`.
|
-s 64 1`.
|
||||||
|
|
||||||
These same values need to be set in Continuwuity. See the [example
|
These same values need to be set in conduwuit. See the [example
|
||||||
config](configuration/examples.md) in the TURN section for configuring these and
|
config](configuration/examples.md) in the TURN section for configuring these and
|
||||||
restart Continuwuity after.
|
restart conduwuit after.
|
||||||
|
|
||||||
`turn_secret` or a path to `turn_secret_file` must have a value of your
|
`turn_secret` or a path to `turn_secret_file` must have a value of your
|
||||||
coturn `static-auth-secret`, or use `turn_username` and `turn_password`
|
coturn `static-auth-secret`, or use `turn_username` and `turn_password`
|
||||||
|
@ -34,7 +34,7 @@ If you are using TURN over TLS, you can replace `turn:` with `turns:` in the
|
||||||
TURN over TLS. This is highly recommended.
|
TURN over TLS. This is highly recommended.
|
||||||
|
|
||||||
If you need unauthenticated access to the TURN URIs, or some clients may be
|
If you need unauthenticated access to the TURN URIs, or some clients may be
|
||||||
having trouble, you can enable `turn_guest_access` in Continuwuity which disables
|
having trouble, you can enable `turn_guest_access` in conduwuit which disables
|
||||||
authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer`
|
authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer`
|
||||||
|
|
||||||
### Run
|
### Run
|
||||||
|
|
|
@ -26,7 +26,7 @@
|
||||||
file = ./rust-toolchain.toml;
|
file = ./rust-toolchain.toml;
|
||||||
|
|
||||||
# See also `rust-toolchain.toml`
|
# See also `rust-toolchain.toml`
|
||||||
sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI=";
|
sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU=";
|
||||||
};
|
};
|
||||||
|
|
||||||
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
|
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
|
||||||
|
|
|
@ -9,7 +9,7 @@
|
||||||
# If you're having trouble making the relevant changes, bug a maintainer.
|
# If you're having trouble making the relevant changes, bug a maintainer.
|
||||||
|
|
||||||
[toolchain]
|
[toolchain]
|
||||||
channel = "1.86.0"
|
channel = "1.85.0"
|
||||||
profile = "minimal"
|
profile = "minimal"
|
||||||
components = [
|
components = [
|
||||||
# For rust-analyzer
|
# For rust-analyzer
|
||||||
|
|
|
@ -17,61 +17,12 @@ crate-type = [
|
||||||
]
|
]
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
brotli_compression = [
|
|
||||||
"conduwuit-api/brotli_compression",
|
|
||||||
"conduwuit-core/brotli_compression",
|
|
||||||
"conduwuit-service/brotli_compression",
|
|
||||||
]
|
|
||||||
gzip_compression = [
|
|
||||||
"conduwuit-api/gzip_compression",
|
|
||||||
"conduwuit-core/gzip_compression",
|
|
||||||
"conduwuit-service/gzip_compression",
|
|
||||||
]
|
|
||||||
io_uring = [
|
|
||||||
"conduwuit-api/io_uring",
|
|
||||||
"conduwuit-database/io_uring",
|
|
||||||
"conduwuit-service/io_uring",
|
|
||||||
]
|
|
||||||
jemalloc = [
|
|
||||||
"conduwuit-api/jemalloc",
|
|
||||||
"conduwuit-core/jemalloc",
|
|
||||||
"conduwuit-database/jemalloc",
|
|
||||||
"conduwuit-service/jemalloc",
|
|
||||||
]
|
|
||||||
jemalloc_conf = [
|
|
||||||
"conduwuit-api/jemalloc_conf",
|
|
||||||
"conduwuit-core/jemalloc_conf",
|
|
||||||
"conduwuit-database/jemalloc_conf",
|
|
||||||
"conduwuit-service/jemalloc_conf",
|
|
||||||
]
|
|
||||||
jemalloc_prof = [
|
|
||||||
"conduwuit-api/jemalloc_prof",
|
|
||||||
"conduwuit-core/jemalloc_prof",
|
|
||||||
"conduwuit-database/jemalloc_prof",
|
|
||||||
"conduwuit-service/jemalloc_prof",
|
|
||||||
]
|
|
||||||
jemalloc_stats = [
|
|
||||||
"conduwuit-api/jemalloc_stats",
|
|
||||||
"conduwuit-core/jemalloc_stats",
|
|
||||||
"conduwuit-database/jemalloc_stats",
|
|
||||||
"conduwuit-service/jemalloc_stats",
|
|
||||||
]
|
|
||||||
release_max_log_level = [
|
release_max_log_level = [
|
||||||
"conduwuit-api/release_max_log_level",
|
|
||||||
"conduwuit-core/release_max_log_level",
|
|
||||||
"conduwuit-database/release_max_log_level",
|
|
||||||
"conduwuit-service/release_max_log_level",
|
|
||||||
"tracing/max_level_trace",
|
"tracing/max_level_trace",
|
||||||
"tracing/release_max_level_info",
|
"tracing/release_max_level_info",
|
||||||
"log/max_level_trace",
|
"log/max_level_trace",
|
||||||
"log/release_max_level_info",
|
"log/release_max_level_info",
|
||||||
]
|
]
|
||||||
zstd_compression = [
|
|
||||||
"conduwuit-api/zstd_compression",
|
|
||||||
"conduwuit-core/zstd_compression",
|
|
||||||
"conduwuit-database/zstd_compression",
|
|
||||||
"conduwuit-service/zstd_compression",
|
|
||||||
]
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
clap.workspace = true
|
clap.workspace = true
|
||||||
|
|
|
@ -2,7 +2,7 @@ use clap::Parser;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
appservice, appservice::AppserviceCommand, check, check::CheckCommand, context::Context,
|
appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command,
|
||||||
debug, debug::DebugCommand, federation, federation::FederationCommand, media,
|
debug, debug::DebugCommand, federation, federation::FederationCommand, media,
|
||||||
media::MediaCommand, query, query::QueryCommand, room, room::RoomCommand, server,
|
media::MediaCommand, query, query::QueryCommand, room, room::RoomCommand, server,
|
||||||
server::ServerCommand, user, user::UserCommand,
|
server::ServerCommand, user, user::UserCommand,
|
||||||
|
@ -49,18 +49,20 @@ pub(super) enum AdminCommand {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tracing::instrument(skip_all, name = "command")]
|
#[tracing::instrument(skip_all, name = "command")]
|
||||||
pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result {
|
||||||
use AdminCommand::*;
|
use AdminCommand::*;
|
||||||
|
|
||||||
match command {
|
match command {
|
||||||
| Appservices(command) => appservice::process(command, context).await,
|
| Appservices(command) => appservice::process(command, context).await?,
|
||||||
| Media(command) => media::process(command, context).await,
|
| Media(command) => media::process(command, context).await?,
|
||||||
| Users(command) => user::process(command, context).await,
|
| Users(command) => user::process(command, context).await?,
|
||||||
| Rooms(command) => room::process(command, context).await,
|
| Rooms(command) => room::process(command, context).await?,
|
||||||
| Federation(command) => federation::process(command, context).await,
|
| Federation(command) => federation::process(command, context).await?,
|
||||||
| Server(command) => server::process(command, context).await,
|
| Server(command) => server::process(command, context).await?,
|
||||||
| Debug(command) => debug::process(command, context).await,
|
| Debug(command) => debug::process(command, context).await?,
|
||||||
| Query(command) => query::process(command, context).await,
|
| Query(command) => query::process(command, context).await?,
|
||||||
| Check(command) => check::process(command, context).await,
|
| Check(command) => check::process(command, context).await?,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,80 +1,84 @@
|
||||||
use conduwuit::{Err, Result, checked};
|
use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent};
|
||||||
use futures::{FutureExt, StreamExt, TryFutureExt};
|
|
||||||
|
|
||||||
use crate::admin_command;
|
use crate::{Result, admin_command};
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn register(&self) -> Result {
|
pub(super) async fn register(&self) -> Result<RoomMessageEventContent> {
|
||||||
let body = &self.body;
|
if self.body.len() < 2
|
||||||
let body_len = self.body.len();
|
|| !self.body[0].trim().starts_with("```")
|
||||||
if body_len < 2
|
|| self.body.last().unwrap_or(&"").trim() != "```"
|
||||||
|| !body[0].trim().starts_with("```")
|
|
||||||
|| body.last().unwrap_or(&"").trim() != "```"
|
|
||||||
{
|
{
|
||||||
return Err!("Expected code block in command body. Add --help for details.");
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Expected code block in command body. Add --help for details.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let range = 1..checked!(body_len - 1)?;
|
let appservice_config_body = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
|
||||||
let appservice_config_body = body[range].join("\n");
|
let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config_body);
|
||||||
let parsed_config = serde_yaml::from_str(&appservice_config_body);
|
|
||||||
match parsed_config {
|
match parsed_config {
|
||||||
| Err(e) => return Err!("Could not parse appservice config as YAML: {e}"),
|
|
||||||
| Ok(registration) => match self
|
| Ok(registration) => match self
|
||||||
.services
|
.services
|
||||||
.appservice
|
.appservice
|
||||||
.register_appservice(®istration, &appservice_config_body)
|
.register_appservice(®istration, &appservice_config_body)
|
||||||
.await
|
.await
|
||||||
.map(|()| registration.id)
|
|
||||||
{
|
{
|
||||||
| Err(e) => return Err!("Failed to register appservice: {e}"),
|
| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
| Ok(id) => write!(self, "Appservice registered with ID: {id}"),
|
"Appservice registered with ID: {}",
|
||||||
|
registration.id
|
||||||
|
))),
|
||||||
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to register appservice: {e}"
|
||||||
|
))),
|
||||||
},
|
},
|
||||||
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Could not parse appservice config as YAML: {e}"
|
||||||
|
))),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn unregister(&self, appservice_identifier: String) -> Result {
|
pub(super) async fn unregister(
|
||||||
|
&self,
|
||||||
|
appservice_identifier: String,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
match self
|
match self
|
||||||
.services
|
.services
|
||||||
.appservice
|
.appservice
|
||||||
.unregister_appservice(&appservice_identifier)
|
.unregister_appservice(&appservice_identifier)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
| Err(e) => return Err!("Failed to unregister appservice: {e}"),
|
| Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")),
|
||||||
| Ok(()) => write!(self, "Appservice unregistered."),
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to unregister appservice: {e}"
|
||||||
|
))),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn show_appservice_config(&self, appservice_identifier: String) -> Result {
|
pub(super) async fn show_appservice_config(
|
||||||
|
&self,
|
||||||
|
appservice_identifier: String,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
match self
|
match self
|
||||||
.services
|
.services
|
||||||
.appservice
|
.appservice
|
||||||
.get_registration(&appservice_identifier)
|
.get_registration(&appservice_identifier)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
| None => return Err!("Appservice does not exist."),
|
|
||||||
| Some(config) => {
|
| Some(config) => {
|
||||||
let config_str = serde_yaml::to_string(&config)?;
|
let config_str = serde_yaml::to_string(&config)
|
||||||
write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```")
|
.expect("config should've been validated on register");
|
||||||
|
let output =
|
||||||
|
format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",);
|
||||||
|
Ok(RoomMessageEventContent::notice_markdown(output))
|
||||||
},
|
},
|
||||||
|
| None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn list_registered(&self) -> Result {
|
pub(super) async fn list_registered(&self) -> Result<RoomMessageEventContent> {
|
||||||
self.services
|
let appservices = self.services.appservice.iter_ids().await;
|
||||||
.appservice
|
let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", "));
|
||||||
.iter_ids()
|
Ok(RoomMessageEventContent::text_plain(output))
|
||||||
.collect()
|
|
||||||
.map(Ok)
|
|
||||||
.and_then(|appservices: Vec<_>| {
|
|
||||||
let len = appservices.len();
|
|
||||||
let list = appservices.join(", ");
|
|
||||||
write!(self, "Appservices ({len}): {list}")
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,14 +1,15 @@
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use conduwuit_macros::implement;
|
use conduwuit_macros::implement;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
|
use ruma::events::room::message::RoomMessageEventContent;
|
||||||
|
|
||||||
use crate::Context;
|
use crate::Command;
|
||||||
|
|
||||||
/// Uses the iterator in `src/database/key_value/users.rs` to iterator over
|
/// Uses the iterator in `src/database/key_value/users.rs` to iterator over
|
||||||
/// every user in our database (remote and local). Reports total count, any
|
/// every user in our database (remote and local). Reports total count, any
|
||||||
/// errors if there were any, etc
|
/// errors if there were any, etc
|
||||||
#[implement(Context, params = "<'_>")]
|
#[implement(Command, params = "<'_>")]
|
||||||
pub(super) async fn check_all_users(&self) -> Result {
|
pub(super) async fn check_all_users(&self) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let users = self.services.users.iter().collect::<Vec<_>>().await;
|
let users = self.services.users.iter().collect::<Vec<_>>().await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
@ -17,10 +18,11 @@ pub(super) async fn check_all_users(&self) -> Result {
|
||||||
let err_count = users.iter().filter(|_user| false).count();
|
let err_count = users.iter().filter(|_user| false).count();
|
||||||
let ok_count = users.iter().filter(|_user| true).count();
|
let ok_count = users.iter().filter(|_user| true).count();
|
||||||
|
|
||||||
self.write_str(&format!(
|
let message = format!(
|
||||||
"Database query completed in {query_time:?}:\n\n```\nTotal entries: \
|
"Database query completed in {query_time:?}:\n\n```\nTotal entries: \
|
||||||
{total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \
|
{total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \
|
||||||
{ok_count:?}\n```"
|
{ok_count:?}\n```"
|
||||||
))
|
);
|
||||||
.await
|
|
||||||
|
Ok(RoomMessageEventContent::notice_markdown(message))
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,13 +3,13 @@ use std::{fmt, time::SystemTime};
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use conduwuit_service::Services;
|
use conduwuit_service::Services;
|
||||||
use futures::{
|
use futures::{
|
||||||
Future, FutureExt, TryFutureExt,
|
Future, FutureExt,
|
||||||
io::{AsyncWriteExt, BufWriter},
|
io::{AsyncWriteExt, BufWriter},
|
||||||
lock::Mutex,
|
lock::Mutex,
|
||||||
};
|
};
|
||||||
use ruma::EventId;
|
use ruma::EventId;
|
||||||
|
|
||||||
pub(crate) struct Context<'a> {
|
pub(crate) struct Command<'a> {
|
||||||
pub(crate) services: &'a Services,
|
pub(crate) services: &'a Services,
|
||||||
pub(crate) body: &'a [&'a str],
|
pub(crate) body: &'a [&'a str],
|
||||||
pub(crate) timer: SystemTime,
|
pub(crate) timer: SystemTime,
|
||||||
|
@ -17,14 +17,14 @@ pub(crate) struct Context<'a> {
|
||||||
pub(crate) output: Mutex<BufWriter<Vec<u8>>>,
|
pub(crate) output: Mutex<BufWriter<Vec<u8>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Context<'_> {
|
impl Command<'_> {
|
||||||
pub(crate) fn write_fmt(
|
pub(crate) fn write_fmt(
|
||||||
&self,
|
&self,
|
||||||
arguments: fmt::Arguments<'_>,
|
arguments: fmt::Arguments<'_>,
|
||||||
) -> impl Future<Output = Result> + Send + '_ + use<'_> {
|
) -> impl Future<Output = Result> + Send + '_ + use<'_> {
|
||||||
let buf = format!("{arguments}");
|
let buf = format!("{arguments}");
|
||||||
self.output.lock().then(async move |mut output| {
|
self.output.lock().then(|mut output| async move {
|
||||||
output.write_all(buf.as_bytes()).map_err(Into::into).await
|
output.write_all(buf.as_bytes()).await.map_err(Into::into)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -32,8 +32,8 @@ impl Context<'_> {
|
||||||
&'a self,
|
&'a self,
|
||||||
s: &'a str,
|
s: &'a str,
|
||||||
) -> impl Future<Output = Result> + Send + 'a {
|
) -> impl Future<Output = Result> + Send + 'a {
|
||||||
self.output.lock().then(async move |mut output| {
|
self.output.lock().then(move |mut output| async move {
|
||||||
output.write_all(s.as_bytes()).map_err(Into::into).await
|
output.write_all(s.as_bytes()).await.map_err(Into::into)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -6,9 +6,7 @@ use std::{
|
||||||
};
|
};
|
||||||
|
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result, debug_error, err, info,
|
Error, PduEvent, PduId, RawPduId, Result, debug_error, err, info, trace, utils,
|
||||||
matrix::pdu::{PduEvent, PduId, RawPduId},
|
|
||||||
trace, utils,
|
|
||||||
utils::{
|
utils::{
|
||||||
stream::{IterStream, ReadyExt},
|
stream::{IterStream, ReadyExt},
|
||||||
string::EMPTY,
|
string::EMPTY,
|
||||||
|
@ -17,9 +15,10 @@ use conduwuit::{
|
||||||
};
|
};
|
||||||
use futures::{FutureExt, StreamExt, TryStreamExt};
|
use futures::{FutureExt, StreamExt, TryStreamExt};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
|
CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId,
|
||||||
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
|
ServerName,
|
||||||
api::federation::event::get_room_state,
|
api::{client::error::ErrorKind, federation::event::get_room_state},
|
||||||
|
events::room::message::RoomMessageEventContent,
|
||||||
};
|
};
|
||||||
use service::rooms::{
|
use service::rooms::{
|
||||||
short::{ShortEventId, ShortRoomId},
|
short::{ShortEventId, ShortRoomId},
|
||||||
|
@ -30,24 +29,28 @@ use tracing_subscriber::EnvFilter;
|
||||||
use crate::admin_command;
|
use crate::admin_command;
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn echo(&self, message: Vec<String>) -> Result {
|
pub(super) async fn echo(&self, message: Vec<String>) -> Result<RoomMessageEventContent> {
|
||||||
let message = message.join(" ");
|
let message = message.join(" ");
|
||||||
self.write_str(&message).await
|
|
||||||
|
Ok(RoomMessageEventContent::notice_plain(message))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result {
|
pub(super) async fn get_auth_chain(
|
||||||
|
&self,
|
||||||
|
event_id: Box<EventId>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else {
|
let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else {
|
||||||
return Err!("Event not found.");
|
return Ok(RoomMessageEventContent::notice_plain("Event not found."));
|
||||||
};
|
};
|
||||||
|
|
||||||
let room_id_str = event
|
let room_id_str = event
|
||||||
.get("room_id")
|
.get("room_id")
|
||||||
.and_then(CanonicalJsonValue::as_str)
|
.and_then(|val| val.as_str())
|
||||||
.ok_or_else(|| err!(Database("Invalid event in database")))?;
|
.ok_or_else(|| Error::bad_database("Invalid event in database"))?;
|
||||||
|
|
||||||
let room_id = <&RoomId>::try_from(room_id_str)
|
let room_id = <&RoomId>::try_from(room_id_str)
|
||||||
.map_err(|_| err!(Database("Invalid room id field in event in database")))?;
|
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
|
||||||
|
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
let count = self
|
let count = self
|
||||||
|
@ -60,39 +63,51 @@ pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result {
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let elapsed = start.elapsed();
|
let elapsed = start.elapsed();
|
||||||
let out = format!("Loaded auth chain with length {count} in {elapsed:?}");
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Loaded auth chain with length {count} in {elapsed:?}"
|
||||||
self.write_str(&out).await
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn parse_pdu(&self) -> Result {
|
pub(super) async fn parse_pdu(&self) -> Result<RoomMessageEventContent> {
|
||||||
if self.body.len() < 2
|
if self.body.len() < 2
|
||||||
|| !self.body[0].trim().starts_with("```")
|
|| !self.body[0].trim().starts_with("```")
|
||||||
|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
|
|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
|
||||||
{
|
{
|
||||||
return Err!("Expected code block in command body. Add --help for details.");
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Expected code block in command body. Add --help for details.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let string = self.body[1..self.body.len().saturating_sub(1)].join("\n");
|
let string = self.body[1..self.body.len().saturating_sub(1)].join("\n");
|
||||||
match serde_json::from_str(&string) {
|
match serde_json::from_str(&string) {
|
||||||
| Err(e) => return Err!("Invalid json in command body: {e}"),
|
|
||||||
| Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) {
|
| Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) {
|
||||||
| Err(e) => return Err!("Could not parse PDU JSON: {e:?}"),
|
|
||||||
| Ok(hash) => {
|
| Ok(hash) => {
|
||||||
let event_id = OwnedEventId::parse(format!("${hash}"));
|
let event_id = OwnedEventId::parse(format!("${hash}"));
|
||||||
match serde_json::from_value::<PduEvent>(serde_json::to_value(value)?) {
|
|
||||||
| Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"),
|
match serde_json::from_value::<PduEvent>(
|
||||||
| Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"),
|
serde_json::to_value(value).expect("value is json"),
|
||||||
|
) {
|
||||||
|
| Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"EventId: {event_id:?}\n{pdu:#?}"
|
||||||
|
))),
|
||||||
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"EventId: {event_id:?}\nCould not parse event: {e}"
|
||||||
|
))),
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Could not parse PDU JSON: {e:?}"
|
||||||
|
))),
|
||||||
},
|
},
|
||||||
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Invalid json in command body: {e}"
|
||||||
|
))),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
|
pub(super) async fn get_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
|
||||||
let mut outlier = false;
|
let mut outlier = false;
|
||||||
let mut pdu_json = self
|
let mut pdu_json = self
|
||||||
.services
|
.services
|
||||||
|
@ -107,18 +122,21 @@ pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
|
||||||
}
|
}
|
||||||
|
|
||||||
match pdu_json {
|
match pdu_json {
|
||||||
| Err(_) => return Err!("PDU not found locally."),
|
|
||||||
| Ok(json) => {
|
| Ok(json) => {
|
||||||
let text = serde_json::to_string_pretty(&json)?;
|
let json_text =
|
||||||
let msg = if outlier {
|
serde_json::to_string_pretty(&json).expect("canonical json is valid json");
|
||||||
"Outlier (Rejected / Soft Failed) PDU found in our database"
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
} else {
|
"{}\n```json\n{}\n```",
|
||||||
"PDU found in our database"
|
if outlier {
|
||||||
};
|
"Outlier (Rejected / Soft Failed) PDU found in our database"
|
||||||
write!(self, "{msg}\n```json\n{text}\n```",)
|
} else {
|
||||||
|
"PDU found in our database"
|
||||||
|
},
|
||||||
|
json_text
|
||||||
|
)))
|
||||||
},
|
},
|
||||||
|
| Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -126,7 +144,7 @@ pub(super) async fn get_short_pdu(
|
||||||
&self,
|
&self,
|
||||||
shortroomid: ShortRoomId,
|
shortroomid: ShortRoomId,
|
||||||
shorteventid: ShortEventId,
|
shorteventid: ShortEventId,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let pdu_id: RawPduId = PduId {
|
let pdu_id: RawPduId = PduId {
|
||||||
shortroomid,
|
shortroomid,
|
||||||
shorteventid: shorteventid.into(),
|
shorteventid: shorteventid.into(),
|
||||||
|
@ -141,33 +159,41 @@ pub(super) async fn get_short_pdu(
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
match pdu_json {
|
match pdu_json {
|
||||||
| Err(_) => return Err!("PDU not found locally."),
|
|
||||||
| Ok(json) => {
|
| Ok(json) => {
|
||||||
let json_text = serde_json::to_string_pretty(&json)?;
|
let json_text =
|
||||||
write!(self, "```json\n{json_text}\n```")
|
serde_json::to_string_pretty(&json).expect("canonical json is valid json");
|
||||||
|
Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",)))
|
||||||
},
|
},
|
||||||
|
| Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: bool) -> Result {
|
pub(super) async fn get_remote_pdu_list(
|
||||||
|
&self,
|
||||||
|
server: Box<ServerName>,
|
||||||
|
force: bool,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
if !self.services.server.config.allow_federation {
|
if !self.services.server.config.allow_federation {
|
||||||
return Err!("Federation is disabled on this homeserver.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Federation is disabled on this homeserver.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if server == self.services.globals.server_name() {
|
if server == self.services.globals.server_name() {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
|
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
|
||||||
fetching local PDUs from the database.",
|
fetching local PDUs from the database.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if self.body.len() < 2
|
if self.body.len() < 2
|
||||||
|| !self.body[0].trim().starts_with("```")
|
|| !self.body[0].trim().starts_with("```")
|
||||||
|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
|
|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
|
||||||
{
|
{
|
||||||
return Err!("Expected code block in command body. Add --help for details.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Expected code block in command body. Add --help for details.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let list = self
|
let list = self
|
||||||
|
@ -181,19 +207,18 @@ pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: b
|
||||||
let mut failed_count: usize = 0;
|
let mut failed_count: usize = 0;
|
||||||
let mut success_count: usize = 0;
|
let mut success_count: usize = 0;
|
||||||
|
|
||||||
for event_id in list {
|
for pdu in list {
|
||||||
if force {
|
if force {
|
||||||
match self
|
match self.get_remote_pdu(Box::from(pdu), server.clone()).await {
|
||||||
.get_remote_pdu(event_id.to_owned(), server.clone())
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
failed_count = failed_count.saturating_add(1);
|
failed_count = failed_count.saturating_add(1);
|
||||||
self.services
|
self.services
|
||||||
.admin
|
.admin
|
||||||
.send_text(&format!("Failed to get remote PDU, ignoring error: {e}"))
|
.send_message(RoomMessageEventContent::text_plain(format!(
|
||||||
.await;
|
"Failed to get remote PDU, ignoring error: {e}"
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
warn!("Failed to get remote PDU, ignoring error: {e}");
|
warn!("Failed to get remote PDU, ignoring error: {e}");
|
||||||
},
|
},
|
||||||
| _ => {
|
| _ => {
|
||||||
|
@ -201,48 +226,44 @@ pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: b
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
self.get_remote_pdu(event_id.to_owned(), server.clone())
|
self.get_remote_pdu(Box::from(pdu), server.clone()).await?;
|
||||||
.await?;
|
|
||||||
success_count = success_count.saturating_add(1);
|
success_count = success_count.saturating_add(1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let out =
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures");
|
"Fetched {success_count} remote PDUs successfully with {failed_count} failures"
|
||||||
|
)))
|
||||||
self.write_str(&out).await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_remote_pdu(
|
pub(super) async fn get_remote_pdu(
|
||||||
&self,
|
&self,
|
||||||
event_id: OwnedEventId,
|
event_id: Box<EventId>,
|
||||||
server: OwnedServerName,
|
server: Box<ServerName>,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
if !self.services.server.config.allow_federation {
|
if !self.services.server.config.allow_federation {
|
||||||
return Err!("Federation is disabled on this homeserver.");
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Federation is disabled on this homeserver.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if server == self.services.globals.server_name() {
|
if server == self.services.globals.server_name() {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
|
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
|
||||||
fetching local PDUs.",
|
fetching local PDUs.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
match self
|
match self
|
||||||
.services
|
.services
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request {
|
.send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request {
|
||||||
event_id: event_id.clone(),
|
event_id: event_id.clone().into(),
|
||||||
include_unredacted_content: None,
|
include_unredacted_content: None,
|
||||||
})
|
})
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
| Err(e) =>
|
|
||||||
return Err!(
|
|
||||||
"Remote server did not have PDU or failed sending request to remote server: {e}"
|
|
||||||
),
|
|
||||||
| Ok(response) => {
|
| Ok(response) => {
|
||||||
let json: CanonicalJsonObject =
|
let json: CanonicalJsonObject =
|
||||||
serde_json::from_str(response.pdu.get()).map_err(|e| {
|
serde_json::from_str(response.pdu.get()).map_err(|e| {
|
||||||
|
@ -250,9 +271,10 @@ pub(super) async fn get_remote_pdu(
|
||||||
"Requested event ID {event_id} from server but failed to convert from \
|
"Requested event ID {event_id} from server but failed to convert from \
|
||||||
RawValue to CanonicalJsonObject (malformed event/response?): {e}"
|
RawValue to CanonicalJsonObject (malformed event/response?): {e}"
|
||||||
);
|
);
|
||||||
err!(Request(Unknown(
|
Error::BadRequest(
|
||||||
"Received response from server but failed to parse PDU"
|
ErrorKind::Unknown,
|
||||||
)))
|
"Received response from server but failed to parse PDU",
|
||||||
|
)
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
trace!("Attempting to parse PDU: {:?}", &response.pdu);
|
trace!("Attempting to parse PDU: {:?}", &response.pdu);
|
||||||
|
@ -262,7 +284,6 @@ pub(super) async fn get_remote_pdu(
|
||||||
.rooms
|
.rooms
|
||||||
.event_handler
|
.event_handler
|
||||||
.parse_incoming_pdu(&response.pdu)
|
.parse_incoming_pdu(&response.pdu)
|
||||||
.boxed()
|
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let (event_id, value, room_id) = match parsed_result {
|
let (event_id, value, room_id) = match parsed_result {
|
||||||
|
@ -270,7 +291,9 @@ pub(super) async fn get_remote_pdu(
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
warn!("Failed to parse PDU: {e}");
|
warn!("Failed to parse PDU: {e}");
|
||||||
info!("Full PDU: {:?}", &response.pdu);
|
info!("Full PDU: {:?}", &response.pdu);
|
||||||
return Err!("Failed to parse PDU remote server {server} sent us: {e}");
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to parse PDU remote server {server} sent us: {e}"
|
||||||
|
)));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -282,18 +305,30 @@ pub(super) async fn get_remote_pdu(
|
||||||
.rooms
|
.rooms
|
||||||
.timeline
|
.timeline
|
||||||
.backfill_pdu(&server, response.pdu)
|
.backfill_pdu(&server, response.pdu)
|
||||||
|
.boxed()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let text = serde_json::to_string_pretty(&json)?;
|
let json_text =
|
||||||
let msg = "Got PDU from specified server and handled as backfilled";
|
serde_json::to_string_pretty(&json).expect("canonical json is valid json");
|
||||||
write!(self, "{msg}. Event body:\n```json\n{text}\n```")
|
|
||||||
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
|
"{}\n```json\n{}\n```",
|
||||||
|
"Got PDU from specified server and handled as backfilled PDU successfully. \
|
||||||
|
Event body:",
|
||||||
|
json_text
|
||||||
|
)))
|
||||||
},
|
},
|
||||||
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Remote server did not have PDU or failed sending request to remote server: {e}"
|
||||||
|
))),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
|
pub(super) async fn get_room_state(
|
||||||
|
&self,
|
||||||
|
room: OwnedRoomOrAliasId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let room_id = self.services.rooms.alias.resolve(&room).await?;
|
let room_id = self.services.rooms.alias.resolve(&room).await?;
|
||||||
let room_state: Vec<_> = self
|
let room_state: Vec<_> = self
|
||||||
.services
|
.services
|
||||||
|
@ -305,24 +340,28 @@ pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
if room_state.is_empty() {
|
if room_state.is_empty() {
|
||||||
return Err!("Unable to find room state in our database (vector is empty)",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Unable to find room state in our database (vector is empty)",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let json = serde_json::to_string_pretty(&room_state).map_err(|e| {
|
let json = serde_json::to_string_pretty(&room_state).map_err(|e| {
|
||||||
err!(Database(
|
warn!("Failed converting room state vector in our database to pretty JSON: {e}");
|
||||||
|
Error::bad_database(
|
||||||
"Failed to convert room state events to pretty JSON, possible invalid room state \
|
"Failed to convert room state events to pretty JSON, possible invalid room state \
|
||||||
events in our database {e}",
|
events in our database",
|
||||||
))
|
)
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
let out = format!("```json\n{json}\n```");
|
Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json}\n```")))
|
||||||
self.write_str(&out).await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn ping(&self, server: OwnedServerName) -> Result {
|
pub(super) async fn ping(&self, server: Box<ServerName>) -> Result<RoomMessageEventContent> {
|
||||||
if server == self.services.globals.server_name() {
|
if server == self.services.globals.server_name() {
|
||||||
return Err!("Not allowed to send federation requests to ourselves.");
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Not allowed to send federation requests to ourselves.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -336,27 +375,35 @@ pub(super) async fn ping(&self, server: OwnedServerName) -> Result {
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
| Err(e) => {
|
|
||||||
return Err!("Failed sending federation request to specified server:\n\n{e}");
|
|
||||||
},
|
|
||||||
| Ok(response) => {
|
| Ok(response) => {
|
||||||
let ping_time = timer.elapsed();
|
let ping_time = timer.elapsed();
|
||||||
|
|
||||||
let json_text_res = serde_json::to_string_pretty(&response.server);
|
let json_text_res = serde_json::to_string_pretty(&response.server);
|
||||||
|
|
||||||
let out = if let Ok(json) = json_text_res {
|
if let Ok(json) = json_text_res {
|
||||||
format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```")
|
return Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
} else {
|
"Got response which took {ping_time:?} time:\n```json\n{json}\n```"
|
||||||
format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}")
|
)));
|
||||||
};
|
}
|
||||||
|
|
||||||
write!(self, "{out}")
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Got non-JSON response which took {ping_time:?} time:\n{response:?}"
|
||||||
|
)))
|
||||||
|
},
|
||||||
|
| Err(e) => {
|
||||||
|
warn!(
|
||||||
|
"Failed sending federation request to specified server from ping debug command: \
|
||||||
|
{e}"
|
||||||
|
);
|
||||||
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed sending federation request to specified server:\n\n{e}",
|
||||||
|
)))
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn force_device_list_updates(&self) -> Result {
|
pub(super) async fn force_device_list_updates(&self) -> Result<RoomMessageEventContent> {
|
||||||
// Force E2EE device list updates for all users
|
// Force E2EE device list updates for all users
|
||||||
self.services
|
self.services
|
||||||
.users
|
.users
|
||||||
|
@ -364,17 +411,27 @@ pub(super) async fn force_device_list_updates(&self) -> Result {
|
||||||
.for_each(|user_id| self.services.users.mark_device_key_update(user_id))
|
.for_each(|user_id| self.services.users.mark_device_key_update(user_id))
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
write!(self, "Marked all devices for all users as having new keys to update").await
|
Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Marked all devices for all users as having new keys to update",
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool) -> Result {
|
pub(super) async fn change_log_level(
|
||||||
|
&self,
|
||||||
|
filter: Option<String>,
|
||||||
|
reset: bool,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let handles = &["console"];
|
let handles = &["console"];
|
||||||
|
|
||||||
if reset {
|
if reset {
|
||||||
let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) {
|
let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) {
|
||||||
| Ok(s) => s,
|
| Ok(s) => s,
|
||||||
| Err(e) => return Err!("Log level from config appears to be invalid now: {e}"),
|
| Err(e) => {
|
||||||
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Log level from config appears to be invalid now: {e}"
|
||||||
|
)));
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
match self
|
match self
|
||||||
|
@ -384,12 +441,16 @@ pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool)
|
||||||
.reload
|
.reload
|
||||||
.reload(&old_filter_layer, Some(handles))
|
.reload(&old_filter_layer, Some(handles))
|
||||||
{
|
{
|
||||||
| Err(e) =>
|
|
||||||
return Err!("Failed to modify and reload the global tracing log level: {e}"),
|
|
||||||
| Ok(()) => {
|
| Ok(()) => {
|
||||||
let value = &self.services.server.config.log;
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
let out = format!("Successfully changed log level back to config value {value}");
|
"Successfully changed log level back to config value {}",
|
||||||
return self.write_str(&out).await;
|
self.services.server.config.log
|
||||||
|
)));
|
||||||
|
},
|
||||||
|
| Err(e) => {
|
||||||
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to modify and reload the global tracing log level: {e}"
|
||||||
|
)));
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -397,7 +458,11 @@ pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool)
|
||||||
if let Some(filter) = filter {
|
if let Some(filter) = filter {
|
||||||
let new_filter_layer = match EnvFilter::try_new(filter) {
|
let new_filter_layer = match EnvFilter::try_new(filter) {
|
||||||
| Ok(s) => s,
|
| Ok(s) => s,
|
||||||
| Err(e) => return Err!("Invalid log level filter specified: {e}"),
|
| Err(e) => {
|
||||||
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Invalid log level filter specified: {e}"
|
||||||
|
)));
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
match self
|
match self
|
||||||
|
@ -407,75 +472,90 @@ pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool)
|
||||||
.reload
|
.reload
|
||||||
.reload(&new_filter_layer, Some(handles))
|
.reload(&new_filter_layer, Some(handles))
|
||||||
{
|
{
|
||||||
| Ok(()) => return self.write_str("Successfully changed log level").await,
|
| Ok(()) => {
|
||||||
| Err(e) =>
|
return Ok(RoomMessageEventContent::text_plain("Successfully changed log level"));
|
||||||
return Err!("Failed to modify and reload the global tracing log level: {e}"),
|
},
|
||||||
|
| Err(e) => {
|
||||||
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to modify and reload the global tracing log level: {e}"
|
||||||
|
)));
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Err!("No log level was specified.")
|
Ok(RoomMessageEventContent::text_plain("No log level was specified."))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn sign_json(&self) -> Result {
|
pub(super) async fn sign_json(&self) -> Result<RoomMessageEventContent> {
|
||||||
if self.body.len() < 2
|
if self.body.len() < 2
|
||||||
|| !self.body[0].trim().starts_with("```")
|
|| !self.body[0].trim().starts_with("```")
|
||||||
|| self.body.last().unwrap_or(&"").trim() != "```"
|
|| self.body.last().unwrap_or(&"").trim() != "```"
|
||||||
{
|
{
|
||||||
return Err!("Expected code block in command body. Add --help for details.");
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Expected code block in command body. Add --help for details.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
|
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
|
||||||
match serde_json::from_str(&string) {
|
match serde_json::from_str(&string) {
|
||||||
| Err(e) => return Err!("Invalid json: {e}"),
|
|
||||||
| Ok(mut value) => {
|
| Ok(mut value) => {
|
||||||
self.services.server_keys.sign_json(&mut value)?;
|
self.services
|
||||||
let json_text = serde_json::to_string_pretty(&value)?;
|
.server_keys
|
||||||
write!(self, "{json_text}")
|
.sign_json(&mut value)
|
||||||
|
.expect("our request json is what ruma expects");
|
||||||
|
let json_text =
|
||||||
|
serde_json::to_string_pretty(&value).expect("canonical json is valid json");
|
||||||
|
Ok(RoomMessageEventContent::text_plain(json_text))
|
||||||
},
|
},
|
||||||
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn verify_json(&self) -> Result {
|
pub(super) async fn verify_json(&self) -> Result<RoomMessageEventContent> {
|
||||||
if self.body.len() < 2
|
if self.body.len() < 2
|
||||||
|| !self.body[0].trim().starts_with("```")
|
|| !self.body[0].trim().starts_with("```")
|
||||||
|| self.body.last().unwrap_or(&"").trim() != "```"
|
|| self.body.last().unwrap_or(&"").trim() != "```"
|
||||||
{
|
{
|
||||||
return Err!("Expected code block in command body. Add --help for details.");
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Expected code block in command body. Add --help for details.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
|
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
|
||||||
match serde_json::from_str::<CanonicalJsonObject>(&string) {
|
match serde_json::from_str::<CanonicalJsonObject>(&string) {
|
||||||
| Err(e) => return Err!("Invalid json: {e}"),
|
|
||||||
| Ok(value) => match self.services.server_keys.verify_json(&value, None).await {
|
| Ok(value) => match self.services.server_keys.verify_json(&value, None).await {
|
||||||
| Err(e) => return Err!("Signature verification failed: {e}"),
|
| Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")),
|
||||||
| Ok(()) => write!(self, "Signature correct"),
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Signature verification failed: {e}"
|
||||||
|
))),
|
||||||
},
|
},
|
||||||
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result {
|
pub(super) async fn verify_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
|
||||||
use ruma::signatures::Verified;
|
|
||||||
|
|
||||||
let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?;
|
let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?;
|
||||||
|
|
||||||
event.remove("event_id");
|
event.remove("event_id");
|
||||||
let msg = match self.services.server_keys.verify_event(&event, None).await {
|
let msg = match self.services.server_keys.verify_event(&event, None).await {
|
||||||
|
| Ok(ruma::signatures::Verified::Signatures) =>
|
||||||
|
"signatures OK, but content hash failed (redaction).",
|
||||||
|
| Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.",
|
||||||
| Err(e) => return Err(e),
|
| Err(e) => return Err(e),
|
||||||
| Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).",
|
|
||||||
| Ok(Verified::All) => "signatures and hashes OK.",
|
|
||||||
};
|
};
|
||||||
|
|
||||||
self.write_str(msg).await
|
Ok(RoomMessageEventContent::notice_plain(msg))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
#[tracing::instrument(skip(self))]
|
#[tracing::instrument(skip(self))]
|
||||||
pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
|
pub(super) async fn first_pdu_in_room(
|
||||||
|
&self,
|
||||||
|
room_id: Box<RoomId>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
if !self
|
if !self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -483,7 +563,9 @@ pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
|
||||||
.server_in_room(&self.services.server.name, &room_id)
|
.server_in_room(&self.services.server.name, &room_id)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!("We are not participating in the room / we don't know about the room ID.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"We are not participating in the room / we don't know about the room ID.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let first_pdu = self
|
let first_pdu = self
|
||||||
|
@ -492,15 +574,17 @@ pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
|
||||||
.timeline
|
.timeline
|
||||||
.first_pdu_in_room(&room_id)
|
.first_pdu_in_room(&room_id)
|
||||||
.await
|
.await
|
||||||
.map_err(|_| err!(Database("Failed to find the first PDU in database")))?;
|
.map_err(|_| Error::bad_database("Failed to find the first PDU in database"))?;
|
||||||
|
|
||||||
let out = format!("{first_pdu:?}");
|
Ok(RoomMessageEventContent::text_plain(format!("{first_pdu:?}")))
|
||||||
self.write_str(&out).await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
#[tracing::instrument(skip(self))]
|
#[tracing::instrument(skip(self))]
|
||||||
pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
|
pub(super) async fn latest_pdu_in_room(
|
||||||
|
&self,
|
||||||
|
room_id: Box<RoomId>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
if !self
|
if !self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -508,7 +592,9 @@ pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
|
||||||
.server_in_room(&self.services.server.name, &room_id)
|
.server_in_room(&self.services.server.name, &room_id)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!("We are not participating in the room / we don't know about the room ID.");
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"We are not participating in the room / we don't know about the room ID.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let latest_pdu = self
|
let latest_pdu = self
|
||||||
|
@ -517,19 +603,18 @@ pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
|
||||||
.timeline
|
.timeline
|
||||||
.latest_pdu_in_room(&room_id)
|
.latest_pdu_in_room(&room_id)
|
||||||
.await
|
.await
|
||||||
.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?;
|
.map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?;
|
||||||
|
|
||||||
let out = format!("{latest_pdu:?}");
|
Ok(RoomMessageEventContent::text_plain(format!("{latest_pdu:?}")))
|
||||||
self.write_str(&out).await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
#[tracing::instrument(skip(self))]
|
#[tracing::instrument(skip(self))]
|
||||||
pub(super) async fn force_set_room_state_from_server(
|
pub(super) async fn force_set_room_state_from_server(
|
||||||
&self,
|
&self,
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
server_name: OwnedServerName,
|
server_name: Box<ServerName>,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
if !self
|
if !self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -537,7 +622,9 @@ pub(super) async fn force_set_room_state_from_server(
|
||||||
.server_in_room(&self.services.server.name, &room_id)
|
.server_in_room(&self.services.server.name, &room_id)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!("We are not participating in the room / we don't know about the room ID.");
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"We are not participating in the room / we don't know about the room ID.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let first_pdu = self
|
let first_pdu = self
|
||||||
|
@ -546,7 +633,7 @@ pub(super) async fn force_set_room_state_from_server(
|
||||||
.timeline
|
.timeline
|
||||||
.latest_pdu_in_room(&room_id)
|
.latest_pdu_in_room(&room_id)
|
||||||
.await
|
.await
|
||||||
.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?;
|
.map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?;
|
||||||
|
|
||||||
let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
|
let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
|
||||||
|
|
||||||
|
@ -556,9 +643,10 @@ pub(super) async fn force_set_room_state_from_server(
|
||||||
.services
|
.services
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(&server_name, get_room_state::v1::Request {
|
.send_federation_request(&server_name, get_room_state::v1::Request {
|
||||||
room_id: room_id.clone(),
|
room_id: room_id.clone().into(),
|
||||||
event_id: first_pdu.event_id.clone(),
|
event_id: first_pdu.event_id.clone(),
|
||||||
})
|
})
|
||||||
|
.boxed()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
for pdu in remote_state_response.pdus.clone() {
|
for pdu in remote_state_response.pdus.clone() {
|
||||||
|
@ -567,6 +655,7 @@ pub(super) async fn force_set_room_state_from_server(
|
||||||
.rooms
|
.rooms
|
||||||
.event_handler
|
.event_handler
|
||||||
.parse_incoming_pdu(&pdu)
|
.parse_incoming_pdu(&pdu)
|
||||||
|
.boxed()
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
| Ok(t) => t,
|
| Ok(t) => t,
|
||||||
|
@ -630,6 +719,7 @@ pub(super) async fn force_set_room_state_from_server(
|
||||||
.rooms
|
.rooms
|
||||||
.event_handler
|
.event_handler
|
||||||
.resolve_state(&room_id, &room_version, state)
|
.resolve_state(&room_id, &room_version, state)
|
||||||
|
.boxed()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
info!("Forcing new room state");
|
info!("Forcing new room state");
|
||||||
|
@ -645,7 +735,6 @@ pub(super) async fn force_set_room_state_from_server(
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await;
|
let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await;
|
||||||
|
|
||||||
self.services
|
self.services
|
||||||
.rooms
|
.rooms
|
||||||
.state
|
.state
|
||||||
|
@ -662,18 +751,21 @@ pub(super) async fn force_set_room_state_from_server(
|
||||||
.update_joined_count(&room_id)
|
.update_joined_count(&room_id)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
self.write_str("Successfully forced the room state from the requested remote server.")
|
drop(state_lock);
|
||||||
.await
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Successfully forced the room state from the requested remote server.",
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_signing_keys(
|
pub(super) async fn get_signing_keys(
|
||||||
&self,
|
&self,
|
||||||
server_name: Option<OwnedServerName>,
|
server_name: Option<Box<ServerName>>,
|
||||||
notary: Option<OwnedServerName>,
|
notary: Option<Box<ServerName>>,
|
||||||
query: bool,
|
query: bool,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone());
|
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into());
|
||||||
|
|
||||||
if let Some(notary) = notary {
|
if let Some(notary) = notary {
|
||||||
let signing_keys = self
|
let signing_keys = self
|
||||||
|
@ -682,8 +774,9 @@ pub(super) async fn get_signing_keys(
|
||||||
.notary_request(¬ary, &server_name)
|
.notary_request(¬ary, &server_name)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let out = format!("```rs\n{signing_keys:#?}\n```");
|
return Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
return self.write_str(&out).await;
|
"```rs\n{signing_keys:#?}\n```"
|
||||||
|
)));
|
||||||
}
|
}
|
||||||
|
|
||||||
let signing_keys = if query {
|
let signing_keys = if query {
|
||||||
|
@ -698,13 +791,17 @@ pub(super) async fn get_signing_keys(
|
||||||
.await?
|
.await?
|
||||||
};
|
};
|
||||||
|
|
||||||
let out = format!("```rs\n{signing_keys:#?}\n```");
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
self.write_str(&out).await
|
"```rs\n{signing_keys:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_verify_keys(&self, server_name: Option<OwnedServerName>) -> Result {
|
pub(super) async fn get_verify_keys(
|
||||||
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone());
|
&self,
|
||||||
|
server_name: Option<Box<ServerName>>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
|
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into());
|
||||||
|
|
||||||
let keys = self
|
let keys = self
|
||||||
.services
|
.services
|
||||||
|
@ -719,24 +816,26 @@ pub(super) async fn get_verify_keys(&self, server_name: Option<OwnedServerName>)
|
||||||
writeln!(out, "| {key_id} | {key:?} |")?;
|
writeln!(out, "| {key_id} | {key:?} |")?;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&out).await
|
Ok(RoomMessageEventContent::notice_markdown(out))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn resolve_true_destination(
|
pub(super) async fn resolve_true_destination(
|
||||||
&self,
|
&self,
|
||||||
server_name: OwnedServerName,
|
server_name: Box<ServerName>,
|
||||||
no_cache: bool,
|
no_cache: bool,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
if !self.services.server.config.allow_federation {
|
if !self.services.server.config.allow_federation {
|
||||||
return Err!("Federation is disabled on this homeserver.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Federation is disabled on this homeserver.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if server_name == self.services.server.name {
|
if server_name == self.services.server.name {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
|
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
|
||||||
fetching local PDUs.",
|
fetching local PDUs.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let actual = self
|
let actual = self
|
||||||
|
@ -745,12 +844,13 @@ pub(super) async fn resolve_true_destination(
|
||||||
.resolve_actual_dest(&server_name, !no_cache)
|
.resolve_actual_dest(&server_name, !no_cache)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host);
|
let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host,);
|
||||||
self.write_str(&msg).await
|
|
||||||
|
Ok(RoomMessageEventContent::text_markdown(msg))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result {
|
pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result<RoomMessageEventContent> {
|
||||||
const OPTS: &str = "abcdefghijklmnopqrstuvwxyz";
|
const OPTS: &str = "abcdefghijklmnopqrstuvwxyz";
|
||||||
|
|
||||||
let opts: String = OPTS
|
let opts: String = OPTS
|
||||||
|
@ -769,12 +869,13 @@ pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result {
|
||||||
self.write_str("```\n").await?;
|
self.write_str("```\n").await?;
|
||||||
self.write_str(&stats).await?;
|
self.write_str(&stats).await?;
|
||||||
self.write_str("\n```").await?;
|
self.write_str("\n```").await?;
|
||||||
Ok(())
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(tokio_unstable)]
|
#[cfg(tokio_unstable)]
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn runtime_metrics(&self) -> Result {
|
pub(super) async fn runtime_metrics(&self) -> Result<RoomMessageEventContent> {
|
||||||
let out = self.services.server.metrics.runtime_metrics().map_or_else(
|
let out = self.services.server.metrics.runtime_metrics().map_or_else(
|
||||||
|| "Runtime metrics are not available.".to_owned(),
|
|| "Runtime metrics are not available.".to_owned(),
|
||||||
|metrics| {
|
|metrics| {
|
||||||
|
@ -787,51 +888,51 @@ pub(super) async fn runtime_metrics(&self) -> Result {
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
self.write_str(&out).await
|
Ok(RoomMessageEventContent::text_markdown(out))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(tokio_unstable))]
|
#[cfg(not(tokio_unstable))]
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn runtime_metrics(&self) -> Result {
|
pub(super) async fn runtime_metrics(&self) -> Result<RoomMessageEventContent> {
|
||||||
self.write_str("Runtime metrics require building with `tokio_unstable`.")
|
Ok(RoomMessageEventContent::text_markdown(
|
||||||
.await
|
"Runtime metrics require building with `tokio_unstable`.",
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(tokio_unstable)]
|
#[cfg(tokio_unstable)]
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn runtime_interval(&self) -> Result {
|
pub(super) async fn runtime_interval(&self) -> Result<RoomMessageEventContent> {
|
||||||
let out = self.services.server.metrics.runtime_interval().map_or_else(
|
let out = self.services.server.metrics.runtime_interval().map_or_else(
|
||||||
|| "Runtime metrics are not available.".to_owned(),
|
|| "Runtime metrics are not available.".to_owned(),
|
||||||
|metrics| format!("```rs\n{metrics:#?}\n```"),
|
|metrics| format!("```rs\n{metrics:#?}\n```"),
|
||||||
);
|
);
|
||||||
|
|
||||||
self.write_str(&out).await
|
Ok(RoomMessageEventContent::text_markdown(out))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(tokio_unstable))]
|
#[cfg(not(tokio_unstable))]
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn runtime_interval(&self) -> Result {
|
pub(super) async fn runtime_interval(&self) -> Result<RoomMessageEventContent> {
|
||||||
self.write_str("Runtime metrics require building with `tokio_unstable`.")
|
Ok(RoomMessageEventContent::text_markdown(
|
||||||
.await
|
"Runtime metrics require building with `tokio_unstable`.",
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn time(&self) -> Result {
|
pub(super) async fn time(&self) -> Result<RoomMessageEventContent> {
|
||||||
let now = SystemTime::now();
|
let now = SystemTime::now();
|
||||||
let now = utils::time::format(now, "%+");
|
Ok(RoomMessageEventContent::text_markdown(utils::time::format(now, "%+")))
|
||||||
|
|
||||||
self.write_str(&now).await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn list_dependencies(&self, names: bool) -> Result {
|
pub(super) async fn list_dependencies(&self, names: bool) -> Result<RoomMessageEventContent> {
|
||||||
if names {
|
if names {
|
||||||
let out = info::cargo::dependencies_names().join(" ");
|
let out = info::cargo::dependencies_names().join(" ");
|
||||||
return self.write_str(&out).await;
|
return Ok(RoomMessageEventContent::notice_markdown(out));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut out = String::new();
|
|
||||||
let deps = info::cargo::dependencies();
|
let deps = info::cargo::dependencies();
|
||||||
|
let mut out = String::new();
|
||||||
writeln!(out, "| name | version | features |")?;
|
writeln!(out, "| name | version | features |")?;
|
||||||
writeln!(out, "| ---- | ------- | -------- |")?;
|
writeln!(out, "| ---- | ------- | -------- |")?;
|
||||||
for (name, dep) in deps {
|
for (name, dep) in deps {
|
||||||
|
@ -842,11 +943,10 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result {
|
||||||
} else {
|
} else {
|
||||||
String::new()
|
String::new()
|
||||||
};
|
};
|
||||||
|
|
||||||
writeln!(out, "| {name} | {version} | {feats} |")?;
|
writeln!(out, "| {name} | {version} | {feats} |")?;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&out).await
|
Ok(RoomMessageEventContent::notice_markdown(out))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -854,7 +954,7 @@ pub(super) async fn database_stats(
|
||||||
&self,
|
&self,
|
||||||
property: Option<String>,
|
property: Option<String>,
|
||||||
map: Option<String>,
|
map: Option<String>,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let map_name = map.as_ref().map_or(EMPTY, String::as_str);
|
let map_name = map.as_ref().map_or(EMPTY, String::as_str);
|
||||||
let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned());
|
let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned());
|
||||||
self.services
|
self.services
|
||||||
|
@ -866,11 +966,17 @@ pub(super) async fn database_stats(
|
||||||
let res = map.property(&property).expect("invalid property");
|
let res = map.property(&property).expect("invalid property");
|
||||||
writeln!(self, "##### {name}:\n```\n{}\n```", res.trim())
|
writeln!(self, "##### {name}:\n```\n{}\n```", res.trim())
|
||||||
})
|
})
|
||||||
.await
|
.await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::notice_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn database_files(&self, map: Option<String>, level: Option<i32>) -> Result {
|
pub(super) async fn database_files(
|
||||||
|
&self,
|
||||||
|
map: Option<String>,
|
||||||
|
level: Option<i32>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let mut files: Vec<_> = self.services.db.db.file_list().collect::<Result<_>>()?;
|
let mut files: Vec<_> = self.services.db.db.file_list().collect::<Result<_>>()?;
|
||||||
|
|
||||||
files.sort_by_key(|f| f.name.clone());
|
files.sort_by_key(|f| f.name.clone());
|
||||||
|
@ -897,12 +1003,16 @@ pub(super) async fn database_files(&self, map: Option<String>, level: Option<i32
|
||||||
file.column_family_name,
|
file.column_family_name,
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
.await
|
.await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::notice_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn trim_memory(&self) -> Result {
|
pub(super) async fn trim_memory(&self) -> Result<RoomMessageEventContent> {
|
||||||
conduwuit::alloc::trim(None)?;
|
conduwuit::alloc::trim(None)?;
|
||||||
|
|
||||||
writeln!(self, "done").await
|
writeln!(self, "done").await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::notice_plain(""))
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,7 +3,7 @@ pub(crate) mod tester;
|
||||||
|
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName};
|
use ruma::{EventId, OwnedRoomOrAliasId, RoomId, ServerName};
|
||||||
use service::rooms::short::{ShortEventId, ShortRoomId};
|
use service::rooms::short::{ShortEventId, ShortRoomId};
|
||||||
|
|
||||||
use self::tester::TesterCommand;
|
use self::tester::TesterCommand;
|
||||||
|
@ -20,7 +20,7 @@ pub(super) enum DebugCommand {
|
||||||
/// - Get the auth_chain of a PDU
|
/// - Get the auth_chain of a PDU
|
||||||
GetAuthChain {
|
GetAuthChain {
|
||||||
/// An event ID (the $ character followed by the base64 reference hash)
|
/// An event ID (the $ character followed by the base64 reference hash)
|
||||||
event_id: OwnedEventId,
|
event_id: Box<EventId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Parse and print a PDU from a JSON
|
/// - Parse and print a PDU from a JSON
|
||||||
|
@ -35,7 +35,7 @@ pub(super) enum DebugCommand {
|
||||||
/// - Retrieve and print a PDU by EventID from the conduwuit database
|
/// - Retrieve and print a PDU by EventID from the conduwuit database
|
||||||
GetPdu {
|
GetPdu {
|
||||||
/// An event ID (a $ followed by the base64 reference hash)
|
/// An event ID (a $ followed by the base64 reference hash)
|
||||||
event_id: OwnedEventId,
|
event_id: Box<EventId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Retrieve and print a PDU by PduId from the conduwuit database
|
/// - Retrieve and print a PDU by PduId from the conduwuit database
|
||||||
|
@ -52,11 +52,11 @@ pub(super) enum DebugCommand {
|
||||||
/// (following normal event auth rules, handles it as an incoming PDU).
|
/// (following normal event auth rules, handles it as an incoming PDU).
|
||||||
GetRemotePdu {
|
GetRemotePdu {
|
||||||
/// An event ID (a $ followed by the base64 reference hash)
|
/// An event ID (a $ followed by the base64 reference hash)
|
||||||
event_id: OwnedEventId,
|
event_id: Box<EventId>,
|
||||||
|
|
||||||
/// Argument for us to attempt to fetch the event from the
|
/// Argument for us to attempt to fetch the event from the
|
||||||
/// specified remote server.
|
/// specified remote server.
|
||||||
server: OwnedServerName,
|
server: Box<ServerName>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Same as `get-remote-pdu` but accepts a codeblock newline delimited
|
/// - Same as `get-remote-pdu` but accepts a codeblock newline delimited
|
||||||
|
@ -64,7 +64,7 @@ pub(super) enum DebugCommand {
|
||||||
GetRemotePduList {
|
GetRemotePduList {
|
||||||
/// Argument for us to attempt to fetch all the events from the
|
/// Argument for us to attempt to fetch all the events from the
|
||||||
/// specified remote server.
|
/// specified remote server.
|
||||||
server: OwnedServerName,
|
server: Box<ServerName>,
|
||||||
|
|
||||||
/// If set, ignores errors, else stops at the first error/failure.
|
/// If set, ignores errors, else stops at the first error/failure.
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
|
@ -88,10 +88,10 @@ pub(super) enum DebugCommand {
|
||||||
|
|
||||||
/// - Get and display signing keys from local cache or remote server.
|
/// - Get and display signing keys from local cache or remote server.
|
||||||
GetSigningKeys {
|
GetSigningKeys {
|
||||||
server_name: Option<OwnedServerName>,
|
server_name: Option<Box<ServerName>>,
|
||||||
|
|
||||||
#[arg(long)]
|
#[arg(long)]
|
||||||
notary: Option<OwnedServerName>,
|
notary: Option<Box<ServerName>>,
|
||||||
|
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
query: bool,
|
query: bool,
|
||||||
|
@ -99,14 +99,14 @@ pub(super) enum DebugCommand {
|
||||||
|
|
||||||
/// - Get and display signing keys from local cache or remote server.
|
/// - Get and display signing keys from local cache or remote server.
|
||||||
GetVerifyKeys {
|
GetVerifyKeys {
|
||||||
server_name: Option<OwnedServerName>,
|
server_name: Option<Box<ServerName>>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Sends a federation request to the remote server's
|
/// - Sends a federation request to the remote server's
|
||||||
/// `/_matrix/federation/v1/version` endpoint and measures the latency it
|
/// `/_matrix/federation/v1/version` endpoint and measures the latency it
|
||||||
/// took for the server to respond
|
/// took for the server to respond
|
||||||
Ping {
|
Ping {
|
||||||
server: OwnedServerName,
|
server: Box<ServerName>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Forces device lists for all local and remote users to be updated (as
|
/// - Forces device lists for all local and remote users to be updated (as
|
||||||
|
@ -141,21 +141,21 @@ pub(super) enum DebugCommand {
|
||||||
///
|
///
|
||||||
/// This re-verifies a PDU existing in the database found by ID.
|
/// This re-verifies a PDU existing in the database found by ID.
|
||||||
VerifyPdu {
|
VerifyPdu {
|
||||||
event_id: OwnedEventId,
|
event_id: Box<EventId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Prints the very first PDU in the specified room (typically
|
/// - Prints the very first PDU in the specified room (typically
|
||||||
/// m.room.create)
|
/// m.room.create)
|
||||||
FirstPduInRoom {
|
FirstPduInRoom {
|
||||||
/// The room ID
|
/// The room ID
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Prints the latest ("last") PDU in the specified room (typically a
|
/// - Prints the latest ("last") PDU in the specified room (typically a
|
||||||
/// message)
|
/// message)
|
||||||
LatestPduInRoom {
|
LatestPduInRoom {
|
||||||
/// The room ID
|
/// The room ID
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Forcefully replaces the room state of our local copy of the specified
|
/// - Forcefully replaces the room state of our local copy of the specified
|
||||||
|
@ -174,9 +174,9 @@ pub(super) enum DebugCommand {
|
||||||
/// `/_matrix/federation/v1/state/{roomId}`.
|
/// `/_matrix/federation/v1/state/{roomId}`.
|
||||||
ForceSetRoomStateFromServer {
|
ForceSetRoomStateFromServer {
|
||||||
/// The impacted room ID
|
/// The impacted room ID
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
/// The server we will use to query the room state for
|
/// The server we will use to query the room state for
|
||||||
server_name: OwnedServerName,
|
server_name: Box<ServerName>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Runs a server name through conduwuit's true destination resolution
|
/// - Runs a server name through conduwuit's true destination resolution
|
||||||
|
@ -184,7 +184,7 @@ pub(super) enum DebugCommand {
|
||||||
///
|
///
|
||||||
/// Useful for debugging well-known issues
|
/// Useful for debugging well-known issues
|
||||||
ResolveTrueDestination {
|
ResolveTrueDestination {
|
||||||
server_name: OwnedServerName,
|
server_name: Box<ServerName>,
|
||||||
|
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
no_cache: bool,
|
no_cache: bool,
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
use conduwuit::{Err, Result};
|
use conduwuit::Err;
|
||||||
|
use ruma::events::room::message::RoomMessageEventContent;
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch};
|
use crate::{Result, admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
#[admin_command_dispatch]
|
#[admin_command_dispatch]
|
||||||
#[derive(Debug, clap::Subcommand)]
|
#[derive(Debug, clap::Subcommand)]
|
||||||
|
@ -13,14 +14,14 @@ pub(crate) enum TesterCommand {
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn panic(&self) -> Result {
|
async fn panic(&self) -> Result<RoomMessageEventContent> {
|
||||||
|
|
||||||
panic!("panicked")
|
panic!("panicked")
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn failure(&self) -> Result {
|
async fn failure(&self) -> Result<RoomMessageEventContent> {
|
||||||
|
|
||||||
Err!("failed")
|
Err!("failed")
|
||||||
}
|
}
|
||||||
|
@ -28,20 +29,20 @@ async fn failure(&self) -> Result {
|
||||||
#[inline(never)]
|
#[inline(never)]
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn tester(&self) -> Result {
|
async fn tester(&self) -> Result<RoomMessageEventContent> {
|
||||||
|
|
||||||
self.write_str("Ok").await
|
Ok(RoomMessageEventContent::notice_plain("legacy"))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline(never)]
|
#[inline(never)]
|
||||||
#[rustfmt::skip]
|
#[rustfmt::skip]
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn timer(&self) -> Result {
|
async fn timer(&self) -> Result<RoomMessageEventContent> {
|
||||||
let started = std::time::Instant::now();
|
let started = std::time::Instant::now();
|
||||||
timed(self.body);
|
timed(self.body);
|
||||||
|
|
||||||
let elapsed = started.elapsed();
|
let elapsed = started.elapsed();
|
||||||
self.write_str(&format!("completed in {elapsed:#?}")).await
|
Ok(RoomMessageEventContent::notice_plain(format!("completed in {elapsed:#?}")))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[inline(never)]
|
#[inline(never)]
|
||||||
|
|
|
@ -1,48 +1,49 @@
|
||||||
use std::fmt::Write;
|
use std::fmt::Write;
|
||||||
|
|
||||||
use conduwuit::{Err, Result};
|
use conduwuit::Result;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
|
use ruma::{
|
||||||
|
OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent,
|
||||||
|
};
|
||||||
|
|
||||||
use crate::{admin_command, get_room_info};
|
use crate::{admin_command, get_room_info};
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result {
|
pub(super) async fn disable_room(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
|
||||||
self.services.rooms.metadata.disable_room(&room_id, true);
|
self.services.rooms.metadata.disable_room(&room_id, true);
|
||||||
self.write_str("Room disabled.").await
|
Ok(RoomMessageEventContent::text_plain("Room disabled."))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result {
|
pub(super) async fn enable_room(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
|
||||||
self.services.rooms.metadata.disable_room(&room_id, false);
|
self.services.rooms.metadata.disable_room(&room_id, false);
|
||||||
self.write_str("Room enabled.").await
|
Ok(RoomMessageEventContent::text_plain("Room enabled."))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn incoming_federation(&self) -> Result {
|
pub(super) async fn incoming_federation(&self) -> Result<RoomMessageEventContent> {
|
||||||
let msg = {
|
let map = self
|
||||||
let map = self
|
.services
|
||||||
.services
|
.rooms
|
||||||
.rooms
|
.event_handler
|
||||||
.event_handler
|
.federation_handletime
|
||||||
.federation_handletime
|
.read()
|
||||||
.read()
|
.expect("locked");
|
||||||
.expect("locked");
|
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
|
||||||
|
|
||||||
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
|
for (r, (e, i)) in map.iter() {
|
||||||
for (r, (e, i)) in map.iter() {
|
let elapsed = i.elapsed();
|
||||||
let elapsed = i.elapsed();
|
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
|
||||||
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
|
}
|
||||||
}
|
|
||||||
|
|
||||||
msg
|
Ok(RoomMessageEventContent::text_plain(&msg))
|
||||||
};
|
|
||||||
|
|
||||||
self.write_str(&msg).await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName) -> Result {
|
pub(super) async fn fetch_support_well_known(
|
||||||
|
&self,
|
||||||
|
server_name: Box<ServerName>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let response = self
|
let response = self
|
||||||
.services
|
.services
|
||||||
.client
|
.client
|
||||||
|
@ -54,44 +55,54 @@ pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName
|
||||||
let text = response.text().await?;
|
let text = response.text().await?;
|
||||||
|
|
||||||
if text.is_empty() {
|
if text.is_empty() {
|
||||||
return Err!("Response text/body is empty.");
|
return Ok(RoomMessageEventContent::text_plain("Response text/body is empty."));
|
||||||
}
|
}
|
||||||
|
|
||||||
if text.len() > 1500 {
|
if text.len() > 1500 {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"Response text/body is over 1500 characters, assuming no support well-known.",
|
"Response text/body is over 1500 characters, assuming no support well-known.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let json: serde_json::Value = match serde_json::from_str(&text) {
|
let json: serde_json::Value = match serde_json::from_str(&text) {
|
||||||
| Ok(json) => json,
|
| Ok(json) => json,
|
||||||
| Err(_) => {
|
| Err(_) => {
|
||||||
return Err!("Response text/body is not valid JSON.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Response text/body is not valid JSON.",
|
||||||
|
));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
let pretty_json: String = match serde_json::to_string_pretty(&json) {
|
let pretty_json: String = match serde_json::to_string_pretty(&json) {
|
||||||
| Ok(json) => json,
|
| Ok(json) => json,
|
||||||
| Err(_) => {
|
| Err(_) => {
|
||||||
return Err!("Response text/body is not valid JSON.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Response text/body is not valid JSON.",
|
||||||
|
));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
self.write_str(&format!("Got JSON response:\n\n```json\n{pretty_json}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Got JSON response:\n\n```json\n{pretty_json}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result {
|
pub(super) async fn remote_user_in_rooms(
|
||||||
|
&self,
|
||||||
|
user_id: Box<UserId>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
if user_id.server_name() == self.services.server.name {
|
if user_id.server_name() == self.services.server.name {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"User belongs to our server, please use `list-joined-rooms` user admin command \
|
"User belongs to our server, please use `list-joined-rooms` user admin command \
|
||||||
instead.",
|
instead.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if !self.services.users.exists(&user_id).await {
|
if !self.services.users.exists(&user_id).await {
|
||||||
return Err!("Remote user does not exist in our database.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Remote user does not exist in our database.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut rooms: Vec<(OwnedRoomId, u64, String)> = self
|
let mut rooms: Vec<(OwnedRoomId, u64, String)> = self
|
||||||
|
@ -104,19 +115,21 @@ pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
if rooms.is_empty() {
|
if rooms.is_empty() {
|
||||||
return Err!("User is not in any rooms.");
|
return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
|
||||||
}
|
}
|
||||||
|
|
||||||
rooms.sort_by_key(|r| r.1);
|
rooms.sort_by_key(|r| r.1);
|
||||||
rooms.reverse();
|
rooms.reverse();
|
||||||
|
|
||||||
let num = rooms.len();
|
let output = format!(
|
||||||
let body = rooms
|
"Rooms {user_id} shares with us ({}):\n```\n{}\n```",
|
||||||
.iter()
|
rooms.len(),
|
||||||
.map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}"))
|
rooms
|
||||||
.collect::<Vec<_>>()
|
.iter()
|
||||||
.join("\n");
|
.map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}"))
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join("\n")
|
||||||
|
);
|
||||||
|
|
||||||
self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",))
|
Ok(RoomMessageEventContent::text_markdown(output))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,7 +2,7 @@ mod commands;
|
||||||
|
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
|
use ruma::{RoomId, ServerName, UserId};
|
||||||
|
|
||||||
use crate::admin_command_dispatch;
|
use crate::admin_command_dispatch;
|
||||||
|
|
||||||
|
@ -14,12 +14,12 @@ pub(super) enum FederationCommand {
|
||||||
|
|
||||||
/// - Disables incoming federation handling for a room.
|
/// - Disables incoming federation handling for a room.
|
||||||
DisableRoom {
|
DisableRoom {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Enables incoming federation handling for a room again.
|
/// - Enables incoming federation handling for a room again.
|
||||||
EnableRoom {
|
EnableRoom {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Fetch `/.well-known/matrix/support` from the specified server
|
/// - Fetch `/.well-known/matrix/support` from the specified server
|
||||||
|
@ -32,11 +32,11 @@ pub(super) enum FederationCommand {
|
||||||
/// moderation, and security inquiries. This command provides a way to
|
/// moderation, and security inquiries. This command provides a way to
|
||||||
/// easily fetch that information.
|
/// easily fetch that information.
|
||||||
FetchSupportWellKnown {
|
FetchSupportWellKnown {
|
||||||
server_name: OwnedServerName,
|
server_name: Box<ServerName>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Lists all the rooms we share/track with the specified *remote* user
|
/// - Lists all the rooms we share/track with the specified *remote* user
|
||||||
RemoteUserInRooms {
|
RemoteUserInRooms {
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,22 +1,26 @@
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result, debug, debug_info, debug_warn, error, info, trace,
|
Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago,
|
||||||
utils::time::parse_timepoint_ago, warn,
|
|
||||||
};
|
};
|
||||||
use conduwuit_service::media::Dim;
|
use conduwuit_service::media::Dim;
|
||||||
use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};
|
use ruma::{
|
||||||
|
EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName,
|
||||||
|
events::room::message::RoomMessageEventContent,
|
||||||
|
};
|
||||||
|
|
||||||
use crate::{admin_command, utils::parse_local_user_id};
|
use crate::{admin_command, utils::parse_local_user_id};
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn delete(
|
pub(super) async fn delete(
|
||||||
&self,
|
&self,
|
||||||
mxc: Option<OwnedMxcUri>,
|
mxc: Option<Box<MxcUri>>,
|
||||||
event_id: Option<OwnedEventId>,
|
event_id: Option<Box<EventId>>,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
if event_id.is_some() && mxc.is_some() {
|
if event_id.is_some() && mxc.is_some() {
|
||||||
return Err!("Please specify either an MXC or an event ID, not both.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Please specify either an MXC or an event ID, not both.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(mxc) = mxc {
|
if let Some(mxc) = mxc {
|
||||||
|
@ -26,7 +30,9 @@ pub(super) async fn delete(
|
||||||
.delete(&mxc.as_str().try_into()?)
|
.delete(&mxc.as_str().try_into()?)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
return Err!("Deleted the MXC from our database and on our filesystem.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Deleted the MXC from our database and on our filesystem.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(event_id) = event_id {
|
if let Some(event_id) = event_id {
|
||||||
|
@ -107,36 +113,41 @@ pub(super) async fn delete(
|
||||||
let final_url = url.to_string().replace('"', "");
|
let final_url = url.to_string().replace('"', "");
|
||||||
mxc_urls.push(final_url);
|
mxc_urls.push(final_url);
|
||||||
} else {
|
} else {
|
||||||
warn!(
|
info!(
|
||||||
"Found a URL in the event ID {event_id} but did not \
|
"Found a URL in the event ID {event_id} but did not \
|
||||||
start with mxc://, ignoring"
|
start with mxc://, ignoring"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
error!("No \"url\" key in \"file\" key.");
|
info!("No \"url\" key in \"file\" key.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"Event ID does not have a \"content\" key or failed parsing the \
|
"Event ID does not have a \"content\" key or failed parsing the \
|
||||||
event ID JSON.",
|
event ID JSON.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"Event ID does not have a \"content\" key, this is not a message or an \
|
"Event ID does not have a \"content\" key, this is not a message or an \
|
||||||
event type that contains media.",
|
event type that contains media.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
| _ => {
|
| _ => {
|
||||||
return Err!("Event ID does not exist or is not known to us.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Event ID does not exist or is not known to us.",
|
||||||
|
));
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
if mxc_urls.is_empty() {
|
if mxc_urls.is_empty() {
|
||||||
return Err!("Parsed event ID but found no MXC URLs.",);
|
info!("Parsed event ID {event_id} but did not contain any MXC URLs.");
|
||||||
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Parsed event ID but found no MXC URLs.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut mxc_deletion_count: usize = 0;
|
let mut mxc_deletion_count: usize = 0;
|
||||||
|
@ -159,27 +170,27 @@ pub(super) async fn delete(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return self
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
.write_str(&format!(
|
"Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from \
|
||||||
"Deleted {mxc_deletion_count} total MXCs from our database and the filesystem \
|
event ID {event_id}."
|
||||||
from event ID {event_id}."
|
)));
|
||||||
))
|
|
||||||
.await;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Err!(
|
Ok(RoomMessageEventContent::text_plain(
|
||||||
"Please specify either an MXC using --mxc or an event ID using --event-id of the \
|
"Please specify either an MXC using --mxc or an event ID using --event-id of the \
|
||||||
message containing an image. See --help for details."
|
message containing an image. See --help for details.",
|
||||||
)
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn delete_list(&self) -> Result {
|
pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> {
|
||||||
if self.body.len() < 2
|
if self.body.len() < 2
|
||||||
|| !self.body[0].trim().starts_with("```")
|
|| !self.body[0].trim().starts_with("```")
|
||||||
|| self.body.last().unwrap_or(&"").trim() != "```"
|
|| self.body.last().unwrap_or(&"").trim() != "```"
|
||||||
{
|
{
|
||||||
return Err!("Expected code block in command body. Add --help for details.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Expected code block in command body. Add --help for details.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut failed_parsed_mxcs: usize = 0;
|
let mut failed_parsed_mxcs: usize = 0;
|
||||||
|
@ -193,6 +204,7 @@ pub(super) async fn delete_list(&self) -> Result {
|
||||||
.try_into()
|
.try_into()
|
||||||
.inspect_err(|e| {
|
.inspect_err(|e| {
|
||||||
debug_warn!("Failed to parse user-provided MXC URI: {e}");
|
debug_warn!("Failed to parse user-provided MXC URI: {e}");
|
||||||
|
|
||||||
failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1);
|
failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1);
|
||||||
})
|
})
|
||||||
.ok()
|
.ok()
|
||||||
|
@ -215,11 +227,10 @@ pub(super) async fn delete_list(&self) -> Result {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&format!(
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \
|
"Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \
|
||||||
and the filesystem. {failed_parsed_mxcs} MXCs failed to be parsed from the database.",
|
and the filesystem. {failed_parsed_mxcs} MXCs failed to be parsed from the database.",
|
||||||
))
|
)))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -229,9 +240,11 @@ pub(super) async fn delete_past_remote_media(
|
||||||
before: bool,
|
before: bool,
|
||||||
after: bool,
|
after: bool,
|
||||||
yes_i_want_to_delete_local_media: bool,
|
yes_i_want_to_delete_local_media: bool,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
if before && after {
|
if before && after {
|
||||||
return Err!("Please only pick one argument, --before or --after.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Please only pick one argument, --before or --after.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
assert!(!(before && after), "--before and --after should not be specified together");
|
assert!(!(before && after), "--before and --after should not be specified together");
|
||||||
|
|
||||||
|
@ -247,28 +260,35 @@ pub(super) async fn delete_past_remote_media(
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
self.write_str(&format!("Deleted {deleted_count} total files.",))
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
.await
|
"Deleted {deleted_count} total files.",
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn delete_all_from_user(&self, username: String) -> Result {
|
pub(super) async fn delete_all_from_user(
|
||||||
|
&self,
|
||||||
|
username: String,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let user_id = parse_local_user_id(self.services, &username)?;
|
let user_id = parse_local_user_id(self.services, &username)?;
|
||||||
|
|
||||||
let deleted_count = self.services.media.delete_from_user(&user_id).await?;
|
let deleted_count = self.services.media.delete_from_user(&user_id).await?;
|
||||||
|
|
||||||
self.write_str(&format!("Deleted {deleted_count} total files.",))
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
.await
|
"Deleted {deleted_count} total files.",
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn delete_all_from_server(
|
pub(super) async fn delete_all_from_server(
|
||||||
&self,
|
&self,
|
||||||
server_name: OwnedServerName,
|
server_name: Box<ServerName>,
|
||||||
yes_i_want_to_delete_local_media: bool,
|
yes_i_want_to_delete_local_media: bool,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media {
|
if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media {
|
||||||
return Err!("This command only works for remote media by default.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"This command only works for remote media by default.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let Ok(all_mxcs) = self
|
let Ok(all_mxcs) = self
|
||||||
|
@ -278,7 +298,9 @@ pub(super) async fn delete_all_from_server(
|
||||||
.await
|
.await
|
||||||
.inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}"))
|
.inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}"))
|
||||||
else {
|
else {
|
||||||
return Err!("Failed to get MXC URIs from our database",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Failed to get MXC URIs from our database",
|
||||||
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut deleted_count: usize = 0;
|
let mut deleted_count: usize = 0;
|
||||||
|
@ -314,16 +336,17 @@ pub(super) async fn delete_all_from_server(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&format!("Deleted {deleted_count} total files.",))
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
.await
|
"Deleted {deleted_count} total files.",
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result {
|
pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result<RoomMessageEventContent> {
|
||||||
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
|
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
|
||||||
let metadata = self.services.media.get_metadata(&mxc).await;
|
let metadata = self.services.media.get_metadata(&mxc).await;
|
||||||
|
|
||||||
self.write_str(&format!("```\n{metadata:#?}\n```")).await
|
Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```")))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -332,7 +355,7 @@ pub(super) async fn get_remote_file(
|
||||||
mxc: OwnedMxcUri,
|
mxc: OwnedMxcUri,
|
||||||
server: Option<OwnedServerName>,
|
server: Option<OwnedServerName>,
|
||||||
timeout: u32,
|
timeout: u32,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
|
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
|
||||||
let timeout = Duration::from_millis(timeout.into());
|
let timeout = Duration::from_millis(timeout.into());
|
||||||
let mut result = self
|
let mut result = self
|
||||||
|
@ -345,8 +368,8 @@ pub(super) async fn get_remote_file(
|
||||||
let len = result.content.as_ref().expect("content").len();
|
let len = result.content.as_ref().expect("content").len();
|
||||||
result.content.as_mut().expect("content").clear();
|
result.content.as_mut().expect("content").clear();
|
||||||
|
|
||||||
self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
|
let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```");
|
||||||
.await
|
Ok(RoomMessageEventContent::notice_markdown(out))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -357,7 +380,7 @@ pub(super) async fn get_remote_thumbnail(
|
||||||
timeout: u32,
|
timeout: u32,
|
||||||
width: u32,
|
width: u32,
|
||||||
height: u32,
|
height: u32,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
|
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
|
||||||
let timeout = Duration::from_millis(timeout.into());
|
let timeout = Duration::from_millis(timeout.into());
|
||||||
let dim = Dim::new(width, height, None);
|
let dim = Dim::new(width, height, None);
|
||||||
|
@ -371,6 +394,6 @@ pub(super) async fn get_remote_thumbnail(
|
||||||
let len = result.content.as_ref().expect("content").len();
|
let len = result.content.as_ref().expect("content").len();
|
||||||
result.content.as_mut().expect("content").clear();
|
result.content.as_mut().expect("content").clear();
|
||||||
|
|
||||||
self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
|
let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```");
|
||||||
.await
|
Ok(RoomMessageEventContent::notice_markdown(out))
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,7 +3,7 @@ mod commands;
|
||||||
|
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName};
|
use ruma::{EventId, MxcUri, OwnedMxcUri, OwnedServerName, ServerName};
|
||||||
|
|
||||||
use crate::admin_command_dispatch;
|
use crate::admin_command_dispatch;
|
||||||
|
|
||||||
|
@ -15,12 +15,12 @@ pub(super) enum MediaCommand {
|
||||||
Delete {
|
Delete {
|
||||||
/// The MXC URL to delete
|
/// The MXC URL to delete
|
||||||
#[arg(long)]
|
#[arg(long)]
|
||||||
mxc: Option<OwnedMxcUri>,
|
mxc: Option<Box<MxcUri>>,
|
||||||
|
|
||||||
/// - The message event ID which contains the media and thumbnail MXC
|
/// - The message event ID which contains the media and thumbnail MXC
|
||||||
/// URLs
|
/// URLs
|
||||||
#[arg(long)]
|
#[arg(long)]
|
||||||
event_id: Option<OwnedEventId>,
|
event_id: Option<Box<EventId>>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Deletes a codeblock list of MXC URLs from our database and on the
|
/// - Deletes a codeblock list of MXC URLs from our database and on the
|
||||||
|
@ -57,7 +57,7 @@ pub(super) enum MediaCommand {
|
||||||
/// - Deletes all remote media from the specified remote server. This will
|
/// - Deletes all remote media from the specified remote server. This will
|
||||||
/// always ignore errors by default.
|
/// always ignore errors by default.
|
||||||
DeleteAllFromServer {
|
DeleteAllFromServer {
|
||||||
server_name: OwnedServerName,
|
server_name: Box<ServerName>,
|
||||||
|
|
||||||
/// Long argument to delete local media
|
/// Long argument to delete local media
|
||||||
#[arg(long)]
|
#[arg(long)]
|
||||||
|
|
|
@ -4,7 +4,7 @@
|
||||||
#![allow(clippy::too_many_arguments)]
|
#![allow(clippy::too_many_arguments)]
|
||||||
|
|
||||||
pub(crate) mod admin;
|
pub(crate) mod admin;
|
||||||
pub(crate) mod context;
|
pub(crate) mod command;
|
||||||
pub(crate) mod processor;
|
pub(crate) mod processor;
|
||||||
mod tests;
|
mod tests;
|
||||||
pub(crate) mod utils;
|
pub(crate) mod utils;
|
||||||
|
@ -23,9 +23,13 @@ extern crate conduwuit_api as api;
|
||||||
extern crate conduwuit_core as conduwuit;
|
extern crate conduwuit_core as conduwuit;
|
||||||
extern crate conduwuit_service as service;
|
extern crate conduwuit_service as service;
|
||||||
|
|
||||||
|
pub(crate) use conduwuit::Result;
|
||||||
pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch};
|
pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
pub(crate) use crate::{context::Context, utils::get_room_info};
|
pub(crate) use crate::{
|
||||||
|
command::Command,
|
||||||
|
utils::{escape_html, get_room_info},
|
||||||
|
};
|
||||||
|
|
||||||
pub(crate) const PAGE_SIZE: usize = 100;
|
pub(crate) const PAGE_SIZE: usize = 100;
|
||||||
|
|
||||||
|
|
|
@ -33,7 +33,7 @@ use service::{
|
||||||
use tracing::Level;
|
use tracing::Level;
|
||||||
use tracing_subscriber::{EnvFilter, filter::LevelFilter};
|
use tracing_subscriber::{EnvFilter, filter::LevelFilter};
|
||||||
|
|
||||||
use crate::{admin, admin::AdminCommand, context::Context};
|
use crate::{Command, admin, admin::AdminCommand};
|
||||||
|
|
||||||
#[must_use]
|
#[must_use]
|
||||||
pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) }
|
pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) }
|
||||||
|
@ -58,7 +58,7 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
|
||||||
| Ok(parsed) => parsed,
|
| Ok(parsed) => parsed,
|
||||||
};
|
};
|
||||||
|
|
||||||
let context = Context {
|
let context = Command {
|
||||||
services: &services,
|
services: &services,
|
||||||
body: &body,
|
body: &body,
|
||||||
timer: SystemTime::now(),
|
timer: SystemTime::now(),
|
||||||
|
@ -103,7 +103,7 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
|
||||||
|
|
||||||
/// Parse and process a message from the admin room
|
/// Parse and process a message from the admin room
|
||||||
async fn process(
|
async fn process(
|
||||||
context: &Context<'_>,
|
context: &Command<'_>,
|
||||||
command: AdminCommand,
|
command: AdminCommand,
|
||||||
args: &[String],
|
args: &[String],
|
||||||
) -> (Result, String) {
|
) -> (Result, String) {
|
||||||
|
@ -132,7 +132,7 @@ async fn process(
|
||||||
(result, output)
|
(result, output)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn capture_create(context: &Context<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
|
fn capture_create(context: &Command<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
|
||||||
let env_config = &context.services.server.config.admin_log_capture;
|
let env_config = &context.services.server.config.admin_log_capture;
|
||||||
let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| {
|
let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| {
|
||||||
warn!("admin_log_capture filter invalid: {e:?}");
|
warn!("admin_log_capture filter invalid: {e:?}");
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{OwnedRoomId, OwnedUserId};
|
use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent};
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch};
|
use crate::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
|
@ -12,31 +12,31 @@ pub(crate) enum AccountDataCommand {
|
||||||
/// - Returns all changes to the account data that happened after `since`.
|
/// - Returns all changes to the account data that happened after `since`.
|
||||||
ChangesSince {
|
ChangesSince {
|
||||||
/// Full user ID
|
/// Full user ID
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
/// UNIX timestamp since (u64)
|
/// UNIX timestamp since (u64)
|
||||||
since: u64,
|
since: u64,
|
||||||
/// Optional room ID of the account data
|
/// Optional room ID of the account data
|
||||||
room_id: Option<OwnedRoomId>,
|
room_id: Option<Box<RoomId>>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Searches the account data for a specific kind.
|
/// - Searches the account data for a specific kind.
|
||||||
AccountDataGet {
|
AccountDataGet {
|
||||||
/// Full user ID
|
/// Full user ID
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
/// Account data event type
|
/// Account data event type
|
||||||
kind: String,
|
kind: String,
|
||||||
/// Optional room ID of the account data
|
/// Optional room ID of the account data
|
||||||
room_id: Option<OwnedRoomId>,
|
room_id: Option<Box<RoomId>>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn changes_since(
|
async fn changes_since(
|
||||||
&self,
|
&self,
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
since: u64,
|
since: u64,
|
||||||
room_id: Option<OwnedRoomId>,
|
room_id: Option<Box<RoomId>>,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let results: Vec<_> = self
|
let results: Vec<_> = self
|
||||||
.services
|
.services
|
||||||
|
@ -46,17 +46,18 @@ async fn changes_since(
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn account_data_get(
|
async fn account_data_get(
|
||||||
&self,
|
&self,
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
kind: String,
|
kind: String,
|
||||||
room_id: Option<OwnedRoomId>,
|
room_id: Option<Box<RoomId>>,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let results = self
|
let results = self
|
||||||
.services
|
.services
|
||||||
|
@ -65,6 +66,7 @@ async fn account_data_get(
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,8 +1,7 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use futures::TryStreamExt;
|
|
||||||
|
|
||||||
use crate::Context;
|
use crate::Command;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/appservice.rs
|
/// All the getters and iterators from src/database/key_value/appservice.rs
|
||||||
|
@ -10,7 +9,7 @@ pub(crate) enum AppserviceCommand {
|
||||||
/// - Gets the appservice registration info/details from the ID as a string
|
/// - Gets the appservice registration info/details from the ID as a string
|
||||||
GetRegistration {
|
GetRegistration {
|
||||||
/// Appservice registration ID
|
/// Appservice registration ID
|
||||||
appservice_id: String,
|
appservice_id: Box<str>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Gets all appservice registrations with their ID and registration info
|
/// - Gets all appservice registrations with their ID and registration info
|
||||||
|
@ -18,7 +17,7 @@ pub(crate) enum AppserviceCommand {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// All the getters and iterators from src/database/key_value/appservice.rs
|
/// All the getters and iterators from src/database/key_value/appservice.rs
|
||||||
pub(super) async fn process(subcommand: AppserviceCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result {
|
||||||
let services = context.services;
|
let services = context.services;
|
||||||
|
|
||||||
match subcommand {
|
match subcommand {
|
||||||
|
@ -32,7 +31,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Context<'_>
|
||||||
},
|
},
|
||||||
| AppserviceCommand::All => {
|
| AppserviceCommand::All => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let results: Vec<_> = services.appservice.iter_db_ids().try_collect().await?;
|
let results = services.appservice.all().await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
|
write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use ruma::OwnedServerName;
|
use ruma::ServerName;
|
||||||
|
|
||||||
use crate::Context;
|
use crate::Command;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/globals.rs
|
/// All the getters and iterators from src/database/key_value/globals.rs
|
||||||
|
@ -11,17 +11,17 @@ pub(crate) enum GlobalsCommand {
|
||||||
|
|
||||||
CurrentCount,
|
CurrentCount,
|
||||||
|
|
||||||
LastCheckForAnnouncementsId,
|
LastCheckForUpdatesId,
|
||||||
|
|
||||||
/// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
|
/// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
|
||||||
/// for the server.
|
/// for the server.
|
||||||
SigningKeysFor {
|
SigningKeysFor {
|
||||||
origin: OwnedServerName,
|
origin: Box<ServerName>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
/// All the getters and iterators from src/database/key_value/globals.rs
|
/// All the getters and iterators from src/database/key_value/globals.rs
|
||||||
pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result {
|
||||||
let services = context.services;
|
let services = context.services;
|
||||||
|
|
||||||
match subcommand {
|
match subcommand {
|
||||||
|
@ -39,12 +39,9 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) -
|
||||||
|
|
||||||
write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
|
write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
|
||||||
},
|
},
|
||||||
| GlobalsCommand::LastCheckForAnnouncementsId => {
|
| GlobalsCommand::LastCheckForUpdatesId => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let results = services
|
let results = services.updates.last_check_for_updates_id().await;
|
||||||
.announcements
|
|
||||||
.last_check_for_announcements_id()
|
|
||||||
.await;
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
|
write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
|
||||||
|
|
|
@ -1,9 +1,9 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::OwnedUserId;
|
use ruma::UserId;
|
||||||
|
|
||||||
use crate::Context;
|
use crate::Command;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/presence.rs
|
/// All the getters and iterators from src/database/key_value/presence.rs
|
||||||
|
@ -11,7 +11,7 @@ pub(crate) enum PresenceCommand {
|
||||||
/// - Returns the latest presence event for the given user.
|
/// - Returns the latest presence event for the given user.
|
||||||
GetPresence {
|
GetPresence {
|
||||||
/// Full user ID
|
/// Full user ID
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Iterator of the most recent presence updates that happened after the
|
/// - Iterator of the most recent presence updates that happened after the
|
||||||
|
@ -23,7 +23,7 @@ pub(crate) enum PresenceCommand {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// All the getters and iterators in key_value/presence.rs
|
/// All the getters and iterators in key_value/presence.rs
|
||||||
pub(super) async fn process(subcommand: PresenceCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result {
|
||||||
let services = context.services;
|
let services = context.services;
|
||||||
|
|
||||||
match subcommand {
|
match subcommand {
|
||||||
|
|
|
@ -1,19 +1,19 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use ruma::OwnedUserId;
|
use ruma::UserId;
|
||||||
|
|
||||||
use crate::Context;
|
use crate::Command;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub(crate) enum PusherCommand {
|
pub(crate) enum PusherCommand {
|
||||||
/// - Returns all the pushers for the user.
|
/// - Returns all the pushers for the user.
|
||||||
GetPushers {
|
GetPushers {
|
||||||
/// Full user ID
|
/// Full user ID
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) async fn process(subcommand: PusherCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result {
|
||||||
let services = context.services;
|
let services = context.services;
|
||||||
|
|
||||||
match subcommand {
|
match subcommand {
|
||||||
|
|
|
@ -11,6 +11,7 @@ use conduwuit::{
|
||||||
use conduwuit_database::Map;
|
use conduwuit_database::Map;
|
||||||
use conduwuit_service::Services;
|
use conduwuit_service::Services;
|
||||||
use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
|
use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
|
||||||
|
use ruma::events::room::message::RoomMessageEventContent;
|
||||||
use tokio::time::Instant;
|
use tokio::time::Instant;
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch};
|
use crate::{admin_command, admin_command_dispatch};
|
||||||
|
@ -169,7 +170,7 @@ pub(super) async fn compact(
|
||||||
into: Option<usize>,
|
into: Option<usize>,
|
||||||
parallelism: Option<usize>,
|
parallelism: Option<usize>,
|
||||||
exhaustive: bool,
|
exhaustive: bool,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
use conduwuit_database::compact::Options;
|
use conduwuit_database::compact::Options;
|
||||||
|
|
||||||
let default_all_maps: Option<_> = map.is_none().then(|| {
|
let default_all_maps: Option<_> = map.is_none().then(|| {
|
||||||
|
@ -220,11 +221,17 @@ pub(super) async fn compact(
|
||||||
let results = results.await;
|
let results = results.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
|
self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
|
||||||
.await
|
.await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_count(&self, map: Option<String>, prefix: Option<String>) -> Result {
|
pub(super) async fn raw_count(
|
||||||
|
&self,
|
||||||
|
map: Option<String>,
|
||||||
|
prefix: Option<String>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
||||||
|
|
||||||
let timer = Instant::now();
|
let timer = Instant::now();
|
||||||
|
@ -235,11 +242,17 @@ pub(super) async fn raw_count(&self, map: Option<String>, prefix: Option<String>
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```"))
|
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```"))
|
||||||
.await
|
.await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_keys(&self, map: String, prefix: Option<String>) -> Result {
|
pub(super) async fn raw_keys(
|
||||||
|
&self,
|
||||||
|
map: String,
|
||||||
|
prefix: Option<String>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
writeln!(self, "```").boxed().await?;
|
writeln!(self, "```").boxed().await?;
|
||||||
|
|
||||||
let map = self.services.db.get(map.as_str())?;
|
let map = self.services.db.get(map.as_str())?;
|
||||||
|
@ -253,12 +266,18 @@ pub(super) async fn raw_keys(&self, map: String, prefix: Option<String>) -> Resu
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
|
let out = format!("\n```\n\nQuery completed in {query_time:?}");
|
||||||
.await
|
self.write_str(out.as_str()).await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_keys_sizes(&self, map: Option<String>, prefix: Option<String>) -> Result {
|
pub(super) async fn raw_keys_sizes(
|
||||||
|
&self,
|
||||||
|
map: Option<String>,
|
||||||
|
prefix: Option<String>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
||||||
|
|
||||||
let timer = Instant::now();
|
let timer = Instant::now();
|
||||||
|
@ -275,12 +294,18 @@ pub(super) async fn raw_keys_sizes(&self, map: Option<String>, prefix: Option<St
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"))
|
let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}");
|
||||||
.await
|
self.write_str(result.as_str()).await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_keys_total(&self, map: Option<String>, prefix: Option<String>) -> Result {
|
pub(super) async fn raw_keys_total(
|
||||||
|
&self,
|
||||||
|
map: Option<String>,
|
||||||
|
prefix: Option<String>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
||||||
|
|
||||||
let timer = Instant::now();
|
let timer = Instant::now();
|
||||||
|
@ -293,12 +318,19 @@ pub(super) async fn raw_keys_total(&self, map: Option<String>, prefix: Option<St
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
|
self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
|
||||||
.await
|
.await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_vals_sizes(&self, map: Option<String>, prefix: Option<String>) -> Result {
|
pub(super) async fn raw_vals_sizes(
|
||||||
|
&self,
|
||||||
|
map: Option<String>,
|
||||||
|
prefix: Option<String>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
||||||
|
|
||||||
let timer = Instant::now();
|
let timer = Instant::now();
|
||||||
|
@ -316,12 +348,18 @@ pub(super) async fn raw_vals_sizes(&self, map: Option<String>, prefix: Option<St
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"))
|
let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}");
|
||||||
.await
|
self.write_str(result.as_str()).await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_vals_total(&self, map: Option<String>, prefix: Option<String>) -> Result {
|
pub(super) async fn raw_vals_total(
|
||||||
|
&self,
|
||||||
|
map: Option<String>,
|
||||||
|
prefix: Option<String>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
let prefix = prefix.as_deref().unwrap_or(EMPTY);
|
||||||
|
|
||||||
let timer = Instant::now();
|
let timer = Instant::now();
|
||||||
|
@ -335,12 +373,19 @@ pub(super) async fn raw_vals_total(&self, map: Option<String>, prefix: Option<St
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
|
self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
|
||||||
.await
|
.await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_iter(&self, map: String, prefix: Option<String>) -> Result {
|
pub(super) async fn raw_iter(
|
||||||
|
&self,
|
||||||
|
map: String,
|
||||||
|
prefix: Option<String>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
writeln!(self, "```").await?;
|
writeln!(self, "```").await?;
|
||||||
|
|
||||||
let map = self.services.db.get(&map)?;
|
let map = self.services.db.get(&map)?;
|
||||||
|
@ -356,7 +401,9 @@ pub(super) async fn raw_iter(&self, map: String, prefix: Option<String>) -> Resu
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
|
self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
|
||||||
.await
|
.await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -365,7 +412,7 @@ pub(super) async fn raw_keys_from(
|
||||||
map: String,
|
map: String,
|
||||||
start: String,
|
start: String,
|
||||||
limit: Option<usize>,
|
limit: Option<usize>,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
writeln!(self, "```").await?;
|
writeln!(self, "```").await?;
|
||||||
|
|
||||||
let map = self.services.db.get(&map)?;
|
let map = self.services.db.get(&map)?;
|
||||||
|
@ -379,7 +426,9 @@ pub(super) async fn raw_keys_from(
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
|
self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
|
||||||
.await
|
.await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -388,7 +437,7 @@ pub(super) async fn raw_iter_from(
|
||||||
map: String,
|
map: String,
|
||||||
start: String,
|
start: String,
|
||||||
limit: Option<usize>,
|
limit: Option<usize>,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let map = self.services.db.get(&map)?;
|
let map = self.services.db.get(&map)?;
|
||||||
let timer = Instant::now();
|
let timer = Instant::now();
|
||||||
let result = map
|
let result = map
|
||||||
|
@ -400,38 +449,41 @@ pub(super) async fn raw_iter_from(
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_del(&self, map: String, key: String) -> Result {
|
pub(super) async fn raw_del(&self, map: String, key: String) -> Result<RoomMessageEventContent> {
|
||||||
let map = self.services.db.get(&map)?;
|
let map = self.services.db.get(&map)?;
|
||||||
let timer = Instant::now();
|
let timer = Instant::now();
|
||||||
map.remove(&key);
|
map.remove(&key);
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
self.write_str(&format!("Operation completed in {query_time:?}"))
|
|
||||||
.await
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
|
"Operation completed in {query_time:?}"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_get(&self, map: String, key: String) -> Result {
|
pub(super) async fn raw_get(&self, map: String, key: String) -> Result<RoomMessageEventContent> {
|
||||||
let map = self.services.db.get(&map)?;
|
let map = self.services.db.get(&map)?;
|
||||||
let timer = Instant::now();
|
let timer = Instant::now();
|
||||||
let handle = map.get(&key).await?;
|
let handle = map.get(&key).await?;
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
let result = String::from_utf8_lossy(&handle);
|
let result = String::from_utf8_lossy(&handle);
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"))
|
|
||||||
.await
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
|
"Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn raw_maps(&self) -> Result {
|
pub(super) async fn raw_maps(&self) -> Result<RoomMessageEventContent> {
|
||||||
let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect();
|
let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect();
|
||||||
|
|
||||||
self.write_str(&format!("{list:#?}")).await
|
Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}")))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn with_maps_or<'a>(
|
fn with_maps_or<'a>(
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::{Result, utils::time};
|
use conduwuit::{Result, utils::time};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::OwnedServerName;
|
use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent};
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch};
|
use crate::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
|
@ -21,7 +21,10 @@ pub(crate) enum ResolverCommand {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn destinations_cache(&self, server_name: Option<OwnedServerName>) -> Result {
|
async fn destinations_cache(
|
||||||
|
&self,
|
||||||
|
server_name: Option<OwnedServerName>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
use service::resolver::cache::CachedDest;
|
use service::resolver::cache::CachedDest;
|
||||||
|
|
||||||
writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?;
|
writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?;
|
||||||
|
@ -41,11 +44,11 @@ async fn destinations_cache(&self, server_name: Option<OwnedServerName>) -> Resu
|
||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(RoomMessageEventContent::notice_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn overrides_cache(&self, server_name: Option<String>) -> Result {
|
async fn overrides_cache(&self, server_name: Option<String>) -> Result<RoomMessageEventContent> {
|
||||||
use service::resolver::cache::CachedOverride;
|
use service::resolver::cache::CachedOverride;
|
||||||
|
|
||||||
writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?;
|
writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?;
|
||||||
|
@ -67,5 +70,5 @@ async fn overrides_cache(&self, server_name: Option<String>) -> Result {
|
||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(RoomMessageEventContent::notice_plain(""))
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,22 +1,22 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{OwnedRoomAliasId, OwnedRoomId};
|
use ruma::{RoomAliasId, RoomId};
|
||||||
|
|
||||||
use crate::Context;
|
use crate::Command;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/rooms/alias.rs
|
/// All the getters and iterators from src/database/key_value/rooms/alias.rs
|
||||||
pub(crate) enum RoomAliasCommand {
|
pub(crate) enum RoomAliasCommand {
|
||||||
ResolveLocalAlias {
|
ResolveLocalAlias {
|
||||||
/// Full room alias
|
/// Full room alias
|
||||||
alias: OwnedRoomAliasId,
|
alias: Box<RoomAliasId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Iterator of all our local room aliases for the room ID
|
/// - Iterator of all our local room aliases for the room ID
|
||||||
LocalAliasesForRoom {
|
LocalAliasesForRoom {
|
||||||
/// Full room ID
|
/// Full room ID
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Iterator of all our local aliases in our database with their room IDs
|
/// - Iterator of all our local aliases in our database with their room IDs
|
||||||
|
@ -24,7 +24,7 @@ pub(crate) enum RoomAliasCommand {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// All the getters and iterators in src/database/key_value/rooms/alias.rs
|
/// All the getters and iterators in src/database/key_value/rooms/alias.rs
|
||||||
pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) -> Result {
|
||||||
let services = context.services;
|
let services = context.services;
|
||||||
|
|
||||||
match subcommand {
|
match subcommand {
|
||||||
|
|
|
@ -1,85 +1,85 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::{Error, Result};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
|
use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent};
|
||||||
|
|
||||||
use crate::Context;
|
use crate::Command;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub(crate) enum RoomStateCacheCommand {
|
pub(crate) enum RoomStateCacheCommand {
|
||||||
ServerInRoom {
|
ServerInRoom {
|
||||||
server: OwnedServerName,
|
server: Box<ServerName>,
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
RoomServers {
|
RoomServers {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
ServerRooms {
|
ServerRooms {
|
||||||
server: OwnedServerName,
|
server: Box<ServerName>,
|
||||||
},
|
},
|
||||||
|
|
||||||
RoomMembers {
|
RoomMembers {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
LocalUsersInRoom {
|
LocalUsersInRoom {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
ActiveLocalUsersInRoom {
|
ActiveLocalUsersInRoom {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
RoomJoinedCount {
|
RoomJoinedCount {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
RoomInvitedCount {
|
RoomInvitedCount {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
RoomUserOnceJoined {
|
RoomUserOnceJoined {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
RoomMembersInvited {
|
RoomMembersInvited {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
GetInviteCount {
|
GetInviteCount {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
GetLeftCount {
|
GetLeftCount {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
RoomsJoined {
|
RoomsJoined {
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
RoomsLeft {
|
RoomsLeft {
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
RoomsInvited {
|
RoomsInvited {
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
InviteState {
|
InviteState {
|
||||||
user_id: OwnedUserId,
|
user_id: Box<UserId>,
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result {
|
||||||
let services = context.services;
|
let services = context.services;
|
||||||
|
|
||||||
match subcommand {
|
let c = match subcommand {
|
||||||
| RoomStateCacheCommand::ServerInRoom { server, room_id } => {
|
| RoomStateCacheCommand::ServerInRoom { server, room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = services
|
let result = services
|
||||||
|
@ -89,11 +89,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::RoomServers { room_id } => {
|
| RoomStateCacheCommand::RoomServers { room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -106,11 +104,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::ServerRooms { server } => {
|
| RoomStateCacheCommand::ServerRooms { server } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -123,11 +119,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::RoomMembers { room_id } => {
|
| RoomStateCacheCommand::RoomMembers { room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -140,11 +134,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::LocalUsersInRoom { room_id } => {
|
| RoomStateCacheCommand::LocalUsersInRoom { room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -157,11 +149,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => {
|
| RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -174,22 +164,18 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::RoomJoinedCount { room_id } => {
|
| RoomStateCacheCommand::RoomJoinedCount { room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let results = services.rooms.state_cache.room_joined_count(&room_id).await;
|
let results = services.rooms.state_cache.room_joined_count(&room_id).await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::RoomInvitedCount { room_id } => {
|
| RoomStateCacheCommand::RoomInvitedCount { room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -200,11 +186,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::RoomUserOnceJoined { room_id } => {
|
| RoomStateCacheCommand::RoomUserOnceJoined { room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -217,11 +201,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::RoomMembersInvited { room_id } => {
|
| RoomStateCacheCommand::RoomMembersInvited { room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -234,11 +216,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::GetInviteCount { room_id, user_id } => {
|
| RoomStateCacheCommand::GetInviteCount { room_id, user_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -249,11 +229,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::GetLeftCount { room_id, user_id } => {
|
| RoomStateCacheCommand::GetLeftCount { room_id, user_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -264,11 +242,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::RoomsJoined { user_id } => {
|
| RoomStateCacheCommand::RoomsJoined { user_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -281,11 +257,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::RoomsInvited { user_id } => {
|
| RoomStateCacheCommand::RoomsInvited { user_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -297,11 +271,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::RoomsLeft { user_id } => {
|
| RoomStateCacheCommand::RoomsLeft { user_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -313,11 +285,9 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| RoomStateCacheCommand::InviteState { user_id, room_id } => {
|
| RoomStateCacheCommand::InviteState { user_id, room_id } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
|
@ -328,11 +298,13 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
}
|
}?;
|
||||||
|
|
||||||
|
context.write_str(c.body()).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::{PduCount, Result, utils::stream::TryTools};
|
use conduwuit::{PduCount, Result, utils::stream::TryTools};
|
||||||
use futures::TryStreamExt;
|
use futures::TryStreamExt;
|
||||||
use ruma::OwnedRoomOrAliasId;
|
use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent};
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch};
|
use crate::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
|
@ -24,7 +24,7 @@ pub(crate) enum RoomTimelineCommand {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result {
|
pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result<RoomMessageEventContent> {
|
||||||
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
||||||
|
|
||||||
let result = self
|
let result = self
|
||||||
|
@ -34,7 +34,7 @@ pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result {
|
||||||
.last_timeline_count(None, &room_id)
|
.last_timeline_count(None, &room_id)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
self.write_str(&format!("{result:#?}")).await
|
Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}")))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -43,7 +43,7 @@ pub(super) async fn pdus(
|
||||||
room_id: OwnedRoomOrAliasId,
|
room_id: OwnedRoomOrAliasId,
|
||||||
from: Option<String>,
|
from: Option<String>,
|
||||||
limit: Option<usize>,
|
limit: Option<usize>,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
||||||
|
|
||||||
let from: Option<PduCount> = from.as_deref().map(str::parse).transpose()?;
|
let from: Option<PduCount> = from.as_deref().map(str::parse).transpose()?;
|
||||||
|
@ -57,5 +57,5 @@ pub(super) async fn pdus(
|
||||||
.try_collect()
|
.try_collect()
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
self.write_str(&format!("{result:#?}")).await
|
Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}")))
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::{Err, Result};
|
use conduwuit::Result;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{OwnedServerName, OwnedUserId};
|
use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent};
|
||||||
use service::sending::Destination;
|
use service::sending::Destination;
|
||||||
|
|
||||||
use crate::Context;
|
use crate::Command;
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
/// All the getters and iterators from src/database/key_value/sending.rs
|
/// All the getters and iterators from src/database/key_value/sending.rs
|
||||||
|
@ -27,9 +27,9 @@ pub(crate) enum SendingCommand {
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
appservice_id: Option<String>,
|
appservice_id: Option<String>,
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
server_name: Option<OwnedServerName>,
|
server_name: Option<Box<ServerName>>,
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
user_id: Option<OwnedUserId>,
|
user_id: Option<Box<UserId>>,
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
push_key: Option<String>,
|
push_key: Option<String>,
|
||||||
},
|
},
|
||||||
|
@ -49,20 +49,30 @@ pub(crate) enum SendingCommand {
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
appservice_id: Option<String>,
|
appservice_id: Option<String>,
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
server_name: Option<OwnedServerName>,
|
server_name: Option<Box<ServerName>>,
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
user_id: Option<OwnedUserId>,
|
user_id: Option<Box<UserId>>,
|
||||||
#[arg(short, long)]
|
#[arg(short, long)]
|
||||||
push_key: Option<String>,
|
push_key: Option<String>,
|
||||||
},
|
},
|
||||||
|
|
||||||
GetLatestEduCount {
|
GetLatestEduCount {
|
||||||
server_name: OwnedServerName,
|
server_name: Box<ServerName>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
/// All the getters and iterators in key_value/sending.rs
|
/// All the getters and iterators in key_value/sending.rs
|
||||||
pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result {
|
||||||
|
let c = reprocess(subcommand, context).await?;
|
||||||
|
context.write_str(c.body()).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// All the getters and iterators in key_value/sending.rs
|
||||||
|
pub(super) async fn reprocess(
|
||||||
|
subcommand: SendingCommand,
|
||||||
|
context: &Command<'_>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let services = context.services;
|
let services = context.services;
|
||||||
|
|
||||||
match subcommand {
|
match subcommand {
|
||||||
|
@ -72,11 +82,9 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -
|
||||||
let active_requests = results.collect::<Vec<_>>().await;
|
let active_requests = results.collect::<Vec<_>>().await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| SendingCommand::QueuedRequests {
|
| SendingCommand::QueuedRequests {
|
||||||
appservice_id,
|
appservice_id,
|
||||||
|
@ -89,19 +97,19 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -
|
||||||
&& user_id.is_none()
|
&& user_id.is_none()
|
||||||
&& push_key.is_none()
|
&& push_key.is_none()
|
||||||
{
|
{
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. See --help for more details.",
|
specified via arguments. See --help for more details.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let results = match (appservice_id, server_name, user_id, push_key) {
|
let results = match (appservice_id, server_name, user_id, push_key) {
|
||||||
| (Some(appservice_id), None, None, None) => {
|
| (Some(appservice_id), None, None, None) => {
|
||||||
if appservice_id.is_empty() {
|
if appservice_id.is_empty() {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. See --help for more details.",
|
specified via arguments. See --help for more details.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
services
|
services
|
||||||
|
@ -112,42 +120,40 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -
|
||||||
| (None, Some(server_name), None, None) => services
|
| (None, Some(server_name), None, None) => services
|
||||||
.sending
|
.sending
|
||||||
.db
|
.db
|
||||||
.queued_requests(&Destination::Federation(server_name)),
|
.queued_requests(&Destination::Federation(server_name.into())),
|
||||||
| (None, None, Some(user_id), Some(push_key)) => {
|
| (None, None, Some(user_id), Some(push_key)) => {
|
||||||
if push_key.is_empty() {
|
if push_key.is_empty() {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. See --help for more details.",
|
specified via arguments. See --help for more details.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
services
|
services
|
||||||
.sending
|
.sending
|
||||||
.db
|
.db
|
||||||
.queued_requests(&Destination::Push(user_id, push_key))
|
.queued_requests(&Destination::Push(user_id.into(), push_key))
|
||||||
},
|
},
|
||||||
| (Some(_), Some(_), Some(_), Some(_)) => {
|
| (Some(_), Some(_), Some(_), Some(_)) => {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. Not all of them See --help for more details.",
|
specified via arguments. Not all of them See --help for more details.",
|
||||||
);
|
));
|
||||||
},
|
},
|
||||||
| _ => {
|
| _ => {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. See --help for more details.",
|
specified via arguments. See --help for more details.",
|
||||||
);
|
));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
let queued_requests = results.collect::<Vec<_>>().await;
|
let queued_requests = results.collect::<Vec<_>>().await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| SendingCommand::ActiveRequestsFor {
|
| SendingCommand::ActiveRequestsFor {
|
||||||
appservice_id,
|
appservice_id,
|
||||||
|
@ -160,20 +166,20 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -
|
||||||
&& user_id.is_none()
|
&& user_id.is_none()
|
||||||
&& push_key.is_none()
|
&& push_key.is_none()
|
||||||
{
|
{
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. See --help for more details.",
|
specified via arguments. See --help for more details.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let results = match (appservice_id, server_name, user_id, push_key) {
|
let results = match (appservice_id, server_name, user_id, push_key) {
|
||||||
| (Some(appservice_id), None, None, None) => {
|
| (Some(appservice_id), None, None, None) => {
|
||||||
if appservice_id.is_empty() {
|
if appservice_id.is_empty() {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. See --help for more details.",
|
specified via arguments. See --help for more details.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
services
|
services
|
||||||
|
@ -184,53 +190,49 @@ pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -
|
||||||
| (None, Some(server_name), None, None) => services
|
| (None, Some(server_name), None, None) => services
|
||||||
.sending
|
.sending
|
||||||
.db
|
.db
|
||||||
.active_requests_for(&Destination::Federation(server_name)),
|
.active_requests_for(&Destination::Federation(server_name.into())),
|
||||||
| (None, None, Some(user_id), Some(push_key)) => {
|
| (None, None, Some(user_id), Some(push_key)) => {
|
||||||
if push_key.is_empty() {
|
if push_key.is_empty() {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. See --help for more details.",
|
specified via arguments. See --help for more details.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
services
|
services
|
||||||
.sending
|
.sending
|
||||||
.db
|
.db
|
||||||
.active_requests_for(&Destination::Push(user_id, push_key))
|
.active_requests_for(&Destination::Push(user_id.into(), push_key))
|
||||||
},
|
},
|
||||||
| (Some(_), Some(_), Some(_), Some(_)) => {
|
| (Some(_), Some(_), Some(_), Some(_)) => {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. Not all of them See --help for more details.",
|
specified via arguments. Not all of them See --help for more details.",
|
||||||
);
|
));
|
||||||
},
|
},
|
||||||
| _ => {
|
| _ => {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"An appservice ID, server name, or a user ID with push key must be \
|
"An appservice ID, server name, or a user ID with push key must be \
|
||||||
specified via arguments. See --help for more details.",
|
specified via arguments. See --help for more details.",
|
||||||
);
|
));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
let active_requests = results.collect::<Vec<_>>().await;
|
let active_requests = results.collect::<Vec<_>>().await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
| SendingCommand::GetLatestEduCount { server_name } => {
|
| SendingCommand::GetLatestEduCount { server_name } => {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let results = services.sending.db.get_latest_educount(&server_name).await;
|
let results = services.sending.db.get_latest_educount(&server_name).await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
context
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.write_str(&format!(
|
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
||||||
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
|
)))
|
||||||
))
|
|
||||||
.await
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use ruma::{OwnedEventId, OwnedRoomOrAliasId};
|
use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent};
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch};
|
use crate::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
|
@ -18,7 +18,10 @@ pub(crate) enum ShortCommand {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn short_event_id(&self, event_id: OwnedEventId) -> Result {
|
pub(super) async fn short_event_id(
|
||||||
|
&self,
|
||||||
|
event_id: OwnedEventId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let shortid = self
|
let shortid = self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -26,14 +29,17 @@ pub(super) async fn short_event_id(&self, event_id: OwnedEventId) -> Result {
|
||||||
.get_shorteventid(&event_id)
|
.get_shorteventid(&event_id)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
self.write_str(&format!("{shortid:#?}")).await
|
Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}")))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn short_room_id(&self, room_id: OwnedRoomOrAliasId) -> Result {
|
pub(super) async fn short_room_id(
|
||||||
|
&self,
|
||||||
|
room_id: OwnedRoomOrAliasId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
||||||
|
|
||||||
let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?;
|
let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?;
|
||||||
|
|
||||||
self.write_str(&format!("{shortid:#?}")).await
|
Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}")))
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,7 +1,9 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use futures::stream::StreamExt;
|
use futures::stream::StreamExt;
|
||||||
use ruma::{OwnedDeviceId, OwnedRoomId, OwnedUserId};
|
use ruma::{
|
||||||
|
OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent,
|
||||||
|
};
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch};
|
use crate::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
|
@ -97,7 +99,11 @@ pub(crate) enum UsersCommand {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Result {
|
async fn get_shared_rooms(
|
||||||
|
&self,
|
||||||
|
user_a: OwnedUserId,
|
||||||
|
user_b: OwnedUserId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result: Vec<_> = self
|
let result: Vec<_> = self
|
||||||
.services
|
.services
|
||||||
|
@ -109,8 +115,9 @@ async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Re
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -120,7 +127,7 @@ async fn get_backup_session(
|
||||||
version: String,
|
version: String,
|
||||||
room_id: OwnedRoomId,
|
room_id: OwnedRoomId,
|
||||||
session_id: String,
|
session_id: String,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self
|
let result = self
|
||||||
.services
|
.services
|
||||||
|
@ -129,8 +136,9 @@ async fn get_backup_session(
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -139,7 +147,7 @@ async fn get_room_backups(
|
||||||
user_id: OwnedUserId,
|
user_id: OwnedUserId,
|
||||||
version: String,
|
version: String,
|
||||||
room_id: OwnedRoomId,
|
room_id: OwnedRoomId,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self
|
let result = self
|
||||||
.services
|
.services
|
||||||
|
@ -148,22 +156,32 @@ async fn get_room_backups(
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result {
|
async fn get_all_backups(
|
||||||
|
&self,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
version: String,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self.services.key_backups.get_all(&user_id, &version).await;
|
let result = self.services.key_backups.get_all(&user_id, &version).await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result {
|
async fn get_backup_algorithm(
|
||||||
|
&self,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
version: String,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self
|
let result = self
|
||||||
.services
|
.services
|
||||||
|
@ -172,12 +190,16 @@ async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> R
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result {
|
async fn get_latest_backup_version(
|
||||||
|
&self,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self
|
let result = self
|
||||||
.services
|
.services
|
||||||
|
@ -186,33 +208,36 @@ async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result {
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result {
|
async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self.services.key_backups.get_latest_backup(&user_id).await;
|
let result = self.services.key_backups.get_latest_backup(&user_id).await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn iter_users(&self) -> Result {
|
async fn iter_users(&self) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result: Vec<OwnedUserId> = self.services.users.stream().map(Into::into).collect().await;
|
let result: Vec<OwnedUserId> = self.services.users.stream().map(Into::into).collect().await;
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn iter_users2(&self) -> Result {
|
async fn iter_users2(&self) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result: Vec<_> = self.services.users.stream().collect().await;
|
let result: Vec<_> = self.services.users.stream().collect().await;
|
||||||
let result: Vec<_> = result
|
let result: Vec<_> = result
|
||||||
|
@ -223,32 +248,35 @@ async fn iter_users2(&self) -> Result {
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn count_users(&self) -> Result {
|
async fn count_users(&self) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self.services.users.count().await;
|
let result = self.services.users.count().await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn password_hash(&self, user_id: OwnedUserId) -> Result {
|
async fn password_hash(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self.services.users.password_hash(&user_id).await;
|
let result = self.services.users.password_hash(&user_id).await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn list_devices(&self, user_id: OwnedUserId) -> Result {
|
async fn list_devices(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let devices = self
|
let devices = self
|
||||||
.services
|
.services
|
||||||
|
@ -260,12 +288,13 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result {
|
||||||
|
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result {
|
async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let devices = self
|
let devices = self
|
||||||
.services
|
.services
|
||||||
|
@ -275,12 +304,17 @@ async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result {
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
|
async fn get_device_metadata(
|
||||||
|
&self,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
device_id: OwnedDeviceId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let device = self
|
let device = self
|
||||||
.services
|
.services
|
||||||
|
@ -289,22 +323,28 @@ async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDevice
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_devices_version(&self, user_id: OwnedUserId) -> Result {
|
async fn get_devices_version(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let device = self.services.users.get_devicelist_version(&user_id).await;
|
let device = self.services.users.get_devicelist_version(&user_id).await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
|
async fn count_one_time_keys(
|
||||||
|
&self,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
device_id: OwnedDeviceId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self
|
let result = self
|
||||||
.services
|
.services
|
||||||
|
@ -313,12 +353,17 @@ async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDevice
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
|
async fn get_device_keys(
|
||||||
|
&self,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
device_id: OwnedDeviceId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self
|
let result = self
|
||||||
.services
|
.services
|
||||||
|
@ -327,22 +372,24 @@ async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId)
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result {
|
async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self.services.users.get_user_signing_key(&user_id).await;
|
let result = self.services.users.get_user_signing_key(&user_id).await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_master_key(&self, user_id: OwnedUserId) -> Result {
|
async fn get_master_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self
|
let result = self
|
||||||
.services
|
.services
|
||||||
|
@ -351,12 +398,17 @@ async fn get_master_key(&self, user_id: OwnedUserId) -> Result {
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn get_to_device_events(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
|
async fn get_to_device_events(
|
||||||
|
&self,
|
||||||
|
user_id: OwnedUserId,
|
||||||
|
device_id: OwnedDeviceId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let timer = tokio::time::Instant::now();
|
let timer = tokio::time::Instant::now();
|
||||||
let result = self
|
let result = self
|
||||||
.services
|
.services
|
||||||
|
@ -366,6 +418,7 @@ async fn get_to_device_events(&self, user_id: OwnedUserId, device_id: OwnedDevic
|
||||||
.await;
|
.await;
|
||||||
let query_time = timer.elapsed();
|
let query_time = timer.elapsed();
|
||||||
|
|
||||||
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,11 +1,13 @@
|
||||||
use std::fmt::Write;
|
use std::fmt::Write;
|
||||||
|
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::{Err, Result};
|
use conduwuit::Result;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{OwnedRoomAliasId, OwnedRoomId};
|
use ruma::{
|
||||||
|
OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent,
|
||||||
|
};
|
||||||
|
|
||||||
use crate::Context;
|
use crate::{Command, escape_html};
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub(crate) enum RoomAliasCommand {
|
pub(crate) enum RoomAliasCommand {
|
||||||
|
@ -16,7 +18,7 @@ pub(crate) enum RoomAliasCommand {
|
||||||
force: bool,
|
force: bool,
|
||||||
|
|
||||||
/// The room id to set the alias on
|
/// The room id to set the alias on
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
|
|
||||||
/// The alias localpart to use (`alias`, not `#alias:servername.tld`)
|
/// The alias localpart to use (`alias`, not `#alias:servername.tld`)
|
||||||
room_alias_localpart: String,
|
room_alias_localpart: String,
|
||||||
|
@ -38,11 +40,21 @@ pub(crate) enum RoomAliasCommand {
|
||||||
/// - List aliases currently being used
|
/// - List aliases currently being used
|
||||||
List {
|
List {
|
||||||
/// If set, only list the aliases for this room
|
/// If set, only list the aliases for this room
|
||||||
room_id: Option<OwnedRoomId>,
|
room_id: Option<Box<RoomId>>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result {
|
||||||
|
let c = reprocess(command, context).await?;
|
||||||
|
context.write_str(c.body()).await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn reprocess(
|
||||||
|
command: RoomAliasCommand,
|
||||||
|
context: &Command<'_>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let services = context.services;
|
let services = context.services;
|
||||||
let server_user = &services.globals.server_user;
|
let server_user = &services.globals.server_user;
|
||||||
|
|
||||||
|
@ -55,7 +67,9 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
|
||||||
let room_alias = match OwnedRoomAliasId::parse(room_alias_str) {
|
let room_alias = match OwnedRoomAliasId::parse(room_alias_str) {
|
||||||
| Ok(alias) => alias,
|
| Ok(alias) => alias,
|
||||||
| Err(err) => {
|
| Err(err) => {
|
||||||
return Err!("Failed to parse alias: {err}");
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to parse alias: {err}"
|
||||||
|
)));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
match command {
|
match command {
|
||||||
|
@ -67,50 +81,60 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
|
||||||
&room_id,
|
&room_id,
|
||||||
server_user,
|
server_user,
|
||||||
) {
|
) {
|
||||||
| Err(err) => Err!("Failed to remove alias: {err}"),
|
| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
| Ok(()) =>
|
"Successfully overwrote alias (formerly {id})"
|
||||||
context
|
))),
|
||||||
.write_str(&format!(
|
| Err(err) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Successfully overwrote alias (formerly {id})"
|
"Failed to remove alias: {err}"
|
||||||
))
|
))),
|
||||||
.await,
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
| (false, Ok(id)) => Err!(
|
| (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Refusing to overwrite in use alias for {id}, use -f or --force to \
|
"Refusing to overwrite in use alias for {id}, use -f or --force to \
|
||||||
overwrite"
|
overwrite"
|
||||||
),
|
))),
|
||||||
| (_, Err(_)) => {
|
| (_, Err(_)) => {
|
||||||
match services.rooms.alias.set_alias(
|
match services.rooms.alias.set_alias(
|
||||||
&room_alias,
|
&room_alias,
|
||||||
&room_id,
|
&room_id,
|
||||||
server_user,
|
server_user,
|
||||||
) {
|
) {
|
||||||
| Err(err) => Err!("Failed to remove alias: {err}"),
|
| Ok(()) => Ok(RoomMessageEventContent::text_plain(
|
||||||
| Ok(()) => context.write_str("Successfully set alias").await,
|
"Successfully set alias",
|
||||||
|
)),
|
||||||
|
| Err(err) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to remove alias: {err}"
|
||||||
|
))),
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
| RoomAliasCommand::Remove { .. } => {
|
| RoomAliasCommand::Remove { .. } => {
|
||||||
match services.rooms.alias.resolve_local_alias(&room_alias).await {
|
match services.rooms.alias.resolve_local_alias(&room_alias).await {
|
||||||
| Err(_) => Err!("Alias isn't in use."),
|
|
||||||
| Ok(id) => match services
|
| Ok(id) => match services
|
||||||
.rooms
|
.rooms
|
||||||
.alias
|
.alias
|
||||||
.remove_alias(&room_alias, server_user)
|
.remove_alias(&room_alias, server_user)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
| Err(err) => Err!("Failed to remove alias: {err}"),
|
| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
| Ok(()) =>
|
"Removed alias from {id}"
|
||||||
context.write_str(&format!("Removed alias from {id}")).await,
|
))),
|
||||||
|
| Err(err) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to remove alias: {err}"
|
||||||
|
))),
|
||||||
},
|
},
|
||||||
|
| Err(_) =>
|
||||||
|
Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
| RoomAliasCommand::Which { .. } => {
|
| RoomAliasCommand::Which { .. } => {
|
||||||
match services.rooms.alias.resolve_local_alias(&room_alias).await {
|
match services.rooms.alias.resolve_local_alias(&room_alias).await {
|
||||||
| Err(_) => Err!("Alias isn't in use."),
|
| Ok(id) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
| Ok(id) => context.write_str(&format!("Alias resolves to {id}")).await,
|
"Alias resolves to {id}"
|
||||||
|
))),
|
||||||
|
| Err(_) =>
|
||||||
|
Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
| RoomAliasCommand::List { .. } => unreachable!(),
|
| RoomAliasCommand::List { .. } => unreachable!(),
|
||||||
|
@ -132,8 +156,15 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
|
||||||
output
|
output
|
||||||
});
|
});
|
||||||
|
|
||||||
|
let html_list = aliases.iter().fold(String::new(), |mut output, alias| {
|
||||||
|
writeln!(output, "<li>{}</li>", escape_html(alias.as_ref()))
|
||||||
|
.expect("should be able to write to string buffer");
|
||||||
|
output
|
||||||
|
});
|
||||||
|
|
||||||
let plain = format!("Aliases for {room_id}:\n{plain_list}");
|
let plain = format!("Aliases for {room_id}:\n{plain_list}");
|
||||||
context.write_str(&plain).await
|
let html = format!("Aliases for {room_id}:\n<ul>{html_list}</ul>");
|
||||||
|
Ok(RoomMessageEventContent::text_html(plain, html))
|
||||||
} else {
|
} else {
|
||||||
let aliases = services
|
let aliases = services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -152,8 +183,23 @@ pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) ->
|
||||||
output
|
output
|
||||||
});
|
});
|
||||||
|
|
||||||
|
let html_list = aliases
|
||||||
|
.iter()
|
||||||
|
.fold(String::new(), |mut output, (alias, id)| {
|
||||||
|
writeln!(
|
||||||
|
output,
|
||||||
|
"<li><code>{}</code> -> #{}:{}</li>",
|
||||||
|
escape_html(alias.as_ref()),
|
||||||
|
escape_html(id),
|
||||||
|
server_name
|
||||||
|
)
|
||||||
|
.expect("should be able to write to string buffer");
|
||||||
|
output
|
||||||
|
});
|
||||||
|
|
||||||
let plain = format!("Aliases:\n{plain_list}");
|
let plain = format!("Aliases:\n{plain_list}");
|
||||||
context.write_str(&plain).await
|
let html = format!("Aliases:\n<ul>{html_list}</ul>");
|
||||||
|
Ok(RoomMessageEventContent::text_html(plain, html))
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
use conduwuit::{Err, Result};
|
use conduwuit::Result;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::OwnedRoomId;
|
use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent};
|
||||||
|
|
||||||
use crate::{PAGE_SIZE, admin_command, get_room_info};
|
use crate::{PAGE_SIZE, admin_command, get_room_info};
|
||||||
|
|
||||||
|
@ -11,7 +11,7 @@ pub(super) async fn list_rooms(
|
||||||
exclude_disabled: bool,
|
exclude_disabled: bool,
|
||||||
exclude_banned: bool,
|
exclude_banned: bool,
|
||||||
no_details: bool,
|
no_details: bool,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
// TODO: i know there's a way to do this with clap, but i can't seem to find it
|
// TODO: i know there's a way to do this with clap, but i can't seem to find it
|
||||||
let page = page.unwrap_or(1);
|
let page = page.unwrap_or(1);
|
||||||
let mut rooms = self
|
let mut rooms = self
|
||||||
|
@ -41,28 +41,29 @@ pub(super) async fn list_rooms(
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
if rooms.is_empty() {
|
if rooms.is_empty() {
|
||||||
return Err!("No more rooms.");
|
return Ok(RoomMessageEventContent::text_plain("No more rooms."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let body = rooms
|
let output_plain = format!(
|
||||||
.iter()
|
"Rooms ({}):\n```\n{}\n```",
|
||||||
.map(|(id, members, name)| {
|
rooms.len(),
|
||||||
if no_details {
|
rooms
|
||||||
|
.iter()
|
||||||
|
.map(|(id, members, name)| if no_details {
|
||||||
format!("{id}")
|
format!("{id}")
|
||||||
} else {
|
} else {
|
||||||
format!("{id}\tMembers: {members}\tName: {name}")
|
format!("{id}\tMembers: {members}\tName: {name}")
|
||||||
}
|
})
|
||||||
})
|
.collect::<Vec<_>>()
|
||||||
.collect::<Vec<_>>()
|
.join("\n")
|
||||||
.join("\n");
|
);
|
||||||
|
|
||||||
self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),))
|
Ok(RoomMessageEventContent::notice_markdown(output_plain))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result {
|
pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result<RoomMessageEventContent> {
|
||||||
let result = self.services.rooms.metadata.exists(&room_id).await;
|
let result = self.services.rooms.metadata.exists(&room_id).await;
|
||||||
|
|
||||||
self.write_str(&format!("{result}")).await
|
Ok(RoomMessageEventContent::notice_markdown(format!("{result}")))
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,22 +1,22 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::{Err, Result};
|
use conduwuit::Result;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::OwnedRoomId;
|
use ruma::{RoomId, events::room::message::RoomMessageEventContent};
|
||||||
|
|
||||||
use crate::{Context, PAGE_SIZE, get_room_info};
|
use crate::{Command, PAGE_SIZE, get_room_info};
|
||||||
|
|
||||||
#[derive(Debug, Subcommand)]
|
#[derive(Debug, Subcommand)]
|
||||||
pub(crate) enum RoomDirectoryCommand {
|
pub(crate) enum RoomDirectoryCommand {
|
||||||
/// - Publish a room to the room directory
|
/// - Publish a room to the room directory
|
||||||
Publish {
|
Publish {
|
||||||
/// The room id of the room to publish
|
/// The room id of the room to publish
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Unpublish a room to the room directory
|
/// - Unpublish a room to the room directory
|
||||||
Unpublish {
|
Unpublish {
|
||||||
/// The room id of the room to unpublish
|
/// The room id of the room to unpublish
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - List rooms that are published
|
/// - List rooms that are published
|
||||||
|
@ -25,16 +25,25 @@ pub(crate) enum RoomDirectoryCommand {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>) -> Result {
|
pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result {
|
||||||
|
let c = reprocess(command, context).await?;
|
||||||
|
context.write_str(c.body()).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) async fn reprocess(
|
||||||
|
command: RoomDirectoryCommand,
|
||||||
|
context: &Command<'_>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let services = context.services;
|
let services = context.services;
|
||||||
match command {
|
match command {
|
||||||
| RoomDirectoryCommand::Publish { room_id } => {
|
| RoomDirectoryCommand::Publish { room_id } => {
|
||||||
services.rooms.directory.set_public(&room_id);
|
services.rooms.directory.set_public(&room_id);
|
||||||
context.write_str("Room published").await
|
Ok(RoomMessageEventContent::notice_plain("Room published"))
|
||||||
},
|
},
|
||||||
| RoomDirectoryCommand::Unpublish { room_id } => {
|
| RoomDirectoryCommand::Unpublish { room_id } => {
|
||||||
services.rooms.directory.set_not_public(&room_id);
|
services.rooms.directory.set_not_public(&room_id);
|
||||||
context.write_str("Room unpublished").await
|
Ok(RoomMessageEventContent::notice_plain("Room unpublished"))
|
||||||
},
|
},
|
||||||
| RoomDirectoryCommand::List { page } => {
|
| RoomDirectoryCommand::List { page } => {
|
||||||
// TODO: i know there's a way to do this with clap, but i can't seem to find it
|
// TODO: i know there's a way to do this with clap, but i can't seem to find it
|
||||||
|
@ -57,18 +66,20 @@ pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
if rooms.is_empty() {
|
if rooms.is_empty() {
|
||||||
return Err!("No more rooms.");
|
return Ok(RoomMessageEventContent::text_plain("No more rooms."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let body = rooms
|
let output = format!(
|
||||||
.iter()
|
"Rooms (page {page}):\n```\n{}\n```",
|
||||||
.map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}"))
|
rooms
|
||||||
.collect::<Vec<_>>()
|
.iter()
|
||||||
.join("\n");
|
.map(|(id, members, name)| format!(
|
||||||
|
"{id} | Members: {members} | Name: {name}"
|
||||||
context
|
))
|
||||||
.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",))
|
.collect::<Vec<_>>()
|
||||||
.await
|
.join("\n")
|
||||||
|
);
|
||||||
|
Ok(RoomMessageEventContent::text_markdown(output))
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::{Err, Result, utils::ReadyExt};
|
use conduwuit::{Result, utils::ReadyExt};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::OwnedRoomId;
|
use ruma::{RoomId, events::room::message::RoomMessageEventContent};
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch};
|
use crate::{admin_command, admin_command_dispatch};
|
||||||
|
|
||||||
|
@ -10,7 +10,7 @@ use crate::{admin_command, admin_command_dispatch};
|
||||||
pub(crate) enum RoomInfoCommand {
|
pub(crate) enum RoomInfoCommand {
|
||||||
/// - List joined members in a room
|
/// - List joined members in a room
|
||||||
ListJoinedMembers {
|
ListJoinedMembers {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
|
|
||||||
/// Lists only our local users in the specified room
|
/// Lists only our local users in the specified room
|
||||||
#[arg(long)]
|
#[arg(long)]
|
||||||
|
@ -22,12 +22,16 @@ pub(crate) enum RoomInfoCommand {
|
||||||
/// Room topics can be huge, so this is in its
|
/// Room topics can be huge, so this is in its
|
||||||
/// own separate command
|
/// own separate command
|
||||||
ViewRoomTopic {
|
ViewRoomTopic {
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> Result {
|
async fn list_joined_members(
|
||||||
|
&self,
|
||||||
|
room_id: Box<RoomId>,
|
||||||
|
local_only: bool,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let room_name = self
|
let room_name = self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -60,19 +64,22 @@ async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> R
|
||||||
.collect()
|
.collect()
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let num = member_info.len();
|
let output_plain = format!(
|
||||||
let body = member_info
|
"{} Members in Room \"{}\":\n```\n{}\n```",
|
||||||
.into_iter()
|
member_info.len(),
|
||||||
.map(|(displayname, mxid)| format!("{mxid} | {displayname}"))
|
room_name,
|
||||||
.collect::<Vec<_>>()
|
member_info
|
||||||
.join("\n");
|
.into_iter()
|
||||||
|
.map(|(displayname, mxid)| format!("{mxid} | {displayname}"))
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join("\n")
|
||||||
|
);
|
||||||
|
|
||||||
self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",))
|
Ok(RoomMessageEventContent::notice_markdown(output_plain))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result {
|
async fn view_room_topic(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
|
||||||
let Ok(room_topic) = self
|
let Ok(room_topic) = self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -80,9 +87,10 @@ async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result {
|
||||||
.get_room_topic(&room_id)
|
.get_room_topic(&room_id)
|
||||||
.await
|
.await
|
||||||
else {
|
else {
|
||||||
return Err!("Room does not have a room topic set.");
|
return Ok(RoomMessageEventContent::text_plain("Room does not have a room topic set."));
|
||||||
};
|
};
|
||||||
|
|
||||||
self.write_str(&format!("Room topic:\n```\n{room_topic}\n```"))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"Room topic:\n```\n{room_topic}\n```"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,12 +1,15 @@
|
||||||
use api::client::leave_room;
|
use api::client::leave_room;
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result, debug,
|
Result, debug,
|
||||||
utils::{IterStream, ReadyExt},
|
utils::{IterStream, ReadyExt},
|
||||||
warn,
|
warn,
|
||||||
};
|
};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId};
|
use ruma::{
|
||||||
|
OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId,
|
||||||
|
events::room::message::RoomMessageEventContent,
|
||||||
|
};
|
||||||
|
|
||||||
use crate::{admin_command, admin_command_dispatch, get_room_info};
|
use crate::{admin_command, admin_command_dispatch, get_room_info};
|
||||||
|
|
||||||
|
@ -21,7 +24,7 @@ pub(crate) enum RoomModerationCommand {
|
||||||
BanRoom {
|
BanRoom {
|
||||||
/// The room in the format of `!roomid:example.com` or a room alias in
|
/// The room in the format of `!roomid:example.com` or a room alias in
|
||||||
/// the format of `#roomalias:example.com`
|
/// the format of `#roomalias:example.com`
|
||||||
room: OwnedRoomOrAliasId,
|
room: Box<RoomOrAliasId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Bans a list of rooms (room IDs and room aliases) from a newline
|
/// - Bans a list of rooms (room IDs and room aliases) from a newline
|
||||||
|
@ -33,7 +36,7 @@ pub(crate) enum RoomModerationCommand {
|
||||||
UnbanRoom {
|
UnbanRoom {
|
||||||
/// The room in the format of `!roomid:example.com` or a room alias in
|
/// The room in the format of `!roomid:example.com` or a room alias in
|
||||||
/// the format of `#roomalias:example.com`
|
/// the format of `#roomalias:example.com`
|
||||||
room: OwnedRoomOrAliasId,
|
room: Box<RoomOrAliasId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - List of all rooms we have banned
|
/// - List of all rooms we have banned
|
||||||
|
@ -46,14 +49,14 @@ pub(crate) enum RoomModerationCommand {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> {
|
||||||
debug!("Got room alias or ID: {}", room);
|
debug!("Got room alias or ID: {}", room);
|
||||||
|
|
||||||
let admin_room_alias = &self.services.globals.admin_alias;
|
let admin_room_alias = &self.services.globals.admin_alias;
|
||||||
|
|
||||||
if let Ok(admin_room_id) = self.services.admin.get_admin_room().await {
|
if let Ok(admin_room_id) = self.services.admin.get_admin_room().await {
|
||||||
if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) {
|
if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) {
|
||||||
return Err!("Not allowed to ban the admin room.");
|
return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room."));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -61,11 +64,11 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
let room_id = match RoomId::parse(&room) {
|
let room_id = match RoomId::parse(&room) {
|
||||||
| Ok(room_id) => room_id,
|
| Ok(room_id) => room_id,
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Failed to parse room ID {room}. Please note that this requires a full room \
|
"Failed to parse room ID {room}. Please note that this requires a full room \
|
||||||
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
||||||
(`#roomalias:example.com`): {e}"
|
(`#roomalias:example.com`): {e}"
|
||||||
);
|
)));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -77,11 +80,11 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
let room_alias = match RoomAliasId::parse(&room) {
|
let room_alias = match RoomAliasId::parse(&room) {
|
||||||
| Ok(room_alias) => room_alias,
|
| Ok(room_alias) => room_alias,
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Failed to parse room ID {room}. Please note that this requires a full room \
|
"Failed to parse room ID {room}. Please note that this requires a full room \
|
||||||
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
||||||
(`#roomalias:example.com`): {e}"
|
(`#roomalias:example.com`): {e}"
|
||||||
);
|
)));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -120,9 +123,9 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
room_id
|
room_id
|
||||||
},
|
},
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::notice_plain(format!(
|
||||||
"Failed to resolve room alias {room_alias} to a room ID: {e}"
|
"Failed to resolve room alias {room_alias} to a room ID: {e}"
|
||||||
);
|
)));
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -132,11 +135,11 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
|
|
||||||
room_id
|
room_id
|
||||||
} else {
|
} else {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"Room specified is not a room ID or room alias. Please note that this requires a \
|
"Room specified is not a room ID or room alias. Please note that this requires a \
|
||||||
full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
||||||
(`#roomalias:example.com`)",
|
(`#roomalias:example.com`)",
|
||||||
);
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
debug!("Making all users leave the room {room_id} and forgetting it");
|
debug!("Making all users leave the room {room_id} and forgetting it");
|
||||||
|
@ -182,19 +185,20 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
|
|
||||||
self.services.rooms.metadata.disable_room(&room_id, true);
|
self.services.rooms.metadata.disable_room(&room_id, true);
|
||||||
|
|
||||||
self.write_str(
|
Ok(RoomMessageEventContent::text_plain(
|
||||||
"Room banned, removed all our local users, and disabled incoming federation with room.",
|
"Room banned, removed all our local users, and disabled incoming federation with room.",
|
||||||
)
|
))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn ban_list_of_rooms(&self) -> Result {
|
async fn ban_list_of_rooms(&self) -> Result<RoomMessageEventContent> {
|
||||||
if self.body.len() < 2
|
if self.body.len() < 2
|
||||||
|| !self.body[0].trim().starts_with("```")
|
|| !self.body[0].trim().starts_with("```")
|
||||||
|| self.body.last().unwrap_or(&"").trim() != "```"
|
|| self.body.last().unwrap_or(&"").trim() != "```"
|
||||||
{
|
{
|
||||||
return Err!("Expected code block in command body. Add --help for details.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Expected code block in command body. Add --help for details.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let rooms_s = self
|
let rooms_s = self
|
||||||
|
@ -352,24 +356,23 @@ async fn ban_list_of_rooms(&self) -> Result {
|
||||||
self.services.rooms.metadata.disable_room(&room_id, true);
|
self.services.rooms.metadata.disable_room(&room_id, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&format!(
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \
|
"Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \
|
||||||
disabled incoming federation with the room."
|
disabled incoming federation with the room."
|
||||||
))
|
)))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> {
|
||||||
let room_id = if room.is_room_id() {
|
let room_id = if room.is_room_id() {
|
||||||
let room_id = match RoomId::parse(&room) {
|
let room_id = match RoomId::parse(&room) {
|
||||||
| Ok(room_id) => room_id,
|
| Ok(room_id) => room_id,
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Failed to parse room ID {room}. Please note that this requires a full room \
|
"Failed to parse room ID {room}. Please note that this requires a full room \
|
||||||
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
||||||
(`#roomalias:example.com`): {e}"
|
(`#roomalias:example.com`): {e}"
|
||||||
);
|
)));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -381,11 +384,11 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
let room_alias = match RoomAliasId::parse(&room) {
|
let room_alias = match RoomAliasId::parse(&room) {
|
||||||
| Ok(room_alias) => room_alias,
|
| Ok(room_alias) => room_alias,
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Failed to parse room ID {room}. Please note that this requires a full room \
|
"Failed to parse room ID {room}. Please note that this requires a full room \
|
||||||
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
||||||
(`#roomalias:example.com`): {e}"
|
(`#roomalias:example.com`): {e}"
|
||||||
);
|
)));
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -424,7 +427,9 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
room_id
|
room_id
|
||||||
},
|
},
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
return Err!("Failed to resolve room alias {room} to a room ID: {e}");
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to resolve room alias {room} to a room ID: {e}"
|
||||||
|
)));
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
@ -434,20 +439,19 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
|
||||||
|
|
||||||
room_id
|
room_id
|
||||||
} else {
|
} else {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"Room specified is not a room ID or room alias. Please note that this requires a \
|
"Room specified is not a room ID or room alias. Please note that this requires a \
|
||||||
full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
|
||||||
(`#roomalias:example.com`)",
|
(`#roomalias:example.com`)",
|
||||||
);
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
self.services.rooms.metadata.disable_room(&room_id, false);
|
self.services.rooms.metadata.disable_room(&room_id, false);
|
||||||
self.write_str("Room unbanned and federation re-enabled.")
|
Ok(RoomMessageEventContent::text_plain("Room unbanned and federation re-enabled."))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
async fn list_banned_rooms(&self, no_details: bool) -> Result {
|
async fn list_banned_rooms(&self, no_details: bool) -> Result<RoomMessageEventContent> {
|
||||||
let room_ids: Vec<OwnedRoomId> = self
|
let room_ids: Vec<OwnedRoomId> = self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -458,7 +462,7 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result {
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
if room_ids.is_empty() {
|
if room_ids.is_empty() {
|
||||||
return Err!("No rooms are banned.");
|
return Ok(RoomMessageEventContent::text_plain("No rooms are banned."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut rooms = room_ids
|
let mut rooms = room_ids
|
||||||
|
@ -471,20 +475,19 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result {
|
||||||
rooms.sort_by_key(|r| r.1);
|
rooms.sort_by_key(|r| r.1);
|
||||||
rooms.reverse();
|
rooms.reverse();
|
||||||
|
|
||||||
let num = rooms.len();
|
let output_plain = format!(
|
||||||
|
"Rooms Banned ({}):\n```\n{}\n```",
|
||||||
let body = rooms
|
rooms.len(),
|
||||||
.iter()
|
rooms
|
||||||
.map(|(id, members, name)| {
|
.iter()
|
||||||
if no_details {
|
.map(|(id, members, name)| if no_details {
|
||||||
format!("{id}")
|
format!("{id}")
|
||||||
} else {
|
} else {
|
||||||
format!("{id}\tMembers: {members}\tName: {name}")
|
format!("{id}\tMembers: {members}\tName: {name}")
|
||||||
}
|
})
|
||||||
})
|
.collect::<Vec<_>>()
|
||||||
.collect::<Vec<_>>()
|
.join("\n")
|
||||||
.join("\n");
|
);
|
||||||
|
|
||||||
self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",))
|
Ok(RoomMessageEventContent::notice_markdown(output_plain))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,16 +1,12 @@
|
||||||
use std::{fmt::Write, path::PathBuf, sync::Arc};
|
use std::{fmt::Write, path::PathBuf, sync::Arc};
|
||||||
|
|
||||||
use conduwuit::{
|
use conduwuit::{Err, Result, info, utils::time, warn};
|
||||||
Err, Result, info,
|
use ruma::events::room::message::RoomMessageEventContent;
|
||||||
utils::{stream::IterStream, time},
|
|
||||||
warn,
|
|
||||||
};
|
|
||||||
use futures::TryStreamExt;
|
|
||||||
|
|
||||||
use crate::admin_command;
|
use crate::admin_command;
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn uptime(&self) -> Result {
|
pub(super) async fn uptime(&self) -> Result<RoomMessageEventContent> {
|
||||||
let elapsed = self
|
let elapsed = self
|
||||||
.services
|
.services
|
||||||
.server
|
.server
|
||||||
|
@ -19,36 +15,47 @@ pub(super) async fn uptime(&self) -> Result {
|
||||||
.expect("standard duration");
|
.expect("standard duration");
|
||||||
|
|
||||||
let result = time::pretty(elapsed);
|
let result = time::pretty(elapsed);
|
||||||
self.write_str(&format!("{result}.")).await
|
Ok(RoomMessageEventContent::notice_plain(format!("{result}.")))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn show_config(&self) -> Result {
|
pub(super) async fn show_config(&self) -> Result<RoomMessageEventContent> {
|
||||||
self.write_str(&format!("{}", *self.services.server.config))
|
// Construct and send the response
|
||||||
.await
|
Ok(RoomMessageEventContent::text_markdown(format!(
|
||||||
|
"{}",
|
||||||
|
*self.services.server.config
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn reload_config(&self, path: Option<PathBuf>) -> Result {
|
pub(super) async fn reload_config(
|
||||||
|
&self,
|
||||||
|
path: Option<PathBuf>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let path = path.as_deref().into_iter();
|
let path = path.as_deref().into_iter();
|
||||||
self.services.config.reload(path)?;
|
self.services.config.reload(path)?;
|
||||||
|
|
||||||
self.write_str("Successfully reconfigured.").await
|
Ok(RoomMessageEventContent::text_plain("Successfully reconfigured."))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn list_features(&self, available: bool, enabled: bool, comma: bool) -> Result {
|
pub(super) async fn list_features(
|
||||||
|
&self,
|
||||||
|
available: bool,
|
||||||
|
enabled: bool,
|
||||||
|
comma: bool,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let delim = if comma { "," } else { " " };
|
let delim = if comma { "," } else { " " };
|
||||||
if enabled && !available {
|
if enabled && !available {
|
||||||
let features = info::rustc::features().join(delim);
|
let features = info::rustc::features().join(delim);
|
||||||
let out = format!("`\n{features}\n`");
|
let out = format!("`\n{features}\n`");
|
||||||
return self.write_str(&out).await;
|
return Ok(RoomMessageEventContent::text_markdown(out));
|
||||||
}
|
}
|
||||||
|
|
||||||
if available && !enabled {
|
if available && !enabled {
|
||||||
let features = info::cargo::features().join(delim);
|
let features = info::cargo::features().join(delim);
|
||||||
let out = format!("`\n{features}\n`");
|
let out = format!("`\n{features}\n`");
|
||||||
return self.write_str(&out).await;
|
return Ok(RoomMessageEventContent::text_markdown(out));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut features = String::new();
|
let mut features = String::new();
|
||||||
|
@ -61,76 +68,77 @@ pub(super) async fn list_features(&self, available: bool, enabled: bool, comma:
|
||||||
writeln!(features, "{emoji} {feature} {remark}")?;
|
writeln!(features, "{emoji} {feature} {remark}")?;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&features).await
|
Ok(RoomMessageEventContent::text_markdown(features))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn memory_usage(&self) -> Result {
|
pub(super) async fn memory_usage(&self) -> Result<RoomMessageEventContent> {
|
||||||
let services_usage = self.services.memory_usage().await?;
|
let services_usage = self.services.memory_usage().await?;
|
||||||
let database_usage = self.services.db.db.memory_usage()?;
|
let database_usage = self.services.db.db.memory_usage()?;
|
||||||
let allocator_usage =
|
let allocator_usage =
|
||||||
conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}"));
|
conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}"));
|
||||||
|
|
||||||
self.write_str(&format!(
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}",
|
"Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}",
|
||||||
))
|
)))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn clear_caches(&self) -> Result {
|
pub(super) async fn clear_caches(&self) -> Result<RoomMessageEventContent> {
|
||||||
self.services.clear_cache().await;
|
self.services.clear_cache().await;
|
||||||
|
|
||||||
self.write_str("Done.").await
|
Ok(RoomMessageEventContent::text_plain("Done."))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn list_backups(&self) -> Result {
|
pub(super) async fn list_backups(&self) -> Result<RoomMessageEventContent> {
|
||||||
self.services
|
let result = self.services.db.db.backup_list()?;
|
||||||
.db
|
|
||||||
.db
|
if result.is_empty() {
|
||||||
.backup_list()?
|
Ok(RoomMessageEventContent::text_plain("No backups found."))
|
||||||
.try_stream()
|
} else {
|
||||||
.try_for_each(|result| write!(self, "{result}"))
|
Ok(RoomMessageEventContent::text_plain(result))
|
||||||
.await
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn backup_database(&self) -> Result {
|
pub(super) async fn backup_database(&self) -> Result<RoomMessageEventContent> {
|
||||||
let db = Arc::clone(&self.services.db);
|
let db = Arc::clone(&self.services.db);
|
||||||
let result = self
|
let mut result = self
|
||||||
.services
|
.services
|
||||||
.server
|
.server
|
||||||
.runtime()
|
.runtime()
|
||||||
.spawn_blocking(move || match db.db.backup() {
|
.spawn_blocking(move || match db.db.backup() {
|
||||||
| Ok(()) => "Done".to_owned(),
|
| Ok(()) => String::new(),
|
||||||
| Err(e) => format!("Failed: {e}"),
|
| Err(e) => e.to_string(),
|
||||||
})
|
})
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let count = self.services.db.db.backup_count()?;
|
if result.is_empty() {
|
||||||
self.write_str(&format!("{result}. Currently have {count} backups."))
|
result = self.services.db.db.backup_list()?;
|
||||||
.await
|
}
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::notice_markdown(result))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn admin_notice(&self, message: Vec<String>) -> Result {
|
pub(super) async fn admin_notice(&self, message: Vec<String>) -> Result<RoomMessageEventContent> {
|
||||||
let message = message.join(" ");
|
let message = message.join(" ");
|
||||||
self.services.admin.send_text(&message).await;
|
self.services.admin.send_text(&message).await;
|
||||||
|
|
||||||
self.write_str("Notice was sent to #admins").await
|
Ok(RoomMessageEventContent::notice_plain("Notice was sent to #admins"))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn reload_mods(&self) -> Result {
|
pub(super) async fn reload_mods(&self) -> Result<RoomMessageEventContent> {
|
||||||
self.services.server.reload()?;
|
self.services.server.reload()?;
|
||||||
|
|
||||||
self.write_str("Reloading server...").await
|
Ok(RoomMessageEventContent::notice_plain("Reloading server..."))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
#[cfg(unix)]
|
#[cfg(unix)]
|
||||||
pub(super) async fn restart(&self, force: bool) -> Result {
|
pub(super) async fn restart(&self, force: bool) -> Result<RoomMessageEventContent> {
|
||||||
use conduwuit::utils::sys::current_exe_deleted;
|
use conduwuit::utils::sys::current_exe_deleted;
|
||||||
|
|
||||||
if !force && current_exe_deleted() {
|
if !force && current_exe_deleted() {
|
||||||
|
@ -142,13 +150,13 @@ pub(super) async fn restart(&self, force: bool) -> Result {
|
||||||
|
|
||||||
self.services.server.restart()?;
|
self.services.server.restart()?;
|
||||||
|
|
||||||
self.write_str("Restarting server...").await
|
Ok(RoomMessageEventContent::notice_plain("Restarting server..."))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn shutdown(&self) -> Result {
|
pub(super) async fn shutdown(&self) -> Result<RoomMessageEventContent> {
|
||||||
warn!("shutdown command");
|
warn!("shutdown command");
|
||||||
self.services.server.shutdown()?;
|
self.services.server.shutdown()?;
|
||||||
|
|
||||||
self.write_str("Shutting down server...").await
|
Ok(RoomMessageEventContent::notice_plain("Shutting down server..."))
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,7 +36,7 @@ pub(super) enum ServerCommand {
|
||||||
/// - Print database memory usage statistics
|
/// - Print database memory usage statistics
|
||||||
MemoryUsage,
|
MemoryUsage,
|
||||||
|
|
||||||
/// - Clears all of Continuwuity's caches
|
/// - Clears all of Conduwuit's caches
|
||||||
ClearCaches,
|
ClearCaches,
|
||||||
|
|
||||||
/// - Performs an online backup of the database (only available for RocksDB
|
/// - Performs an online backup of the database (only available for RocksDB
|
||||||
|
|
|
@ -2,18 +2,18 @@ use std::{collections::BTreeMap, fmt::Write as _};
|
||||||
|
|
||||||
use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room};
|
use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room};
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Result, debug, debug_warn, error, info, is_equal_to,
|
PduBuilder, Result, debug, debug_warn, error, info, is_equal_to,
|
||||||
matrix::pdu::PduBuilder,
|
|
||||||
utils::{self, ReadyExt},
|
utils::{self, ReadyExt},
|
||||||
warn,
|
warn,
|
||||||
};
|
};
|
||||||
use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
|
use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId,
|
EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId,
|
||||||
events::{
|
events::{
|
||||||
RoomAccountDataEventType, StateEventType,
|
RoomAccountDataEventType, StateEventType,
|
||||||
room::{
|
room::{
|
||||||
|
message::RoomMessageEventContent,
|
||||||
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
|
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
|
||||||
redaction::RoomRedactionEventContent,
|
redaction::RoomRedactionEventContent,
|
||||||
},
|
},
|
||||||
|
@ -30,7 +30,7 @@ const AUTO_GEN_PASSWORD_LENGTH: usize = 25;
|
||||||
const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin.";
|
const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin.";
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn list_users(&self) -> Result {
|
pub(super) async fn list_users(&self) -> Result<RoomMessageEventContent> {
|
||||||
let users: Vec<_> = self
|
let users: Vec<_> = self
|
||||||
.services
|
.services
|
||||||
.users
|
.users
|
||||||
|
@ -43,22 +43,30 @@ pub(super) async fn list_users(&self) -> Result {
|
||||||
plain_msg += users.join("\n").as_str();
|
plain_msg += users.join("\n").as_str();
|
||||||
plain_msg += "\n```";
|
plain_msg += "\n```";
|
||||||
|
|
||||||
self.write_str(&plain_msg).await
|
self.write_str(plain_msg.as_str()).await?;
|
||||||
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn create_user(&self, username: String, password: Option<String>) -> Result {
|
pub(super) async fn create_user(
|
||||||
|
&self,
|
||||||
|
username: String,
|
||||||
|
password: Option<String>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
// Validate user id
|
// Validate user id
|
||||||
let user_id = parse_local_user_id(self.services, &username)?;
|
let user_id = parse_local_user_id(self.services, &username)?;
|
||||||
|
|
||||||
if let Err(e) = user_id.validate_strict() {
|
if let Err(e) = user_id.validate_strict() {
|
||||||
if self.services.config.emergency_password.is_none() {
|
if self.services.config.emergency_password.is_none() {
|
||||||
return Err!("Username {user_id} contains disallowed characters or spaces: {e}");
|
return Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Username {user_id} contains disallowed characters or spaces: {e}"
|
||||||
|
)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if self.services.users.exists(&user_id).await {
|
if self.services.users.exists(&user_id).await {
|
||||||
return Err!("User {user_id} already exists");
|
return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists")));
|
||||||
}
|
}
|
||||||
|
|
||||||
let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
|
let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
|
||||||
|
@ -80,7 +88,8 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
|
||||||
.new_user_displayname_suffix
|
.new_user_displayname_suffix
|
||||||
.is_empty()
|
.is_empty()
|
||||||
{
|
{
|
||||||
write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)?;
|
write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)
|
||||||
|
.expect("should be able to write to string buffer");
|
||||||
}
|
}
|
||||||
|
|
||||||
self.services
|
self.services
|
||||||
|
@ -100,17 +109,15 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
|
||||||
content: ruma::events::push_rules::PushRulesEventContent {
|
content: ruma::events::push_rules::PushRulesEventContent {
|
||||||
global: ruma::push::Ruleset::server_default(&user_id),
|
global: ruma::push::Ruleset::server_default(&user_id),
|
||||||
},
|
},
|
||||||
})?,
|
})
|
||||||
|
.expect("to json value always works"),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
if !self.services.server.config.auto_join_rooms.is_empty() {
|
if !self.services.server.config.auto_join_rooms.is_empty() {
|
||||||
for room in &self.services.server.config.auto_join_rooms {
|
for room in &self.services.server.config.auto_join_rooms {
|
||||||
let Ok(room_id) = self.services.rooms.alias.resolve(room).await else {
|
let Ok(room_id) = self.services.rooms.alias.resolve(room).await else {
|
||||||
error!(
|
error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping");
|
||||||
%user_id,
|
|
||||||
"Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"
|
|
||||||
);
|
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -146,17 +153,18 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
|
||||||
info!("Automatically joined room {room} for user {user_id}");
|
info!("Automatically joined room {room} for user {user_id}");
|
||||||
},
|
},
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
|
self.services
|
||||||
|
.admin
|
||||||
|
.send_message(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed to automatically join room {room} for user {user_id}: \
|
||||||
|
{e}"
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
// don't return this error so we don't fail registrations
|
// don't return this error so we don't fail registrations
|
||||||
error!(
|
error!(
|
||||||
"Failed to automatically join room {room} for user {user_id}: {e}"
|
"Failed to automatically join room {room} for user {user_id}: {e}"
|
||||||
);
|
);
|
||||||
self.services
|
|
||||||
.admin
|
|
||||||
.send_text(&format!(
|
|
||||||
"Failed to automatically join room {room} for user {user_id}: \
|
|
||||||
{e}"
|
|
||||||
))
|
|
||||||
.await;
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -183,18 +191,25 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
|
||||||
debug!("create_user admin command called without an admin room being available");
|
debug!("create_user admin command called without an admin room being available");
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`"))
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
.await
|
"Created user with user_id: {user_id} and password: `{password}`"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> Result {
|
pub(super) async fn deactivate(
|
||||||
|
&self,
|
||||||
|
no_leave_rooms: bool,
|
||||||
|
user_id: String,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
// Validate user id
|
// Validate user id
|
||||||
let user_id = parse_local_user_id(self.services, &user_id)?;
|
let user_id = parse_local_user_id(self.services, &user_id)?;
|
||||||
|
|
||||||
// don't deactivate the server service account
|
// don't deactivate the server service account
|
||||||
if user_id == self.services.globals.server_user {
|
if user_id == self.services.globals.server_user {
|
||||||
return Err!("Not allowed to deactivate the server service account.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Not allowed to deactivate the server service account.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
self.services.users.deactivate_account(&user_id).await?;
|
self.services.users.deactivate_account(&user_id).await?;
|
||||||
|
@ -202,8 +217,11 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) ->
|
||||||
if !no_leave_rooms {
|
if !no_leave_rooms {
|
||||||
self.services
|
self.services
|
||||||
.admin
|
.admin
|
||||||
.send_text(&format!("Making {user_id} leave all rooms after deactivation..."))
|
.send_message(RoomMessageEventContent::text_plain(format!(
|
||||||
.await;
|
"Making {user_id} leave all rooms after deactivation..."
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
|
|
||||||
let all_joined_rooms: Vec<OwnedRoomId> = self
|
let all_joined_rooms: Vec<OwnedRoomId> = self
|
||||||
.services
|
.services
|
||||||
|
@ -220,19 +238,24 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) ->
|
||||||
leave_all_rooms(self.services, &user_id).await;
|
leave_all_rooms(self.services, &user_id).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&format!("User {user_id} has been deactivated"))
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
.await
|
"User {user_id} has been deactivated"
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn reset_password(&self, username: String, password: Option<String>) -> Result {
|
pub(super) async fn reset_password(
|
||||||
|
&self,
|
||||||
|
username: String,
|
||||||
|
password: Option<String>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let user_id = parse_local_user_id(self.services, &username)?;
|
let user_id = parse_local_user_id(self.services, &username)?;
|
||||||
|
|
||||||
if user_id == self.services.globals.server_user {
|
if user_id == self.services.globals.server_user {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
"Not allowed to set the password for the server account. Please use the emergency \
|
"Not allowed to set the password for the server account. Please use the emergency \
|
||||||
password config option.",
|
password config option.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
|
let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
|
||||||
|
@ -242,20 +265,28 @@ pub(super) async fn reset_password(&self, username: String, password: Option<Str
|
||||||
.users
|
.users
|
||||||
.set_password(&user_id, Some(new_password.as_str()))
|
.set_password(&user_id, Some(new_password.as_str()))
|
||||||
{
|
{
|
||||||
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
|
| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
| Ok(()) =>
|
"Successfully reset the password for user {user_id}: `{new_password}`"
|
||||||
write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"),
|
))),
|
||||||
|
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Couldn't reset the password for user {user_id}: {e}"
|
||||||
|
))),
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result {
|
pub(super) async fn deactivate_all(
|
||||||
|
&self,
|
||||||
|
no_leave_rooms: bool,
|
||||||
|
force: bool,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
if self.body.len() < 2
|
if self.body.len() < 2
|
||||||
|| !self.body[0].trim().starts_with("```")
|
|| !self.body[0].trim().starts_with("```")
|
||||||
|| self.body.last().unwrap_or(&"").trim() != "```"
|
|| self.body.last().unwrap_or(&"").trim() != "```"
|
||||||
{
|
{
|
||||||
return Err!("Expected code block in command body. Add --help for details.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Expected code block in command body. Add --help for details.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let usernames = self
|
let usernames = self
|
||||||
|
@ -269,23 +300,15 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
|
||||||
|
|
||||||
for username in usernames {
|
for username in usernames {
|
||||||
match parse_active_local_user_id(self.services, username).await {
|
match parse_active_local_user_id(self.services, username).await {
|
||||||
| Err(e) => {
|
|
||||||
self.services
|
|
||||||
.admin
|
|
||||||
.send_text(&format!("{username} is not a valid username, skipping over: {e}"))
|
|
||||||
.await;
|
|
||||||
|
|
||||||
continue;
|
|
||||||
},
|
|
||||||
| Ok(user_id) => {
|
| Ok(user_id) => {
|
||||||
if self.services.users.is_admin(&user_id).await && !force {
|
if self.services.users.is_admin(&user_id).await && !force {
|
||||||
self.services
|
self.services
|
||||||
.admin
|
.admin
|
||||||
.send_text(&format!(
|
.send_message(RoomMessageEventContent::text_plain(format!(
|
||||||
"{username} is an admin and --force is not set, skipping over"
|
"{username} is an admin and --force is not set, skipping over"
|
||||||
))
|
)))
|
||||||
.await;
|
.await
|
||||||
|
.ok();
|
||||||
admins.push(username);
|
admins.push(username);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -294,16 +317,26 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
|
||||||
if user_id == self.services.globals.server_user {
|
if user_id == self.services.globals.server_user {
|
||||||
self.services
|
self.services
|
||||||
.admin
|
.admin
|
||||||
.send_text(&format!(
|
.send_message(RoomMessageEventContent::text_plain(format!(
|
||||||
"{username} is the server service account, skipping over"
|
"{username} is the server service account, skipping over"
|
||||||
))
|
)))
|
||||||
.await;
|
.await
|
||||||
|
.ok();
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
user_ids.push(user_id);
|
user_ids.push(user_id);
|
||||||
},
|
},
|
||||||
|
| Err(e) => {
|
||||||
|
self.services
|
||||||
|
.admin
|
||||||
|
.send_message(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"{username} is not a valid username, skipping over: {e}"
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
|
continue;
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -311,12 +344,6 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
|
||||||
|
|
||||||
for user_id in user_ids {
|
for user_id in user_ids {
|
||||||
match self.services.users.deactivate_account(&user_id).await {
|
match self.services.users.deactivate_account(&user_id).await {
|
||||||
| Err(e) => {
|
|
||||||
self.services
|
|
||||||
.admin
|
|
||||||
.send_text(&format!("Failed deactivating user: {e}"))
|
|
||||||
.await;
|
|
||||||
},
|
|
||||||
| Ok(()) => {
|
| Ok(()) => {
|
||||||
deactivation_count = deactivation_count.saturating_add(1);
|
deactivation_count = deactivation_count.saturating_add(1);
|
||||||
if !no_leave_rooms {
|
if !no_leave_rooms {
|
||||||
|
@ -337,24 +364,33 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
|
||||||
leave_all_rooms(self.services, &user_id).await;
|
leave_all_rooms(self.services, &user_id).await;
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
| Err(e) => {
|
||||||
|
self.services
|
||||||
|
.admin
|
||||||
|
.send_message(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Failed deactivating user: {e}"
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if admins.is_empty() {
|
if admins.is_empty() {
|
||||||
write!(self, "Deactivated {deactivation_count} accounts.")
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
|
"Deactivated {deactivation_count} accounts."
|
||||||
|
)))
|
||||||
} else {
|
} else {
|
||||||
write!(
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
self,
|
|
||||||
"Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \
|
"Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \
|
||||||
--force to deactivate admin accounts",
|
--force to deactivate admin accounts",
|
||||||
admins.join(", ")
|
admins.join(", ")
|
||||||
)
|
)))
|
||||||
}
|
}
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
|
pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result<RoomMessageEventContent> {
|
||||||
// Validate user id
|
// Validate user id
|
||||||
let user_id = parse_local_user_id(self.services, &user_id)?;
|
let user_id = parse_local_user_id(self.services, &user_id)?;
|
||||||
|
|
||||||
|
@ -368,20 +404,23 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
if rooms.is_empty() {
|
if rooms.is_empty() {
|
||||||
return Err!("User is not in any rooms.");
|
return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
|
||||||
}
|
}
|
||||||
|
|
||||||
rooms.sort_by_key(|r| r.1);
|
rooms.sort_by_key(|r| r.1);
|
||||||
rooms.reverse();
|
rooms.reverse();
|
||||||
|
|
||||||
let body = rooms
|
let output_plain = format!(
|
||||||
.iter()
|
"Rooms {user_id} Joined ({}):\n```\n{}\n```",
|
||||||
.map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
|
rooms.len(),
|
||||||
.collect::<Vec<_>>()
|
rooms
|
||||||
.join("\n");
|
.iter()
|
||||||
|
.map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join("\n")
|
||||||
|
);
|
||||||
|
|
||||||
self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),))
|
Ok(RoomMessageEventContent::notice_markdown(output_plain))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -389,23 +428,27 @@ pub(super) async fn force_join_list_of_local_users(
|
||||||
&self,
|
&self,
|
||||||
room_id: OwnedRoomOrAliasId,
|
room_id: OwnedRoomOrAliasId,
|
||||||
yes_i_want_to_do_this: bool,
|
yes_i_want_to_do_this: bool,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
if self.body.len() < 2
|
if self.body.len() < 2
|
||||||
|| !self.body[0].trim().starts_with("```")
|
|| !self.body[0].trim().starts_with("```")
|
||||||
|| self.body.last().unwrap_or(&"").trim() != "```"
|
|| self.body.last().unwrap_or(&"").trim() != "```"
|
||||||
{
|
{
|
||||||
return Err!("Expected code block in command body. Add --help for details.",);
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"Expected code block in command body. Add --help for details.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if !yes_i_want_to_do_this {
|
if !yes_i_want_to_do_this {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::notice_markdown(
|
||||||
"You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
|
"You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
|
||||||
bulk join all specified local users.",
|
bulk join all specified local users.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
|
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
|
||||||
return Err!("There is not an admin room to check for server admins.",);
|
return Ok(RoomMessageEventContent::notice_markdown(
|
||||||
|
"There is not an admin room to check for server admins.",
|
||||||
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
let (room_id, servers) = self
|
let (room_id, servers) = self
|
||||||
|
@ -422,7 +465,7 @@ pub(super) async fn force_join_list_of_local_users(
|
||||||
.server_in_room(self.services.globals.server_name(), &room_id)
|
.server_in_room(self.services.globals.server_name(), &room_id)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!("We are not joined in this room.");
|
return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let server_admins: Vec<_> = self
|
let server_admins: Vec<_> = self
|
||||||
|
@ -442,7 +485,9 @@ pub(super) async fn force_join_list_of_local_users(
|
||||||
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
|
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!("There is not a single server admin in the room.",);
|
return Ok(RoomMessageEventContent::notice_markdown(
|
||||||
|
"There is not a single server admin in the room.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let usernames = self
|
let usernames = self
|
||||||
|
@ -460,11 +505,11 @@ pub(super) async fn force_join_list_of_local_users(
|
||||||
if user_id == self.services.globals.server_user {
|
if user_id == self.services.globals.server_user {
|
||||||
self.services
|
self.services
|
||||||
.admin
|
.admin
|
||||||
.send_text(&format!(
|
.send_message(RoomMessageEventContent::text_plain(format!(
|
||||||
"{username} is the server service account, skipping over"
|
"{username} is the server service account, skipping over"
|
||||||
))
|
)))
|
||||||
.await;
|
.await
|
||||||
|
.ok();
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -473,9 +518,11 @@ pub(super) async fn force_join_list_of_local_users(
|
||||||
| Err(e) => {
|
| Err(e) => {
|
||||||
self.services
|
self.services
|
||||||
.admin
|
.admin
|
||||||
.send_text(&format!("{username} is not a valid username, skipping over: {e}"))
|
.send_message(RoomMessageEventContent::text_plain(format!(
|
||||||
.await;
|
"{username} is not a valid username, skipping over: {e}"
|
||||||
|
)))
|
||||||
|
.await
|
||||||
|
.ok();
|
||||||
continue;
|
continue;
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -506,11 +553,10 @@ pub(super) async fn force_join_list_of_local_users(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&format!(
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
"{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
|
"{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
|
||||||
failed.",
|
failed.",
|
||||||
))
|
)))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -518,16 +564,18 @@ pub(super) async fn force_join_all_local_users(
|
||||||
&self,
|
&self,
|
||||||
room_id: OwnedRoomOrAliasId,
|
room_id: OwnedRoomOrAliasId,
|
||||||
yes_i_want_to_do_this: bool,
|
yes_i_want_to_do_this: bool,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
if !yes_i_want_to_do_this {
|
if !yes_i_want_to_do_this {
|
||||||
return Err!(
|
return Ok(RoomMessageEventContent::notice_markdown(
|
||||||
"You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
|
"You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
|
||||||
bulk join all local users.",
|
bulk join all local users.",
|
||||||
);
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
|
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
|
||||||
return Err!("There is not an admin room to check for server admins.",);
|
return Ok(RoomMessageEventContent::notice_markdown(
|
||||||
|
"There is not an admin room to check for server admins.",
|
||||||
|
));
|
||||||
};
|
};
|
||||||
|
|
||||||
let (room_id, servers) = self
|
let (room_id, servers) = self
|
||||||
|
@ -544,7 +592,7 @@ pub(super) async fn force_join_all_local_users(
|
||||||
.server_in_room(self.services.globals.server_name(), &room_id)
|
.server_in_room(self.services.globals.server_name(), &room_id)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!("We are not joined in this room.");
|
return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let server_admins: Vec<_> = self
|
let server_admins: Vec<_> = self
|
||||||
|
@ -564,7 +612,9 @@ pub(super) async fn force_join_all_local_users(
|
||||||
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
|
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!("There is not a single server admin in the room.",);
|
return Ok(RoomMessageEventContent::notice_markdown(
|
||||||
|
"There is not a single server admin in the room.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut failed_joins: usize = 0;
|
let mut failed_joins: usize = 0;
|
||||||
|
@ -599,11 +649,10 @@ pub(super) async fn force_join_all_local_users(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
self.write_str(&format!(
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
"{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
|
"{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
|
||||||
failed.",
|
failed.",
|
||||||
))
|
)))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -611,7 +660,7 @@ pub(super) async fn force_join_room(
|
||||||
&self,
|
&self,
|
||||||
user_id: String,
|
user_id: String,
|
||||||
room_id: OwnedRoomOrAliasId,
|
room_id: OwnedRoomOrAliasId,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let user_id = parse_local_user_id(self.services, &user_id)?;
|
let user_id = parse_local_user_id(self.services, &user_id)?;
|
||||||
let (room_id, servers) = self
|
let (room_id, servers) = self
|
||||||
.services
|
.services
|
||||||
|
@ -627,8 +676,9 @@ pub(super) async fn force_join_room(
|
||||||
join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None)
|
join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
self.write_str(&format!("{user_id} has been joined to {room_id}.",))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"{user_id} has been joined to {room_id}.",
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
|
@ -636,7 +686,7 @@ pub(super) async fn force_leave_room(
|
||||||
&self,
|
&self,
|
||||||
user_id: String,
|
user_id: String,
|
||||||
room_id: OwnedRoomOrAliasId,
|
room_id: OwnedRoomOrAliasId,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let user_id = parse_local_user_id(self.services, &user_id)?;
|
let user_id = parse_local_user_id(self.services, &user_id)?;
|
||||||
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
||||||
|
|
||||||
|
@ -652,17 +702,24 @@ pub(super) async fn force_leave_room(
|
||||||
.is_joined(&user_id, &room_id)
|
.is_joined(&user_id, &room_id)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
return Err!("{user_id} is not joined in the room");
|
return Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
|
"{user_id} is not joined in the room"
|
||||||
|
)));
|
||||||
}
|
}
|
||||||
|
|
||||||
leave_room(self.services, &user_id, &room_id, None).await?;
|
leave_room(self.services, &user_id, &room_id, None).await?;
|
||||||
|
|
||||||
self.write_str(&format!("{user_id} has left {room_id}.",))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"{user_id} has left {room_id}.",
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAliasId) -> Result {
|
pub(super) async fn force_demote(
|
||||||
|
&self,
|
||||||
|
user_id: String,
|
||||||
|
room_id: OwnedRoomOrAliasId,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let user_id = parse_local_user_id(self.services, &user_id)?;
|
let user_id = parse_local_user_id(self.services, &user_id)?;
|
||||||
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
|
||||||
|
|
||||||
|
@ -673,11 +730,15 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
|
||||||
|
|
||||||
let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;
|
let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;
|
||||||
|
|
||||||
let room_power_levels: Option<RoomPowerLevelsEventContent> = self
|
let room_power_levels = self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
.state_accessor
|
.state_accessor
|
||||||
.room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "")
|
.room_state_get_content::<RoomPowerLevelsEventContent>(
|
||||||
|
&room_id,
|
||||||
|
&StateEventType::RoomPowerLevels,
|
||||||
|
"",
|
||||||
|
)
|
||||||
.await
|
.await
|
||||||
.ok();
|
.ok();
|
||||||
|
|
||||||
|
@ -695,7 +756,9 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
|
||||||
.is_ok_and(|event| event.sender == user_id);
|
.is_ok_and(|event| event.sender == user_id);
|
||||||
|
|
||||||
if !user_can_demote_self {
|
if !user_can_demote_self {
|
||||||
return Err!("User is not allowed to modify their own power levels in the room.",);
|
return Ok(RoomMessageEventContent::notice_markdown(
|
||||||
|
"User is not allowed to modify their own power levels in the room.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut power_levels_content = room_power_levels.unwrap_or_default();
|
let mut power_levels_content = room_power_levels.unwrap_or_default();
|
||||||
|
@ -713,34 +776,34 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
self.write_str(&format!(
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
"User {user_id} demoted themselves to the room default power level in {room_id} - \
|
"User {user_id} demoted themselves to the room default power level in {room_id} - \
|
||||||
{event_id}"
|
{event_id}"
|
||||||
))
|
)))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn make_user_admin(&self, user_id: String) -> Result {
|
pub(super) async fn make_user_admin(&self, user_id: String) -> Result<RoomMessageEventContent> {
|
||||||
let user_id = parse_local_user_id(self.services, &user_id)?;
|
let user_id = parse_local_user_id(self.services, &user_id)?;
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
self.services.globals.user_is_local(&user_id),
|
self.services.globals.user_is_local(&user_id),
|
||||||
"Parsed user_id must be a local user"
|
"Parsed user_id must be a local user"
|
||||||
);
|
);
|
||||||
|
|
||||||
self.services.admin.make_user_admin(&user_id).await?;
|
self.services.admin.make_user_admin(&user_id).await?;
|
||||||
|
|
||||||
self.write_str(&format!("{user_id} has been granted admin privileges.",))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"{user_id} has been granted admin privileges.",
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn put_room_tag(
|
pub(super) async fn put_room_tag(
|
||||||
&self,
|
&self,
|
||||||
user_id: String,
|
user_id: String,
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
tag: String,
|
tag: String,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
|
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
|
||||||
|
|
||||||
let mut tags_event = self
|
let mut tags_event = self
|
||||||
|
@ -767,19 +830,18 @@ pub(super) async fn put_room_tag(
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
self.write_str(&format!(
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Successfully updated room account data for {user_id} and room {room_id} with tag {tag}"
|
"Successfully updated room account data for {user_id} and room {room_id} with tag {tag}"
|
||||||
))
|
)))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn delete_room_tag(
|
pub(super) async fn delete_room_tag(
|
||||||
&self,
|
&self,
|
||||||
user_id: String,
|
user_id: String,
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
tag: String,
|
tag: String,
|
||||||
) -> Result {
|
) -> Result<RoomMessageEventContent> {
|
||||||
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
|
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
|
||||||
|
|
||||||
let mut tags_event = self
|
let mut tags_event = self
|
||||||
|
@ -803,15 +865,18 @@ pub(super) async fn delete_room_tag(
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
self.write_str(&format!(
|
Ok(RoomMessageEventContent::text_plain(format!(
|
||||||
"Successfully updated room account data for {user_id} and room {room_id}, deleting room \
|
"Successfully updated room account data for {user_id} and room {room_id}, deleting room \
|
||||||
tag {tag}"
|
tag {tag}"
|
||||||
))
|
)))
|
||||||
.await
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId) -> Result {
|
pub(super) async fn get_room_tags(
|
||||||
|
&self,
|
||||||
|
user_id: String,
|
||||||
|
room_id: Box<RoomId>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
|
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
|
||||||
|
|
||||||
let tags_event = self
|
let tags_event = self
|
||||||
|
@ -823,12 +888,17 @@ pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId)
|
||||||
content: TagEventContent { tags: BTreeMap::new() },
|
content: TagEventContent { tags: BTreeMap::new() },
|
||||||
});
|
});
|
||||||
|
|
||||||
self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags))
|
Ok(RoomMessageEventContent::notice_markdown(format!(
|
||||||
.await
|
"```\n{:#?}\n```",
|
||||||
|
tags_event.content.tags
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
|
|
||||||
#[admin_command]
|
#[admin_command]
|
||||||
pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
|
pub(super) async fn redact_event(
|
||||||
|
&self,
|
||||||
|
event_id: Box<EventId>,
|
||||||
|
) -> Result<RoomMessageEventContent> {
|
||||||
let Ok(event) = self
|
let Ok(event) = self
|
||||||
.services
|
.services
|
||||||
.rooms
|
.rooms
|
||||||
|
@ -836,18 +906,20 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
|
||||||
.get_non_outlier_pdu(&event_id)
|
.get_non_outlier_pdu(&event_id)
|
||||||
.await
|
.await
|
||||||
else {
|
else {
|
||||||
return Err!("Event does not exist in our database.");
|
return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database."));
|
||||||
};
|
};
|
||||||
|
|
||||||
if event.is_redacted() {
|
if event.is_redacted() {
|
||||||
return Err!("Event is already redacted.");
|
return Ok(RoomMessageEventContent::text_plain("Event is already redacted."));
|
||||||
}
|
}
|
||||||
|
|
||||||
let room_id = event.room_id;
|
let room_id = event.room_id;
|
||||||
let sender_user = event.sender;
|
let sender_user = event.sender;
|
||||||
|
|
||||||
if !self.services.globals.user_is_local(&sender_user) {
|
if !self.services.globals.user_is_local(&sender_user) {
|
||||||
return Err!("This command only works on local users.");
|
return Ok(RoomMessageEventContent::text_plain(
|
||||||
|
"This command only works on local users.",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let reason = format!(
|
let reason = format!(
|
||||||
|
@ -876,8 +948,9 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
|
||||||
.await?
|
.await?
|
||||||
};
|
};
|
||||||
|
|
||||||
self.write_str(&format!(
|
let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}");
|
||||||
"Successfully redacted event. Redaction event ID: {redaction_event_id}"
|
|
||||||
))
|
self.write_str(out.as_str()).await?;
|
||||||
.await
|
|
||||||
|
Ok(RoomMessageEventContent::text_plain(""))
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,7 +2,7 @@ mod commands;
|
||||||
|
|
||||||
use clap::Subcommand;
|
use clap::Subcommand;
|
||||||
use conduwuit::Result;
|
use conduwuit::Result;
|
||||||
use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId};
|
use ruma::{EventId, OwnedRoomOrAliasId, RoomId};
|
||||||
|
|
||||||
use crate::admin_command_dispatch;
|
use crate::admin_command_dispatch;
|
||||||
|
|
||||||
|
@ -102,21 +102,21 @@ pub(super) enum UserCommand {
|
||||||
/// room's internal ID, and the tag name `m.server_notice`.
|
/// room's internal ID, and the tag name `m.server_notice`.
|
||||||
PutRoomTag {
|
PutRoomTag {
|
||||||
user_id: String,
|
user_id: String,
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
tag: String,
|
tag: String,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Deletes the room tag for the specified user and room ID
|
/// - Deletes the room tag for the specified user and room ID
|
||||||
DeleteRoomTag {
|
DeleteRoomTag {
|
||||||
user_id: String,
|
user_id: String,
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
tag: String,
|
tag: String,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Gets all the room tags for the specified user and room ID
|
/// - Gets all the room tags for the specified user and room ID
|
||||||
GetRoomTags {
|
GetRoomTags {
|
||||||
user_id: String,
|
user_id: String,
|
||||||
room_id: OwnedRoomId,
|
room_id: Box<RoomId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Attempts to forcefully redact the specified event ID from the sender
|
/// - Attempts to forcefully redact the specified event ID from the sender
|
||||||
|
@ -124,7 +124,7 @@ pub(super) enum UserCommand {
|
||||||
///
|
///
|
||||||
/// This is only valid for local users
|
/// This is only valid for local users
|
||||||
RedactEvent {
|
RedactEvent {
|
||||||
event_id: OwnedEventId,
|
event_id: Box<EventId>,
|
||||||
},
|
},
|
||||||
|
|
||||||
/// - Force joins a specified list of local users to join the specified
|
/// - Force joins a specified list of local users to join the specified
|
||||||
|
|
|
@ -1,5 +1,3 @@
|
||||||
#![allow(dead_code)]
|
|
||||||
|
|
||||||
use conduwuit_core::{Err, Result, err};
|
use conduwuit_core::{Err, Result, err};
|
||||||
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
|
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
|
||||||
use service::Services;
|
use service::Services;
|
||||||
|
|
|
@ -17,50 +17,21 @@ crate-type = [
|
||||||
]
|
]
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
brotli_compression = [
|
element_hacks = []
|
||||||
"conduwuit-core/brotli_compression",
|
|
||||||
"conduwuit-service/brotli_compression",
|
|
||||||
"reqwest/brotli",
|
|
||||||
]
|
|
||||||
element_hacks = [
|
|
||||||
"conduwuit-service/element_hacks",
|
|
||||||
]
|
|
||||||
gzip_compression = [
|
|
||||||
"conduwuit-core/gzip_compression",
|
|
||||||
"conduwuit-service/gzip_compression",
|
|
||||||
"reqwest/gzip",
|
|
||||||
]
|
|
||||||
io_uring = [
|
|
||||||
"conduwuit-service/io_uring",
|
|
||||||
]
|
|
||||||
jemalloc = [
|
|
||||||
"conduwuit-core/jemalloc",
|
|
||||||
"conduwuit-service/jemalloc",
|
|
||||||
]
|
|
||||||
jemalloc_conf = [
|
|
||||||
"conduwuit-core/jemalloc_conf",
|
|
||||||
"conduwuit-service/jemalloc_conf",
|
|
||||||
]
|
|
||||||
jemalloc_prof = [
|
|
||||||
"conduwuit-core/jemalloc_prof",
|
|
||||||
"conduwuit-service/jemalloc_prof",
|
|
||||||
]
|
|
||||||
jemalloc_stats = [
|
|
||||||
"conduwuit-core/jemalloc_stats",
|
|
||||||
"conduwuit-service/jemalloc_stats",
|
|
||||||
]
|
|
||||||
release_max_log_level = [
|
release_max_log_level = [
|
||||||
"conduwuit-core/release_max_log_level",
|
|
||||||
"conduwuit-service/release_max_log_level",
|
|
||||||
"log/max_level_trace",
|
|
||||||
"log/release_max_level_info",
|
|
||||||
"tracing/max_level_trace",
|
"tracing/max_level_trace",
|
||||||
"tracing/release_max_level_info",
|
"tracing/release_max_level_info",
|
||||||
|
"log/max_level_trace",
|
||||||
|
"log/release_max_level_info",
|
||||||
]
|
]
|
||||||
zstd_compression = [
|
zstd_compression = [
|
||||||
"conduwuit-core/zstd_compression",
|
"reqwest/zstd",
|
||||||
"conduwuit-service/zstd_compression",
|
]
|
||||||
"reqwest/zstd",
|
gzip_compression = [
|
||||||
|
"reqwest/gzip",
|
||||||
|
]
|
||||||
|
brotli_compression = [
|
||||||
|
"reqwest/brotli",
|
||||||
]
|
]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
@ -71,6 +42,7 @@ axum.workspace = true
|
||||||
base64.workspace = true
|
base64.workspace = true
|
||||||
bytes.workspace = true
|
bytes.workspace = true
|
||||||
conduwuit-core.workspace = true
|
conduwuit-core.workspace = true
|
||||||
|
conduwuit-database.workspace = true
|
||||||
conduwuit-service.workspace = true
|
conduwuit-service.workspace = true
|
||||||
const-str.workspace = true
|
const-str.workspace = true
|
||||||
futures.workspace = true
|
futures.workspace = true
|
||||||
|
|
|
@ -3,13 +3,10 @@ use std::fmt::Write;
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use axum_client_ip::InsecureClientIp;
|
use axum_client_ip::InsecureClientIp;
|
||||||
use conduwuit::{
|
use conduwuit::{
|
||||||
Err, Error, Result, debug_info, err, error, info, is_equal_to,
|
Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils,
|
||||||
matrix::pdu::PduBuilder,
|
|
||||||
utils,
|
|
||||||
utils::{ReadyExt, stream::BroadbandExt},
|
utils::{ReadyExt, stream::BroadbandExt},
|
||||||
warn,
|
warn,
|
||||||
};
|
};
|
||||||
use conduwuit_service::Services;
|
|
||||||
use futures::{FutureExt, StreamExt};
|
use futures::{FutureExt, StreamExt};
|
||||||
use register::RegistrationKind;
|
use register::RegistrationKind;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
|
@ -33,6 +30,7 @@ use ruma::{
|
||||||
},
|
},
|
||||||
push,
|
push,
|
||||||
};
|
};
|
||||||
|
use service::Services;
|
||||||
|
|
||||||
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper};
|
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper};
|
||||||
use crate::Ruma;
|
use crate::Ruma;
|
||||||
|
|
|
@ -1,6 +1,5 @@
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{Err, Result, err};
|
use conduwuit::{Err, err};
|
||||||
use conduwuit_service::Services;
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
RoomId, UserId,
|
RoomId, UserId,
|
||||||
api::client::config::{
|
api::client::config::{
|
||||||
|
@ -16,7 +15,7 @@ use ruma::{
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use serde_json::{json, value::RawValue as RawJsonValue};
|
use serde_json::{json, value::RawValue as RawJsonValue};
|
||||||
|
|
||||||
use crate::Ruma;
|
use crate::{Result, Ruma, service::Services};
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
|
/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
|
||||||
///
|
///
|
||||||
|
|
|
@ -1,12 +1,12 @@
|
||||||
use axum::extract::State;
|
use axum::extract::State;
|
||||||
use conduwuit::{Err, Result, debug};
|
use conduwuit::{Err, Result, debug};
|
||||||
use conduwuit_service::Services;
|
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
use ruma::{
|
use ruma::{
|
||||||
OwnedServerName, RoomAliasId, RoomId,
|
OwnedServerName, RoomAliasId, RoomId,
|
||||||
api::client::alias::{create_alias, delete_alias, get_alias},
|
api::client::alias::{create_alias, delete_alias, get_alias},
|
||||||
};
|
};
|
||||||
|
use service::Services;
|
||||||
|
|
||||||
use crate::Ruma;
|
use crate::Ruma;
|
||||||
|
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue