Compare commits: tcpipuk/ci ... alpine-pac
52 commits
Commits:
20ad47b42b, 50ad2c4a6c, 8c207c2f23, 3c44dccd65, b57be072c7, ea5dc8e09d, b9d60c64e5, 94ae824149,
640714922b, 2b268fdaf3, e8d823a653, 0ba77674c7, 2ccbd7d60b, 60960c6e09, ce40304667, dcbc4b54c5,
fce024b30b, 3e4e696761, f605913ea9, 44302ce732, bfb0a2b76a, fcd5669aa1, 9b8b37f162, 7a46563f23,
1bf6537319, 4ed04b343a, a4ad72e11d, 1f57508879, a325dfa56a, b5d2ef9a4a, e200a7d991, 034762c619,
e31d261e66, c5db43ba9a, ec08e16b9f, f14725a51b, d03325c65a, 066794fe90, beee996f72, 7c58e40c96,
5577ddca27, c0f46269b5, 01594a6243, d78fc53577, e3ae024ed3, fb9d4c30f4, cbcf4300df, a98da7d942,
e1655edd83, 4158c1cf62, edd5fc6c7e, 6b0288dd4c
91 changed files with 2077 additions and 966 deletions
@@ -11,10 +11,11 @@ docker/
 *.iml

 # Git folder
-.git
+# .git
 .gitea
 .gitlab
 .github
+.forgejo

 # Dot files
 .env
@@ -22,3 +22,7 @@ indent_size = 2
 [*.rs]
 indent_style = tab
 max_line_length = 98
+
+[{.forgejo/**/*.yml,.github/**/*.yml}]
+indent_size = 2
+indent_style = space
.forgejo/actions/rust-toolchain/action.yml (new file, 63 lines)
@@ -0,0 +1,63 @@
+name: rust-toolchain
+description: |
+  Install a Rust toolchain using rustup.
+  See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification
+  for more information about toolchains.
+inputs:
+  toolchain:
+    description: |
+      Rust toolchain name.
+      See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification
+    required: false
+  target:
+    description: Target triple to install for this toolchain
+    required: false
+  components:
+    description: Space-separated list of components to be additionally installed for a new toolchain
+    required: false
+outputs:
+  rustc_version:
+    description: The rustc version installed
+    value: ${{ steps.rustc-version.outputs.version }}
+  rustup_version:
+    description: The rustup version installed
+    value: ${{ steps.rustup-version.outputs.version }}
+
+runs:
+  using: composite
+  steps:
+    - name: Check if rustup is already installed
+      shell: bash
+      id: rustup-version
+      run: |
+        echo "version=$(rustup --version)" >> $GITHUB_OUTPUT
+    - name: Cache rustup toolchains
+      if: steps.rustup-version.outputs.version == ''
+      uses: actions/cache@v3
+      with:
+        path: |
+          ~/.rustup
+          !~/.rustup/tmp
+          !~/.rustup/downloads
+        # Requires repo to be cloned if toolchain is not specified
+        key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
+    - name: Install Rust toolchain
+      if: steps.rustup-version.outputs.version == ''
+      shell: bash
+      run: |
+        if ! command -v rustup &> /dev/null ; then
+          curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y
+          echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH
+        fi
+    - shell: bash
+      run: |
+        set -x
+        ${{ inputs.toolchain && format('rustup override set {0}', inputs.toolchain) }}
+        ${{ inputs.target && format('rustup target add {0}', inputs.target) }}
+        ${{ inputs.components && format('rustup component add {0}', inputs.components) }}
+        cargo --version
+        rustc --version
+    - id: rustc-version
+      shell: bash
+      run: |
+        echo "version=$(rustc --version)" >> $GITHUB_OUTPUT
.forgejo/actions/sccache/action.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+name: sccache
+description: |
+  Install sccache for caching builds in GitHub Actions.
+
+inputs:
+  token:
+    description: 'A Github PAT'
+    required: false
+
+runs:
+  using: composite
+  steps:
+    - name: Install sccache
+      uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
+      with:
+        token: ${{ inputs.token }}
+    - name: Configure sccache
+      uses: https://github.com/actions/github-script@v7
+      with:
+        script: |
+          core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
+          core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
+    - shell: bash
+      run: |
+        echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
+        echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
+        echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
+        echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
+        echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
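The action above only exports environment variables; the caching takes effect when later build steps invoke cargo or cmake. As a rough local illustration of what those variables do (a sketch, not part of this changeset; it assumes sccache is already installed and skips the GHA-specific `SCCACHE_GHA_ENABLED` backend):

```bash
# Route compiler invocations through sccache, as the exported variables do in CI.
export RUSTC_WRAPPER=sccache
export CMAKE_C_COMPILER_LAUNCHER=sccache
export CMAKE_CXX_COMPILER_LAUNCHER=sccache

cargo build --release      # compilations now go through the sccache cache
sccache --show-stats       # inspect hit/miss counts, as the CI jobs do after building
```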
.forgejo/actions/timelord/action.yml (new file, 46 lines)
@@ -0,0 +1,46 @@
+name: timelord
+description: |
+  Use timelord to set file timestamps
+inputs:
+  key:
+    description: |
+      The key to use for caching the timelord data.
+      This should be unique to the repository and the runner.
+    required: true
+    default: timelord-v0
+  path:
+    description: |
+      The path to the directory to be timestamped.
+      This should be the root of the repository.
+    required: true
+    default: .
+
+runs:
+  using: composite
+  steps:
+    - name: Cache timelord-cli installation
+      id: cache-timelord-bin
+      uses: actions/cache@v3
+      with:
+        path: ~/.cargo/bin/timelord
+        key: timelord-cli-v3.0.1
+    - name: Install timelord-cli
+      uses: https://github.com/cargo-bins/cargo-binstall@main
+      if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
+    - run: cargo binstall timelord-cli@3.0.1
+      shell: bash
+      if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
+
+    - name: Load timelord files
+      uses: actions/cache/restore@v3
+      with:
+        path: /timelord/
+        key: ${{ inputs.key }}
+    - name: Run timelord to set timestamps
+      shell: bash
+      run: timelord sync --source-dir ${{ inputs.path }} --cache-dir /timelord/
+    - name: Save timelord
+      uses: actions/cache/save@v3
+      with:
+        path: /timelord/
+        key: ${{ inputs.key }}
.forgejo/workflows/build-alpine.yml (new file, 49 lines)
@@ -0,0 +1,49 @@
+on:
+  - workflow-dispatch
+  - push
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    container:
+      image: alpine:edge
+
+    steps:
+      - name: set up dependencies
+        run: |
+          apk update
+          apk upgrade
+          apk add nodejs git alpine-sdk
+      - uses: actions/checkout@v4
+        name: checkout the alpine dir
+        with:
+          sparse-checkout: "alpine/"
+
+      # - uses: actions/checkout@v4
+      #   name: checkout the rest in the alpine dir
+      #   with:
+      #     path: 'alpine/continuwuity'
+      - name: set up user
+        run: adduser -DG abuild ci
+
+      - name: set up keys
+        run: |
+          pwd
+          mkdir ~/.abuild
+          echo "${{ secrets.abuild_privkey }}" > ~/.abuild/ci@continuwuity.rsa
+          echo "${{ secrets.abuild_pubkey }}" > ~/.abuild/ci@continuwuity.rsa.pub
+          echo $HOME
+          echo 'PACKAGER_PRIVKEY="/root/.abuild/ci@continuwuity.rsa"' > ~/.abuild/abuild.conf
+          ls ~/.abuild
+
+      - name: go go gadget abuild
+        run: |
+          cd alpine
+          # modify the APKBUILD to use the current branch instead of the release
+          # note that it seems to require the repo to be public (as you'll get
+          # a 404 even if the token is provided)
+          export ARCHIVE_URL="${{ github.server_url }}/${{ github.repository }}/archive/${{ github.ref_name }}.tar.gz"
+          echo $ARCHIVE_URL
+          sed -i '/^source=/c\source="'"$ARCHIVE_URL" APKBUILD
+          abuild -F checksum
+          abuild -Fr
@@ -3,7 +3,6 @@ concurrency:
   group: "release-image-${{ github.ref }}"

 on:
-  pull_request:
   push:
     paths-ignore:
       - "*.md"
@@ -58,7 +57,6 @@ jobs:

   build-image:
     runs-on: dind
-    container: ghcr.io/catthehacker/ubuntu:act-latest
     needs: define-variables
     permissions:
       contents: read
@@ -80,17 +78,15 @@ jobs:
         run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}'
       - name: Echo matrix
         run: echo '${{ toJSON(matrix) }}'

       - name: Checkout repository
         uses: actions/checkout@v4
         with:
           persist-credentials: false
-      - run: |
-          if ! command -v rustup &> /dev/null ; then
-            curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y
-            echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH
-          fi
-      - uses: https://github.com/cargo-bins/cargo-binstall@main
-      - run: cargo binstall timelord-cli@3.0.1
+      - name: Install rust
+        id: rust-toolchain
+        uses: ./.forgejo/actions/rust-toolchain
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
       - name: Set up QEMU
@@ -124,18 +120,58 @@ jobs:
           echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
       - name: Get Git commit timestamps
         run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
-      - name: Set up timelord
-        uses: actions/cache/restore@v3
+
+      - uses: ./.forgejo/actions/timelord
         with:
-          path: /timelord/
-          key: timelord-v0 # Cache is already split per runner
-      - name: Run timelord to set timestamps
-        run: timelord sync --source-dir . --cache-dir /timelord/
-      - name: Save timelord
-        uses: actions/cache/save@v3
-        with:
-          path: /timelord/
           key: timelord-v0
+          path: .
+
+      - name: Cache Rust registry
+        uses: actions/cache@v3
+        with:
+          path: |
+            .cargo/git
+            .cargo/git/checkouts
+            .cargo/registry
+            .cargo/registry/src
+          key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
+      - name: Cache cargo target
+        id: cache-cargo-target
+        uses: actions/cache@v3
+        with:
+          path: |
+            cargo-target-${{ matrix.slug }}
+          key: cargo-target-${{ matrix.slug }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
+      - name: Cache apt cache
+        id: cache-apt
+        uses: actions/cache@v3
+        with:
+          path: |
+            var-cache-apt-${{ matrix.slug }}
+          key: var-cache-apt-${{ matrix.slug }}
+      - name: Cache apt lib
+        id: cache-apt-lib
+        uses: actions/cache@v3
+        with:
+          path: |
+            var-lib-apt-${{ matrix.slug }}
+          key: var-lib-apt-${{ matrix.slug }}
+      - name: inject cache into docker
+        uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0
+        with:
+          cache-map: |
+            {
+              ".cargo/registry": "/usr/local/cargo/registry",
+              ".cargo/git/db": "/usr/local/cargo/git/db",
+              "cargo-target-${{ matrix.slug }}": {
+                "target": "/app/target",
+                "id": "cargo-target-${{ matrix.platform }}"
+              },
+              "var-cache-apt-${{ matrix.slug }}": "/var/cache/apt",
+              "var-lib-apt-${{ matrix.slug }}": "/var/lib/apt"
+            }
+          skip-extraction: ${{ steps.cache.outputs.cache-hit }}
+
       - name: Build and push Docker image by digest
         id: build
         uses: docker/build-push-action@v6
@@ -143,12 +179,15 @@ jobs:
           context: .
           file: "docker/Dockerfile"
           build-args: |
-            CONDUWUIT_VERSION_EXTRA=${{ env.COMMIT_SHORT_SHA }}
+            GIT_COMMIT_HASH=${{ github.sha }})
+            GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }})
+            GIT_REMOTE_URL=${{github.event.repository.html_url }}
+            GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
           platforms: ${{ matrix.platform }}
           labels: ${{ steps.meta.outputs.labels }}
           annotations: ${{ steps.meta.outputs.annotations }}
           cache-from: type=gha
-          cache-to: type=gha,mode=max
+          # cache-to: type=gha,mode=max
           sbom: true
           outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
         env:
@@ -171,7 +210,6 @@ jobs:

   merge:
     runs-on: dind
-    container: ghcr.io/catthehacker/ubuntu:act-latest
     needs: [define-variables, build-image]
     steps:
       - name: Download digests
@@ -199,7 +237,7 @@ jobs:
             type=semver,pattern=v{{version}}
             type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }}
             type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
-            type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) 1= github.ref && 'branch-' || '' }}
+            type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
             type=ref,event=pr
             type=sha,format=long
           images: ${{needs.define-variables.outputs.images}}
.forgejo/workflows/rust-checks.yml (new file, 142 lines)
@@ -0,0 +1,142 @@
+name: Rust Checks
+
+on:
+  push:
+
+jobs:
+  format:
+    name: Format
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install rust
+        uses: ./.forgejo/actions/rust-toolchain
+        with:
+          toolchain: "nightly"
+          components: "rustfmt"
+
+      - name: Check formatting
+        run: |
+          cargo +nightly fmt --all -- --check
+
+  clippy:
+    name: Clippy
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install rust
+        uses: ./.forgejo/actions/rust-toolchain
+
+      - uses: https://github.com/actions/create-github-app-token@v2
+        id: app-token
+        with:
+          app-id: ${{ vars.GH_APP_ID }}
+          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
+          github-api-url: https://api.github.com
+          owner: ${{ vars.GH_APP_OWNER }}
+          repositories: ""
+      - name: Install sccache
+        uses: ./.forgejo/actions/sccache
+        with:
+          token: ${{ steps.app-token.outputs.token }}
+      - run: sudo apt-get update
+      - name: Install system dependencies
+        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
+        with:
+          packages: clang liburing-dev
+          version: 1
+      - name: Cache Rust registry
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/git
+            !~/.cargo/git/checkouts
+            ~/.cargo/registry
+            !~/.cargo/registry/src
+          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
+      - name: Timelord
+        uses: ./.forgejo/actions/timelord
+        with:
+          key: sccache-v0
+          path: .
+      - name: Clippy
+        run: |
+          cargo clippy \
+            --workspace \
+            --locked \
+            --no-deps \
+            --profile test \
+            -- \
+            -D warnings
+
+      - name: Show sccache stats
+        if: always()
+        run: sccache --show-stats
+
+  cargo-test:
+    name: Cargo Test
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+
+      - name: Install rust
+        uses: ./.forgejo/actions/rust-toolchain
+
+      - uses: https://github.com/actions/create-github-app-token@v2
+        id: app-token
+        with:
+          app-id: ${{ vars.GH_APP_ID }}
+          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
+          github-api-url: https://api.github.com
+          owner: ${{ vars.GH_APP_OWNER }}
+          repositories: ""
+      - name: Install sccache
+        uses: ./.forgejo/actions/sccache
+        with:
+          token: ${{ steps.app-token.outputs.token }}
+      - run: sudo apt-get update
+      - name: Install system dependencies
+        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
+        with:
+          packages: clang liburing-dev
+          version: 1
+      - name: Cache Rust registry
+        uses: actions/cache@v3
+        with:
+          path: |
+            ~/.cargo/git
+            !~/.cargo/git/checkouts
+            ~/.cargo/registry
+            !~/.cargo/registry/src
+          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
+      - name: Timelord
+        uses: ./.forgejo/actions/timelord
+        with:
+          key: sccache-v0
+          path: .
+      - name: Cargo Test
+        run: |
+          cargo test \
+            --workspace \
+            --locked \
+            --profile test \
+            --all-targets \
+            --no-fail-fast
+
+      - name: Show sccache stats
+        if: always()
+        run: sccache --show-stats
.typos.toml (new file, 9 lines)
@@ -0,0 +1,9 @@
+[files]
+extend-exclude = ["*.csr"]
+
+[default.extend-words]
+"allocatedp" = "allocatedp"
+"conduwuit" = "conduwuit"
+"continuwuity" = "continuwuity"
+"continuwity" = "continuwuity"
+"execuse" = "execuse"
@@ -1,10 +1,10 @@
 # Contributing guide

-This page is for about contributing to conduwuit. The
+This page is for about contributing to Continuwuity. The
 [development](./development.md) page may be of interest for you as well.

 If you would like to work on an [issue][issues] that is not assigned, preferably
-ask in the Matrix room first at [#conduwuit:puppygock.gay][conduwuit-matrix],
+ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix],
 and comment on it.

 ### Linting and Formatting
@@ -23,9 +23,9 @@ suggestion, allow the lint and mention that in a comment.

 ### Running CI tests locally

-conduwuit's CI for tests, linting, formatting, audit, etc use
+continuwuity's CI for tests, linting, formatting, audit, etc use
 [`engage`][engage]. engage can be installed from nixpkgs or `cargo install
-engage`. conduwuit's Nix flake devshell has the nixpkgs engage with `direnv`.
+engage`. continuwuity's Nix flake devshell has the nixpkgs engage with `direnv`.
 Use `engage --help` for more usage details.

 To test, format, lint, etc that CI would do, install engage, allow the `.envrc`
@@ -73,7 +73,7 @@ If you'd like to run Complement locally using Nix, see the

 ### Writing documentation

-conduwuit's website uses [`mdbook`][mdbook] and deployed via CI using GitHub
+Continuwuity's website uses [`mdbook`][mdbook] and deployed via CI using GitHub
 Pages in the [`documentation.yml`][documentation.yml] workflow file with Nix's
 mdbook in the devshell. All documentation is in the `docs/` directory at the top
 level. The compiled mdbook website is also uploaded as an artifact.
@@ -111,33 +111,28 @@ applies here.

 ### Creating pull requests

-Please try to keep contributions to the GitHub. While the mirrors of conduwuit
-allow for pull/merge requests, there is no guarantee I will see them in a timely
+Please try to keep contributions to the Forgejo Instance. While the mirrors of continuwuity
+allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely
 manner. Additionally, please mark WIP or unfinished or incomplete PRs as drafts.
-This prevents me from having to ping once in a while to double check the status
+This prevents us from having to ping once in a while to double check the status
 of it, especially when the CI completed successfully and everything so it
 *looks* done.

-If you open a pull request on one of the mirrors, it is your responsibility to
-inform me about its existence. In the future I may try to solve this with more
-repo bots in the conduwuit Matrix room. There is no mailing list or email-patch
-support on the sr.ht mirror, but if you'd like to email me a git patch you can
-do so at `strawberry@puppygock.gay`.
-
 Direct all PRs/MRs to the `main` branch.

 By sending a pull request or patch, you are agreeing that your changes are
 allowed to be licenced under the Apache-2.0 licence and all of your conduct is
-in line with the Contributor's Covenant, and conduwuit's Code of Conduct.
+in line with the Contributor's Covenant, and continuwuity's Code of Conduct.

 Contribution by users who violate either of these code of conducts will not have
 their contributions accepted. This includes users who have been banned from
-conduwuit Matrix rooms for Code of Conduct violations.
+continuwuityMatrix rooms for Code of Conduct violations.

-[issues]: https://github.com/girlbossceo/conduwuit/issues
-[conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay
+[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues
+[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org
 [complement]: https://github.com/matrix-org/complement/
-[engage.toml]: https://github.com/girlbossceo/conduwuit/blob/main/engage.toml
+[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml
 [engage]: https://charles.page.computer.surgery/engage/
 [sytest]: https://github.com/matrix-org/sytest/
 [cargo-deb]: https://github.com/kornelski/cargo-deb
@@ -146,4 +141,4 @@ conduwuit Matrix rooms for Code of Conduct violations.
 [cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit
 [direnv]: https://direnv.net/
 [mdbook]: https://rust-lang.github.io/mdBook/
-[documentation.yml]: https://github.com/girlbossceo/conduwuit/blob/main/.github/workflows/documentation.yml
+[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml
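As a companion to the "Running CI tests locally" section in the contributing guide above, a minimal sketch of the engage workflow it describes (not part of this changeset; it assumes a working Rust toolchain and that running engage with no arguments executes the tasks defined in engage.toml):

```bash
# Install engage (also packaged in nixpkgs, as the guide notes).
cargo install engage

# From the repository root, run the CI task set defined in engage.toml.
engage

# See available flags and subcommands.
engage --help
```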
Cargo.lock (generated, 535 lines changed): file diff suppressed because it is too large.
Cargo.toml (22 lines changed)
@@ -298,7 +298,7 @@ version = "1.15.0"
 default-features = false
 features = ["serde"]

-# Used for reading the configuration from conduwuit.toml & environment variables
+# Used for reading the configuration from continuwuity.toml & environment variables
 [workspace.dependencies.figment]
 version = "0.10.19"
 default-features = false
@@ -350,7 +350,7 @@ version = "0.1.2"
 [workspace.dependencies.ruma]
 git = "https://forgejo.ellis.link/continuwuation/ruwuma"
 #branch = "conduwuit-changes"
-rev = "652cc4864203ab7ca60cf9c47b931c0385304cc7"
+rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983"
 features = [
 	"compat",
 	"rand",
@@ -626,6 +626,17 @@ package = "conduwuit_macros"
 path = "src/macros"
 default-features = false

+[workspace.dependencies.conduwuit-web]
+package = "conduwuit_web"
+path = "src/web"
+default-features = false
+
+
+[workspace.dependencies.conduwuit-build-metadata]
+package = "conduwuit_build_metadata"
+path = "src/build_metadata"
+default-features = false
+
 ###############################################################################
 #
 # Release profiles
@@ -734,7 +745,6 @@ incremental = true

 [profile.dev.package.conduwuit_core]
 inherits = "dev"
-incremental = false
 #rustflags = [
 #	'--cfg', 'conduwuit_mods',
 #	'-Ztime-passes',
@@ -774,7 +784,6 @@ inherits = "dev"
 [profile.dev.package.'*']
 inherits = "dev"
 debug = 'limited'
-incremental = false
 codegen-units = 1
 opt-level = 'z'
 #rustflags = [
@@ -796,7 +805,6 @@ inherits = "dev"
 strip = false
 opt-level = 0
 codegen-units = 16
-incremental = false

 [profile.test.package.'*']
 inherits = "dev"
@@ -804,7 +812,6 @@ debug = 0
 strip = false
 opt-level = 0
 codegen-units = 16
-incremental = false

 ###############################################################################
 #
@@ -981,3 +988,6 @@ let_underscore_future = { level = "allow", priority = 1 }

 # rust doesnt understand conduwuit's custom log macros
 literal_string_with_formatting_args = { level = "allow", priority = 1 }
+
+
+needless_raw_string_hashes = "allow"
@@ -11,12 +11,17 @@ It's a community continuation of the [conduwuit](https://github.com/girlbossceo/

 <!-- ANCHOR: body -->

+[](https://forgejo.ellis.link/continuwuation/continuwuity)  [](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open)
+
+[](https://github.com/continuwuity/continuwuity) 
+
+[](https://codeberg.org/nexy7574/continuwuity) 

 ### Why does this exist?

 The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features.

-We aim to provide a stable, well-maintained alternative for current Conduit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver.
+We aim to provide a stable, well-maintained alternative for current conduwuit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver.

 ### Who are we?
@@ -112,4 +117,3 @@ Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [


 [continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity
-
SECURITY.md (new file, 63 lines)
@@ -0,0 +1,63 @@
+# Security Policy for Continuwuity
+
+This document outlines the security policy for Continuwuity. Our goal is to maintain a secure platform for all users, and we take security matters seriously.
+
+## Supported Versions
+
+We provide security updates for the following versions of Continuwuity:
+
+| Version | Supported |
+| -------------- |:----------------:|
+| Latest release | ✅ |
+| Main branch | ✅ |
+| Older releases | ❌ |
+
+We may backport fixes to the previous release at our discretion, but we don't guarantee this.
+
+## Reporting a Vulnerability
+
+### Responsible Disclosure
+
+We appreciate the efforts of security researchers and the community in identifying and reporting vulnerabilities. To ensure that potential vulnerabilities are addressed properly, please follow these guidelines:
+
+1. Contact members of the team over E2EE private message.
+   - [@jade:ellis.link](https://matrix.to/#/@jade:ellis.link)
+   - [@nex:nexy7574.co.uk](https://matrix.to/#/@nex:nexy7574.co.uk) <!-- ? -->
+2. **Email the security team** directly at [security@continuwuity.org](mailto:security@continuwuity.org). This is not E2EE, so don't include sensitive details.
+3. **Do not disclose the vulnerability publicly** until it has been addressed
+4. **Provide detailed information** about the vulnerability, including:
+   - A clear description of the issue
+   - Steps to reproduce
+   - Potential impact
+   - Any possible mitigations
+   - Version(s) affected, including specific commits if possible
+
+If you have any doubts about a potential security vulnerability, contact us via private channels first! We'd prefer that you bother us, instead of having a vulnerability disclosed without a fix.
+
+### What to Expect
+
+When you report a security vulnerability:
+
+1. **Acknowledgment**: We will acknowledge receipt of your report.
+2. **Assessment**: We will assess the vulnerability and determine its impact on our users
+3. **Updates**: We will provide updates on our progress in addressing the vulnerability, and may request you help test mitigations
+4. **Resolution**: Once resolved, we will notify you and discuss coordinated disclosure
+5. **Credit**: We will recognize your contribution (unless you prefer to remain anonymous)
+
+## Security Update Process
+
+When security vulnerabilities are identified:
+
+1. We will develop and test fixes in a private branch
+2. Security updates will be released as soon as possible
+3. Release notes will include information about the vulnerabilities, avoiding details that could facilitate exploitation where possible
+4. Critical security updates may be backported to the previous stable release
+
+## Additional Resources
+
+- [Matrix Security Disclosure Policy](https://matrix.org/security-disclosure-policy/)
+- [Continuwuity Documentation](https://continuwuity.org/introduction)
+
+---
+
+This security policy was last updated on May 25, 2025.
alpine/APKBUILD (new file, 70 lines)
@@ -0,0 +1,70 @@
+# Contributor: magmaus3 <maia@magmaus3.eu.org>
+# Maintainer: magmaus3 <maia@magmaus3.eu.org>
+pkgname=continuwuity
+
+# abuild doesn't like the format of v0.5.0-rc.5, so i had to change it
+# see https://wiki.alpinelinux.org/wiki/Package_policies
+pkgver=0.5.0_rc5
+pkgrel=0
+pkgdesc="a continuwuation of a very cool, featureful fork of conduit"
+url="https://continuwuity.org/"
+arch="all"
+license="Apache-2.0"
+depends="liburing"
+
+# cargo version on alpine v3.21 is too old to use the 2024 edition
+# i recommend either building everything on edge, or adding
+# the edge repo as a tag
+makedepends="cargo liburing-dev clang-dev linux-headers"
+checkdepends=""
+install="$pkgname.pre-install"
+subpackages="$pkgname-openrc"
+source="https://forgejo.ellis.link/continuwuation/continuwuity/archive/v0.5.0-rc.5.tar.gz
+	continuwuity.initd
+	continuwuity.confd
+	"
+_giturl="https://forgejo.ellis.link/continuwuation/continuwuity"
+_gitbranch="main"
+builddir="$srcdir/continuwuity"
+options="net !check"
+
+#snapshot() {
+#	# used for building from git
+#	git clone --depth=1 $_giturl -b $_gitbranch
+#}
+
+prepare() {
+	default_prepare
+	cd $srcdir/continuwuity
+
+	# add the default database path to the config (commented out)
+	cat conduwuit-example.toml \
+		| sed '/#database_path/ s:$: "/var/lib/continuwuity":' \
+		> "$srcdir"/continuwuity.toml
+
+	cargo fetch --target="$CTARGET" --locked
+}
+
+build() {
+	cargo build --frozen --release --all-features
+}
+
+check() {
+	# TODO: make sure the tests work
+	#cargo test --frozen
+	return
+}
+
+package() {
+	cd $srcdir
+	install -Dm755 continuwuity/target/release/conduwuit "$pkgdir"/usr/bin/continuwuity
+	install -Dm644 "$srcdir"/continuwuity.toml -t "$pkgdir"/etc/continuwuity
+	install -Dm755 "$srcdir"/continuwuity.initd "$pkgdir"/etc/init.d/continuwuity
+	install -Dm644 "$srcdir"/continuwuity.confd "$pkgdir"/etc/conf.d/continuwuity
+}
+
+sha512sums="
+66f6da5e98b6f7bb8c1082500101d5c87b1b79955c139b44c6ef5123919fb05feb0dffc669a3af1bc8d571ddb9f3576660f08dc10a6b19eab6db9e391175436a  v0.5.0-rc.5.tar.gz
+0482674be24740496d70da256d4121c5a5e3b749f2445d2bbe0e8991f1449de052724f8427da21a6f55574bc53eac9ca1e47e5012b4c13049b2b39044734d80d  continuwuity.initd
+38e2576278b450d16ba804dd8f4a128f18cd793e6c3ce55aedee1e186905755b31ee23baaa6586b1ab0e25a1f29bf1ea86bfaae4185b0cb1a29203726a199426  continuwuity.confd
+"
alpine/README.md (new file, 7 lines)
@@ -0,0 +1,7 @@
+# building
+
+1. [set up your build
+   environment](https://wiki.alpinelinux.org/wiki/Include:Setup_your_system_and_account_for_building_packages)
+
+2. run `abuild` (or `abuild -K` if you want to keep the source directory to make
+   rebuilding faster)
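For orientation, a condensed sketch of those two steps on an Alpine host (not part of this changeset; the setup commands are summarised from the linked wiki page, so treat them as illustrative and run the privileged ones as root):

```bash
# 1. One-time build environment setup (see the Alpine wiki page linked above).
apk add alpine-sdk          # abuild and the base build tools
addgroup "$USER" abuild     # allow your user to run abuild
abuild-keygen -a -i         # generate and install a package signing key

# 2. Build the package from this directory.
cd alpine
abuild -r                   # or `abuild -K -r` to keep the source dir for faster rebuilds
```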
alpine/continuwuity.confd (new file, 3 lines)
@@ -0,0 +1,3 @@
+supervisor=supervise-daemon
+export CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml
+
alpine/continuwuity.initd (new file, 19 lines)
@@ -0,0 +1,19 @@
+#!/sbin/openrc-run
+
+command="/usr/bin/continuwuity"
+command_user="continuwuity:continuwuity"
+command_args="--config ${CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml}"
+command_background=true
+pidfile="/run/$RC_SVCNAME.pid"
+
+output_log="/var/log/continuwuity.log"
+error_log="/var/log/continuwuity.log"
+
+depend() {
+	need net
+}
+
+start_pre() {
+	checkpath -d -m 0755 -o "$command_user" /var/lib/continuwuity
+	checkpath -f -m 0644 -o "$command_user" "$output_log"
+}
alpine/continuwuity.pre-install (new file, 4 lines)
@@ -0,0 +1,4 @@
+#!/bin/sh
+addgroup -S continuwuity 2>/dev/null
+adduser -S -D -H -h /var/lib/continuwuity -s /sbin/nologin -G continuwuity -g continuwuity continuwuity 2>/dev/null
+exit 0
@@ -1,11 +1,11 @@
 [Unit]
-Description=conduwuit Matrix homeserver
+Description=Continuwuity - Matrix homeserver
 Wants=network-online.target
 After=network-online.target
-Documentation=https://conduwuit.puppyirl.gay/
+Documentation=https://continuwuity.org/
 RequiresMountsFor=/var/lib/private/conduwuit
 Alias=matrix-conduwuit.service

 [Service]
 DynamicUser=yes
 Type=notify-reload
@@ -59,7 +59,7 @@ StateDirectory=conduwuit
 RuntimeDirectory=conduwuit
 RuntimeDirectoryMode=0750

-Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml"
+Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
 BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
 BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit
@ -1,4 +1,4 @@
|
||||||
### conduwuit Configuration
|
### continuwuity Configuration
|
||||||
###
|
###
|
||||||
### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE
|
### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE
|
||||||
### OVERWRITTEN!
|
### OVERWRITTEN!
|
||||||
|
@ -13,7 +13,7 @@
|
||||||
### that say "YOU NEED TO EDIT THIS".
|
### that say "YOU NEED TO EDIT THIS".
|
||||||
###
|
###
|
||||||
### For more information, see:
|
### For more information, see:
|
||||||
### https://conduwuit.puppyirl.gay/configuration.html
|
### https://continuwuity.org/configuration.html
|
||||||
|
|
||||||
[global]
|
[global]
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@
|
||||||
# suffix for user and room IDs/aliases.
|
# suffix for user and room IDs/aliases.
|
||||||
#
|
#
|
||||||
# See the docs for reverse proxying and delegation:
|
# See the docs for reverse proxying and delegation:
|
||||||
# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy
|
# https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy
|
||||||
#
|
#
|
||||||
# Also see the `[global.well_known]` config section at the very bottom.
|
# Also see the `[global.well_known]` config section at the very bottom.
|
||||||
#
|
#
|
||||||
|
@ -32,11 +32,11 @@
|
||||||
# YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE
|
# YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE
|
||||||
# WIPE.
|
# WIPE.
|
||||||
#
|
#
|
||||||
# example: "conduwuit.woof"
|
# example: "continuwuity.org"
|
||||||
#
|
#
|
||||||
#server_name =
|
#server_name =
|
||||||
|
|
||||||
# The default address (IPv4 or IPv6) conduwuit will listen on.
|
# The default address (IPv4 or IPv6) continuwuity will listen on.
|
||||||
#
|
#
|
||||||
# If you are using Docker or a container NAT networking setup, this must
|
# If you are using Docker or a container NAT networking setup, this must
|
||||||
# be "0.0.0.0".
|
# be "0.0.0.0".
|
||||||
|
@ -46,10 +46,10 @@
|
||||||
#
|
#
|
||||||
#address = ["127.0.0.1", "::1"]
|
#address = ["127.0.0.1", "::1"]
|
||||||
|
|
||||||
# The port(s) conduwuit will listen on.
|
# The port(s) continuwuity will listen on.
|
||||||
#
|
#
|
||||||
# For reverse proxying, see:
|
# For reverse proxying, see:
|
||||||
# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy
|
# https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy
|
||||||
#
|
#
|
||||||
# If you are using Docker, don't change this, you'll need to map an
|
# If you are using Docker, don't change this, you'll need to map an
|
||||||
# external port to this.
|
# external port to this.
|
||||||
|
@ -58,16 +58,17 @@
|
||||||
#
|
#
|
||||||
#port = 8008
|
#port = 8008
|
||||||
|
|
||||||
# The UNIX socket conduwuit will listen on.
|
# The UNIX socket continuwuity will listen on.
|
||||||
#
|
#
|
||||||
# conduwuit cannot listen on both an IP address and a UNIX socket. If
|
# continuwuity cannot listen on both an IP address and a UNIX socket. If
|
||||||
# listening on a UNIX socket, you MUST remove/comment the `address` key.
|
# listening on a UNIX socket, you MUST remove/comment the `address` key.
|
||||||
#
|
#
|
||||||
# Remember to make sure that your reverse proxy has access to this socket
|
# Remember to make sure that your reverse proxy has access to this socket
|
||||||
# file, either by adding your reverse proxy to the 'conduwuit' group or
|
# file, either by adding your reverse proxy to the appropriate user group
|
||||||
# granting world R/W permissions with `unix_socket_perms` (666 minimum).
|
# or granting world R/W permissions with `unix_socket_perms` (666
|
||||||
|
# minimum).
|
||||||
#
|
#
|
||||||
# example: "/run/conduwuit/conduwuit.sock"
|
# example: "/run/continuwuity/continuwuity.sock"
|
||||||
#
|
#
|
||||||
#unix_socket_path =
|
#unix_socket_path =
|
||||||
|
|
||||||
|
@ -75,23 +76,23 @@
|
||||||
#
|
#
|
||||||
#unix_socket_perms = 660
|
#unix_socket_perms = 660
|
||||||
|
|
||||||
# This is the only directory where conduwuit will save its data, including
|
# This is the only directory where continuwuity will save its data,
|
||||||
# media. Note: this was previously "/var/lib/matrix-conduit".
|
# including media. Note: this was previously "/var/lib/matrix-conduit".
|
||||||
#
|
#
|
||||||
# YOU NEED TO EDIT THIS.
|
# YOU NEED TO EDIT THIS.
|
||||||
#
|
#
|
||||||
# example: "/var/lib/conduwuit"
|
# example: "/var/lib/continuwuity"
|
||||||
#
|
#
|
||||||
#database_path =
|
#database_path =
|
||||||
|
|
||||||
# conduwuit supports online database backups using RocksDB's Backup engine
|
# continuwuity supports online database backups using RocksDB's Backup
|
||||||
# API. To use this, set a database backup path that conduwuit can write
|
# engine API. To use this, set a database backup path that continuwuity
|
||||||
# to.
|
# can write to.
|
||||||
#
|
#
|
||||||
# For more information, see:
|
# For more information, see:
|
||||||
# https://conduwuit.puppyirl.gay/maintenance.html#backups
|
# https://continuwuity.org/maintenance.html#backups
|
||||||
#
|
#
|
||||||
# example: "/opt/conduwuit-db-backups"
|
# example: "/opt/continuwuity-db-backups"
|
||||||
#
|
#
|
||||||
#database_backup_path =
|
#database_backup_path =
|
||||||
|
|
||||||
|
@ -112,14 +113,14 @@
|
||||||
#
|
#
|
||||||
#new_user_displayname_suffix = "🏳️⚧️"
|
#new_user_displayname_suffix = "🏳️⚧️"
|
||||||
|
|
||||||
# If enabled, conduwuit will send a simple GET request periodically to
|
# If enabled, continuwuity will send a simple GET request periodically to
|
||||||
# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new
|
# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new
|
||||||
# announcements or major updates. This is not an update check endpoint.
|
# announcements or major updates. This is not an update check endpoint.
|
||||||
#
|
#
|
||||||
#allow_announcements_check = true
|
#allow_announcements_check = true
|
||||||
|
|
||||||
# Set this to any float value to multiply conduwuit's in-memory LRU caches
|
# Set this to any float value to multiply continuwuity's in-memory LRU
|
||||||
# with such as "auth_chain_cache_capacity".
|
# caches with such as "auth_chain_cache_capacity".
|
||||||
#
|
#
|
||||||
# May be useful if you have significant memory to spare to increase
|
# May be useful if you have significant memory to spare to increase
|
||||||
# performance.
|
# performance.
|
||||||
|
@ -131,7 +132,7 @@
|
||||||
#
|
#
|
||||||
#cache_capacity_modifier = 1.0
|
#cache_capacity_modifier = 1.0
|
||||||
|
|
||||||
# Set this to any float value in megabytes for conduwuit to tell the
|
# Set this to any float value in megabytes for continuwuity to tell the
|
||||||
# database engine that this much memory is available for database read
|
# database engine that this much memory is available for database read
|
||||||
# caches.
|
# caches.
|
||||||
#
|
#
|
||||||
|
@ -145,7 +146,7 @@
|
||||||
#
|
#
|
||||||
#db_cache_capacity_mb = varies by system
|
#db_cache_capacity_mb = varies by system
|
||||||
|
|
||||||
# Set this to any float value in megabytes for conduwuit to tell the
|
# Set this to any float value in megabytes for continuwuity to tell the
|
||||||
# database engine that this much memory is available for database write
|
# database engine that this much memory is available for database write
|
||||||
# caches.
|
# caches.
|
||||||
#
|
#
|
||||||
|
@ -250,9 +251,9 @@
|
||||||
# Enable using *only* TCP for querying your specified nameservers instead
|
# Enable using *only* TCP for querying your specified nameservers instead
|
||||||
# of UDP.
|
# of UDP.
|
||||||
#
|
#
|
||||||
# If you are running conduwuit in a container environment, this config
|
# If you are running continuwuity in a container environment, this config
|
||||||
# option may need to be enabled. For more details, see:
|
# option may need to be enabled. For more details, see:
|
||||||
# https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker
|
# https://continuwuity.org/troubleshooting.html#potential-dns-issues-when-using-docker
|
||||||
#
|
#
|
||||||
#query_over_tcp_only = false
|
#query_over_tcp_only = false
|
||||||
|
|
||||||
|
@ -418,9 +419,9 @@
|
||||||
# tokens. Multiple tokens can be added if you separate them with
|
# tokens. Multiple tokens can be added if you separate them with
|
||||||
# whitespace
|
# whitespace
|
||||||
#
|
#
|
||||||
# conduwuit must be able to access the file, and it must not be empty
|
# continuwuity must be able to access the file, and it must not be empty
|
||||||
#
|
#
|
||||||
# example: "/etc/conduwuit/.reg_token"
|
# example: "/etc/continuwuity/.reg_token"
|
||||||
#
|
#
|
||||||
#registration_token_file =
|
#registration_token_file =
|
||||||
|
|
||||||
|
@ -512,16 +513,16 @@
|
||||||
#allow_room_creation = true
|
#allow_room_creation = true
|
||||||
|
|
||||||
# Set to false to disable users from joining or creating room versions
|
# Set to false to disable users from joining or creating room versions
|
||||||
# that aren't officially supported by conduwuit.
|
# that aren't officially supported by continuwuity.
|
||||||
#
|
#
|
||||||
# conduwuit officially supports room versions 6 - 11.
|
# continuwuity officially supports room versions 6 - 11.
|
||||||
#
|
#
|
||||||
# conduwuit has slightly experimental (though works fine in practice)
|
# continuwuity has slightly experimental (though works fine in practice)
|
||||||
# support for versions 3 - 5.
|
# support for versions 3 - 5.
|
||||||
#
|
#
|
||||||
#allow_unstable_room_versions = true
|
#allow_unstable_room_versions = true
|
||||||
|
|
||||||
# Default room version conduwuit will create rooms with.
|
# Default room version continuwuity will create rooms with.
|
||||||
#
|
#
|
||||||
# Per spec, room version 11 is the default.
|
# Per spec, room version 11 is the default.
|
||||||
#
|
#
|
||||||
|
@ -587,7 +588,7 @@
|
||||||
# Servers listed here will be used to gather public keys of other servers
|
# Servers listed here will be used to gather public keys of other servers
|
||||||
# (notary trusted key servers).
|
# (notary trusted key servers).
|
||||||
#
|
#
|
||||||
# Currently, conduwuit doesn't support inbound batched key requests, so
|
# Currently, continuwuity doesn't support inbound batched key requests, so
|
||||||
# this list should only contain other Synapse servers.
|
# this list should only contain other Synapse servers.
|
||||||
#
|
#
|
||||||
# example: ["matrix.org", "tchncs.de"]
|
# example: ["matrix.org", "tchncs.de"]
|
||||||
|
@ -628,7 +629,7 @@
|
||||||
#
|
#
|
||||||
#trusted_server_batch_size = 1024
|
#trusted_server_batch_size = 1024
|
||||||
|
|
||||||
# Max log level for conduwuit. Allows debug, info, warn, or error.
|
# Max log level for continuwuity. Allows debug, info, warn, or error.
|
||||||
#
|
#
|
||||||
# See also:
|
# See also:
|
||||||
# https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
|
# https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
|
||||||
|
@ -649,8 +650,9 @@
|
||||||
#
|
#
|
||||||
#log_span_events = "none"
|
#log_span_events = "none"
|
||||||
|
|
||||||
# Configures whether CONDUWUIT_LOG EnvFilter matches values using regular
|
# Configures whether CONTINUWUITY_LOG EnvFilter matches values using
|
||||||
# expressions. See the tracing_subscriber documentation on Directives.
|
# regular expressions. See the tracing_subscriber documentation on
|
||||||
|
# Directives.
|
||||||
#
|
#
|
||||||
#log_filter_regex = true
|
#log_filter_regex = true
|
||||||
|
|
||||||
|
@ -718,7 +720,7 @@
|
||||||
# This takes priority over "turn_secret" first, and falls back to
|
# This takes priority over "turn_secret" first, and falls back to
|
||||||
# "turn_secret" if invalid or failed to open.
|
# "turn_secret" if invalid or failed to open.
|
||||||
#
|
#
|
||||||
# example: "/etc/conduwuit/.turn_secret"
|
# example: "/etc/continuwuity/.turn_secret"
|
||||||
#
|
#
|
||||||
#turn_secret_file =
|
#turn_secret_file =
|
||||||
|
|
||||||
|
@ -726,12 +728,12 @@
|
||||||
#
|
#
|
||||||
#turn_ttl = 86400
|
#turn_ttl = 86400
|
||||||
|
|
||||||
# List/vector of room IDs or room aliases that conduwuit will make newly
|
# List/vector of room IDs or room aliases that continuwuity will make
|
||||||
# registered users join. The rooms specified must be rooms that you have
|
# newly registered users join. The rooms specified must be rooms that you
|
||||||
# joined at least once on the server, and must be public.
|
# have joined at least once on the server, and must be public.
|
||||||
#
|
#
|
||||||
# example: ["#conduwuit:puppygock.gay",
|
# example: ["#continuwuity:continuwuity.org",
|
||||||
# "!eoIzvAvVwY23LPDay8:puppygock.gay"]
|
# "!main-1:continuwuity.org"]
|
||||||
#
|
#
|
||||||
#auto_join_rooms = []
|
#auto_join_rooms = []
|
||||||
|
|
||||||
|
@ -754,10 +756,10 @@
|
||||||
#
|
#
|
||||||
#auto_deactivate_banned_room_attempts = false
|
#auto_deactivate_banned_room_attempts = false
|
||||||
|
|
||||||
# RocksDB log level. This is not the same as conduwuit's log level. This
|
# RocksDB log level. This is not the same as continuwuity's log level.
|
||||||
# is the log level for the RocksDB engine/library which show up in your
|
# This is the log level for the RocksDB engine/library which show up in
|
||||||
# database folder/path as `LOG` files. conduwuit will log RocksDB errors
|
# your database folder/path as `LOG` files. continuwuity will log RocksDB
|
||||||
# as normal through tracing or panics if severe for safety.
|
# errors as normal through tracing or panics if severe for safety.
|
||||||
#
|
#
|
||||||
#rocksdb_log_level = "error"
|
#rocksdb_log_level = "error"
|
||||||
|
|
||||||
|
@ -777,7 +779,7 @@
|
||||||
# Set this to true to use RocksDB config options that are tailored to HDDs
|
# Set this to true to use RocksDB config options that are tailored to HDDs
|
||||||
# (slower device storage).
|
# (slower device storage).
|
||||||
#
|
#
|
||||||
# It is worth noting that by default, conduwuit will use RocksDB with
|
# It is worth noting that by default, continuwuity will use RocksDB with
|
||||||
# Direct IO enabled. *Generally* speaking this improves performance as it
|
# Direct IO enabled. *Generally* speaking this improves performance as it
|
||||||
# bypasses buffered I/O (system page cache). However there is a potential
|
# bypasses buffered I/O (system page cache). However there is a potential
|
||||||
# chance that Direct IO may cause issues with database operations if your
|
# chance that Direct IO may cause issues with database operations if your
|
||||||
|
@ -785,7 +787,7 @@
|
||||||
# possibly ZFS filesystem. RocksDB generally deals/corrects these issues
|
# possibly ZFS filesystem. RocksDB generally deals/corrects these issues
|
||||||
# but it cannot account for all setups. If you experience any weird
|
# but it cannot account for all setups. If you experience any weird
|
||||||
# RocksDB issues, try enabling this option as it turns off Direct IO and
|
# RocksDB issues, try enabling this option as it turns off Direct IO and
|
||||||
# feel free to report in the conduwuit Matrix room if this option fixes
|
# feel free to report in the continuwuity Matrix room if this option fixes
|
||||||
# your DB issues.
|
# your DB issues.
|
||||||
#
|
#
|
||||||
# For more information, see:
|
# For more information, see:
|
||||||
|
@ -840,7 +842,7 @@
|
||||||
# as they all differ. See their `kDefaultCompressionLevel`.
|
# as they all differ. See their `kDefaultCompressionLevel`.
|
||||||
#
|
#
|
||||||
# Note when using the default value we may override it with a setting
|
# Note when using the default value we may override it with a setting
|
||||||
# tailored specifically conduwuit.
|
# tailored specifically for continuwuity.
|
||||||
#
|
#
|
||||||
#rocksdb_compression_level = 32767
|
#rocksdb_compression_level = 32767
|
||||||
|
|
||||||
|
@ -856,7 +858,7 @@
|
||||||
# algorithm.
|
# algorithm.
|
||||||
#
|
#
|
||||||
# Note when using the default value we may override it with a setting
|
# Note when using the default value we may override it with a setting
|
||||||
# tailored specifically conduwuit.
|
# tailored specifically for continuwuity.
|
||||||
#
|
#
|
||||||
#rocksdb_bottommost_compression_level = 32767
|
#rocksdb_bottommost_compression_level = 32767
|
||||||
|
|
||||||
|
@@ -896,13 +898,13 @@
 # 0 = AbsoluteConsistency
 # 1 = TolerateCorruptedTailRecords (default)
 # 2 = PointInTime (use me if trying to recover)
-# 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty)
+# 3 = SkipAnyCorruptedRecord (you now voided your Continuwuity warranty)
 #
 # For more information on these modes, see:
 # https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes
 #
 # For more details on recovering a corrupt database, see:
-# https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption
+# https://continuwuity.org/troubleshooting.html#database-corruption
 #
 #rocksdb_recovery_mode = 1

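As a sketch of the recovery workflow the comment hints at (assuming the `[global]` table of the example config): switch to PointInTime while recovering, then return to the default once the database is healthy again.

```toml
[global]
# 2 = PointInTime, the mode the comment above recommends for recovery.
rocksdb_recovery_mode = 2
# Revert to the default once recovery has completed:
#rocksdb_recovery_mode = 1
```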
@ -942,7 +944,7 @@
|
||||||
# - Disabling repair mode and restarting the server is recommended after
|
# - Disabling repair mode and restarting the server is recommended after
|
||||||
# running the repair.
|
# running the repair.
|
||||||
#
|
#
|
||||||
# See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database.
|
# See https://continuwuity.org/troubleshooting.html#database-corruption for more details on recovering a corrupt database.
|
||||||
#
|
#
|
||||||
#rocksdb_repair = false
|
#rocksdb_repair = false
|
||||||
|
|
||||||
|
@ -969,7 +971,7 @@
|
||||||
# Enables RocksDB compaction. You should never ever have to set this
|
# Enables RocksDB compaction. You should never ever have to set this
|
||||||
# option to false. If you for some reason find yourself needing to use
|
# option to false. If you for some reason find yourself needing to use
|
||||||
# this option as part of troubleshooting or a bug, please reach out to us
|
# this option as part of troubleshooting or a bug, please reach out to us
|
||||||
# in the conduwuit Matrix room with information and details.
|
# in the continuwuity Matrix room with information and details.
|
||||||
#
|
#
|
||||||
# Disabling compaction will lead to a significantly bloated and
|
# Disabling compaction will lead to a significantly bloated and
|
||||||
# explosively large database, gradually poor performance, unnecessarily
|
# explosively large database, gradually poor performance, unnecessarily
|
||||||
|
@ -995,7 +997,7 @@
|
||||||
# purposes such as recovering/recreating your admin room, or inviting
|
# purposes such as recovering/recreating your admin room, or inviting
|
||||||
# yourself back.
|
# yourself back.
|
||||||
#
|
#
|
||||||
# See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room.
|
# See https://continuwuity.org/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room.
|
||||||
#
|
#
|
||||||
# Once this password is unset, all sessions will be logged out for
|
# Once this password is unset, all sessions will be logged out for
|
||||||
# security purposes.
|
# security purposes.
|
||||||
|
@@ -1010,8 +1012,8 @@

 # Allow local (your server only) presence updates/requests.
 #
-# Note that presence on conduwuit is very fast unlike Synapse's. If using
-# outgoing presence, this MUST be enabled.
+# Note that presence on continuwuity is very fast unlike Synapse's. If
+# using outgoing presence, this MUST be enabled.
 #
 #allow_local_presence = true

@@ -1019,7 +1021,7 @@
 #
 # This option receives presence updates from other servers, but does not
 # send any unless `allow_outgoing_presence` is true. Note that presence on
-# conduwuit is very fast unlike Synapse's.
+# continuwuity is very fast unlike Synapse's.
 #
 #allow_incoming_presence = true

@@ -1027,8 +1029,8 @@
 #
 # This option sends presence updates to other servers, but does not
 # receive any unless `allow_incoming_presence` is true. Note that presence
-# on conduwuit is very fast unlike Synapse's. If using outgoing presence,
-# you MUST enable `allow_local_presence` as well.
+# on continuwuity is very fast unlike Synapse's. If using outgoing
+# presence, you MUST enable `allow_local_presence` as well.
 #
 #allow_outgoing_presence = true

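Taken together, the three presence toggles look like this when set explicitly; all three default to true per the commented defaults above (shown here under the assumed `[global]` table):

```toml
[global]
allow_local_presence = true     # required if outgoing presence is used
allow_incoming_presence = true  # receive presence from other servers
allow_outgoing_presence = true  # send presence to other servers
```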
@ -1081,8 +1083,8 @@
|
||||||
#
|
#
|
||||||
#typing_client_timeout_max_s = 45
|
#typing_client_timeout_max_s = 45
|
||||||
|
|
||||||
# Set this to true for conduwuit to compress HTTP response bodies using
|
# Set this to true for continuwuity to compress HTTP response bodies using
|
||||||
# zstd. This option does nothing if conduwuit was not built with
|
# zstd. This option does nothing if continuwuity was not built with
|
||||||
# `zstd_compression` feature. Please be aware that enabling HTTP
|
# `zstd_compression` feature. Please be aware that enabling HTTP
|
||||||
# compression may weaken TLS. Most users should not need to enable this.
|
# compression may weaken TLS. Most users should not need to enable this.
|
||||||
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH
|
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH
|
||||||
|
@ -1090,8 +1092,8 @@
|
||||||
#
|
#
|
||||||
#zstd_compression = false
|
#zstd_compression = false
|
||||||
|
|
||||||
# Set this to true for conduwuit to compress HTTP response bodies using
|
# Set this to true for continuwuity to compress HTTP response bodies using
|
||||||
# gzip. This option does nothing if conduwuit was not built with
|
# gzip. This option does nothing if continuwuity was not built with
|
||||||
# `gzip_compression` feature. Please be aware that enabling HTTP
|
# `gzip_compression` feature. Please be aware that enabling HTTP
|
||||||
# compression may weaken TLS. Most users should not need to enable this.
|
# compression may weaken TLS. Most users should not need to enable this.
|
||||||
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before
|
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before
|
||||||
|
@ -1102,8 +1104,8 @@
|
||||||
#
|
#
|
||||||
#gzip_compression = false
|
#gzip_compression = false
|
||||||
|
|
||||||
# Set this to true for conduwuit to compress HTTP response bodies using
|
# Set this to true for continuwuity to compress HTTP response bodies using
|
||||||
# brotli. This option does nothing if conduwuit was not built with
|
# brotli. This option does nothing if continuwuity was not built with
|
||||||
# `brotli_compression` feature. Please be aware that enabling HTTP
|
# `brotli_compression` feature. Please be aware that enabling HTTP
|
||||||
# compression may weaken TLS. Most users should not need to enable this.
|
# compression may weaken TLS. Most users should not need to enable this.
|
||||||
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH
|
# See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH
|
||||||
|
@ -1165,7 +1167,7 @@
|
||||||
# Otherwise setting this to false reduces filesystem clutter and overhead
|
# Otherwise setting this to false reduces filesystem clutter and overhead
|
||||||
# for managing these symlinks in the directory. This is now disabled by
|
# for managing these symlinks in the directory. This is now disabled by
|
||||||
# default. You may still return to upstream Conduit but you have to run
|
# default. You may still return to upstream Conduit but you have to run
|
||||||
# conduwuit at least once with this set to true and allow the
|
# continuwuity at least once with this set to true and allow the
|
||||||
# media_startup_check to take place before shutting down to return to
|
# media_startup_check to take place before shutting down to return to
|
||||||
# Conduit.
|
# Conduit.
|
||||||
#
|
#
|
||||||
|
@ -1210,8 +1212,8 @@
|
||||||
#
|
#
|
||||||
#allowed_remote_server_names = []
|
#allowed_remote_server_names = []
|
||||||
|
|
||||||
# Vector list of regex patterns of server names that conduwuit will refuse
|
# Vector list of regex patterns of server names that continuwuity will
|
||||||
# to download remote media from.
|
# refuse to download remote media from.
|
||||||
#
|
#
|
||||||
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
|
# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
|
||||||
#
|
#
|
||||||
|
@ -1225,7 +1227,7 @@
|
||||||
#
|
#
|
||||||
#forbidden_remote_room_directory_server_names = []
|
#forbidden_remote_room_directory_server_names = []
|
||||||
|
|
||||||
# Vector list of regex patterns of server names that conduwuit will not
|
# Vector list of regex patterns of server names that continuwuity will not
|
||||||
# send messages to the client from.
|
# send messages to the client from.
|
||||||
#
|
#
|
||||||
# Note that there is no way for clients to receive messages once a server
|
# Note that there is no way for clients to receive messages once a server
|
||||||
|
@ -1249,7 +1251,7 @@
|
||||||
#send_messages_from_ignored_users_to_client = false
|
#send_messages_from_ignored_users_to_client = false
|
||||||
|
|
||||||
# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
|
# Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
|
||||||
# do not want conduwuit to send outbound requests to. Defaults to
|
# do not want continuwuity to send outbound requests to. Defaults to
|
||||||
# RFC1918, unroutable, loopback, multicast, and testnet addresses for
|
# RFC1918, unroutable, loopback, multicast, and testnet addresses for
|
||||||
# security.
|
# security.
|
||||||
#
|
#
|
||||||
|
@@ -1399,26 +1401,26 @@

 # Allow admins to enter commands in rooms other than "#admins" (admin
 # room) by prefixing your message with "\!admin" or "\\!admin" followed up
-# a normal conduwuit admin command. The reply will be publicly visible to
-# the room, originating from the sender.
+# a normal continuwuity admin command. The reply will be publicly visible
+# to the room, originating from the sender.
 #
 # example: \\!admin debug ping puppygock.gay
 #
 #admin_escape_commands = true

-# Automatically activate the conduwuit admin room console / CLI on
-# startup. This option can also be enabled with `--console` conduwuit
+# Automatically activate the continuwuity admin room console / CLI on
+# startup. This option can also be enabled with `--console` continuwuity
 # argument.
 #
 #admin_console_automatic = false

 # List of admin commands to execute on startup.
 #
-# This option can also be configured with the `--execute` conduwuit
+# This option can also be configured with the `--execute` continuwuity
 # argument and can take standard shell commands and environment variables
 #
-# For example: `./conduwuit --execute "server admin-notice conduwuit has
-# started up at $(date)"`
+# For example: `./continuwuity --execute "server admin-notice continuwuity
+# has started up at $(date)"`
 #
 # example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]`
 #
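A sketch of startup commands, using the exact example values from the comment above and assuming the `[global]` table of the example config; each entry is one admin command executed after startup:

```toml
[global]
admin_execute = [
    "debug ping puppygock.gay",
    "debug echo hi",
]
```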
@ -1426,7 +1428,7 @@
|
||||||
|
|
||||||
# Ignore errors in startup commands.
|
# Ignore errors in startup commands.
|
||||||
#
|
#
|
||||||
# If false, conduwuit will error and fail to start if an admin execute
|
# If false, continuwuity will error and fail to start if an admin execute
|
||||||
# command (`--execute` / `admin_execute`) fails.
|
# command (`--execute` / `admin_execute`) fails.
|
||||||
#
|
#
|
||||||
#admin_execute_errors_ignore = false
|
#admin_execute_errors_ignore = false
|
||||||
|
@ -1447,15 +1449,14 @@
|
||||||
# The default room tag to apply on the admin room.
|
# The default room tag to apply on the admin room.
|
||||||
#
|
#
|
||||||
# On some clients like Element, the room tag "m.server_notice" is a
|
# On some clients like Element, the room tag "m.server_notice" is a
|
||||||
# special pinned room at the very bottom of your room list. The conduwuit
|
# special pinned room at the very bottom of your room list. The
|
||||||
# admin room can be pinned here so you always have an easy-to-access
|
# continuwuity admin room can be pinned here so you always have an
|
||||||
# shortcut dedicated to your admin room.
|
# easy-to-access shortcut dedicated to your admin room.
|
||||||
#
|
#
|
||||||
#admin_room_tag = "m.server_notice"
|
#admin_room_tag = "m.server_notice"
|
||||||
|
|
||||||
# Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
|
# Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
|
||||||
# This is NOT enabled by default. conduwuit's default Sentry reporting
|
# This is NOT enabled by default.
|
||||||
# endpoint domain is `o4506996327251968.ingest.us.sentry.io`.
|
|
||||||
#
|
#
|
||||||
#sentry = false
|
#sentry = false
|
||||||
|
|
||||||
|
@ -1463,7 +1464,7 @@
|
||||||
#
|
#
|
||||||
#sentry_endpoint = ""
|
#sentry_endpoint = ""
|
||||||
|
|
||||||
# Report your conduwuit server_name in Sentry.io crash reports and
|
# Report your continuwuity server_name in Sentry.io crash reports and
|
||||||
# metrics.
|
# metrics.
|
||||||
#
|
#
|
||||||
#sentry_send_server_name = false
|
#sentry_send_server_name = false
|
||||||
|
@ -1500,7 +1501,7 @@
|
||||||
# Enable the tokio-console. This option is only relevant to developers.
|
# Enable the tokio-console. This option is only relevant to developers.
|
||||||
#
|
#
|
||||||
# For more information, see:
|
# For more information, see:
|
||||||
# https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console
|
# https://continuwuity.org/development.html#debugging-with-tokio-console
|
||||||
#
|
#
|
||||||
#tokio_console = false
|
#tokio_console = false
|
||||||
|
|
||||||
|
@@ -1640,19 +1641,29 @@
 #
 #server =

-# This item is undocumented. Please contribute documentation for it.
+# URL to a support page for the server, which will be served as part of
+# the MSC1929 server support endpoint at /.well-known/matrix/support.
+# Will be included alongside any contact information
 #
 #support_page =

-# This item is undocumented. Please contribute documentation for it.
+# Role string for server support contacts, to be served as part of the
+# MSC1929 server support endpoint at /.well-known/matrix/support.
 #
-#support_role =
+#support_role = "m.role.admin"

-# This item is undocumented. Please contribute documentation for it.
+# Email address for server support contacts, to be served as part of the
+# MSC1929 server support endpoint.
+# This will be used along with support_mxid if specified.
 #
 #support_email =

-# This item is undocumented. Please contribute documentation for it.
+# Matrix ID for server support contacts, to be served as part of the
+# MSC1929 server support endpoint.
+# This will be used along with support_email if specified.
+#
+# If no email or mxid is specified, all of the server's admins will be
+# listed.
 #
 #support_mxid =

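A hedged sketch of what a filled-in MSC1929 block could look like, assuming these keys sit in the same well-known section as the `#server =` key above; the `example.com` addresses are placeholders, not values from this repository:

```toml
[global.well_known]
support_page = "https://example.com/matrix-support"
support_role = "m.role.admin"
support_email = "admin@example.com"
support_mxid = "@admin:example.com"
```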

debian/README.md (vendored): 4 changes
@@ -1,4 +1,4 @@
-# conduwuit for Debian
+# Continuwuity for Debian

 Information about downloading and deploying the Debian package. This may also be
 referenced for other `apt`-based distros such as Ubuntu.

@@ -22,7 +22,7 @@ options in `/etc/conduwuit/conduwuit.toml`.

 ### Running

-The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop conduwuit. The binary is installed at `/usr/sbin/conduwuit`.
+The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary is installed at `/usr/sbin/conduwuit`.

 This package assumes by default that conduwuit will be placed behind a reverse proxy. The default config options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS, so you will need to set up TLS certificates and renewal for it to work properly if you intend to federate.

debian/conduwuit.service (vendored): 7 changes
@@ -1,9 +1,10 @@
 [Unit]
-Description=conduwuit Matrix homeserver
+Description=Continuwuity - Matrix homeserver
 Wants=network-online.target
 After=network-online.target
+Documentation=https://continuwuity.org/
 Alias=matrix-conduwuit.service
-Documentation=https://conduwuit.puppyirl.gay/

 [Service]
 DynamicUser=yes

@@ -11,7 +12,7 @@ User=conduwuit
 Group=conduwuit
 Type=notify

-Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml"
+Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"

 ExecStart=/usr/sbin/conduwuit

@ -25,6 +25,7 @@ apt-get update && apt-get install -y \
|
||||||
|
|
||||||
# Create symlinks for LLVM tools
|
# Create symlinks for LLVM tools
|
||||||
RUN <<EOF
|
RUN <<EOF
|
||||||
|
set -o xtrace
|
||||||
# clang
|
# clang
|
||||||
ln -s /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang
|
ln -s /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang
|
||||||
ln -s "/usr/bin/clang++-${LLVM_VERSION}" "/usr/bin/clang++"
|
ln -s "/usr/bin/clang++-${LLVM_VERSION}" "/usr/bin/clang++"
|
||||||
|
@ -46,6 +47,7 @@ ENV LDDTREE_VERSION=0.3.7
|
||||||
|
|
||||||
# Install unpackaged tools
|
# Install unpackaged tools
|
||||||
RUN <<EOF
|
RUN <<EOF
|
||||||
|
set -o xtrace
|
||||||
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
|
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
|
||||||
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
|
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
|
||||||
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
|
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
|
||||||
|
@ -75,6 +77,7 @@ RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
|
||||||
|
|
||||||
# Configure pkg-config
|
# Configure pkg-config
|
||||||
RUN <<EOF
|
RUN <<EOF
|
||||||
|
set -o xtrace
|
||||||
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
|
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
|
||||||
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
|
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
|
||||||
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
|
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
|
||||||
|
@ -82,12 +85,14 @@ EOF
|
||||||
|
|
||||||
# Configure cc to use clang version
|
# Configure cc to use clang version
|
||||||
RUN <<EOF
|
RUN <<EOF
|
||||||
|
set -o xtrace
|
||||||
echo "CC=clang" >> /etc/environment
|
echo "CC=clang" >> /etc/environment
|
||||||
echo "CXX=clang++" >> /etc/environment
|
echo "CXX=clang++" >> /etc/environment
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# Cross-language LTO
|
# Cross-language LTO
|
||||||
RUN <<EOF
|
RUN <<EOF
|
||||||
|
set -o xtrace
|
||||||
echo "CFLAGS=-flto" >> /etc/environment
|
echo "CFLAGS=-flto" >> /etc/environment
|
||||||
echo "CXXFLAGS=-flto" >> /etc/environment
|
echo "CXXFLAGS=-flto" >> /etc/environment
|
||||||
# Linker is set to target-compatible clang by xx
|
# Linker is set to target-compatible clang by xx
|
||||||
|
@ -98,6 +103,7 @@ EOF
|
||||||
ARG TARGET_CPU=
|
ARG TARGET_CPU=
|
||||||
RUN <<EOF
|
RUN <<EOF
|
||||||
set -o allexport
|
set -o allexport
|
||||||
|
set -o xtrace
|
||||||
. /etc/environment
|
. /etc/environment
|
||||||
if [ -n "${TARGET_CPU}" ]; then
|
if [ -n "${TARGET_CPU}" ]; then
|
||||||
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
|
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
|
||||||
|
@ -111,31 +117,37 @@ RUN mkdir /out
|
||||||
|
|
||||||
FROM toolchain AS builder
|
FROM toolchain AS builder
|
||||||
|
|
||||||
# Conduwuit version info
|
|
||||||
ARG COMMIT_SHA=
|
|
||||||
ARG CONDUWUIT_VERSION_EXTRA=
|
|
||||||
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
|
|
||||||
RUN <<EOF
|
|
||||||
if [ -z "${CONDUWUIT_VERSION_EXTRA}" ]; then
|
|
||||||
echo "CONDUWUIT_VERSION_EXTRA='$(set -e; git rev-parse --short ${COMMIT_SHA:-HEAD} || echo unknown revision)'" >> /etc/environment
|
|
||||||
fi
|
|
||||||
EOF
|
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
|
|
||||||
# Verify environment configuration
|
|
||||||
RUN cat /etc/environment
|
|
||||||
RUN xx-cargo --print-target-triple
|
|
||||||
|
|
||||||
# Get source
|
# Get source
|
||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
|
||||||
|
# Verify environment configuration
|
||||||
|
RUN xx-cargo --print-target-triple
|
||||||
|
|
||||||
|
# Conduwuit version info
|
||||||
|
ARG GIT_COMMIT_HASH=
|
||||||
|
ARG GIT_COMMIT_HASH_SHORT=
|
||||||
|
ARG GIT_REMOTE_URL=
|
||||||
|
ARG GIT_REMOTE_COMMIT_URL=
|
||||||
|
ARG CONDUWUIT_VERSION_EXTRA=
|
||||||
|
ARG CONTINUWUITY_VERSION_EXTRA=
|
||||||
|
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
|
||||||
|
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
|
||||||
|
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
|
||||||
|
ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL
|
||||||
|
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
|
||||||
|
ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA
|
||||||
|
|
||||||
|
|
||||||
# Build the binary
|
# Build the binary
|
||||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||||
--mount=type=cache,target=/usr/local/cargo/git/db \
|
--mount=type=cache,target=/usr/local/cargo/git/db \
|
||||||
--mount=type=cache,target=/app/target \
|
--mount=type=cache,target=/app/target,id=cargo-target-${TARGETPLATFORM} \
|
||||||
bash <<'EOF'
|
bash <<'EOF'
|
||||||
set -o allexport
|
set -o allexport
|
||||||
|
set -o xtrace
|
||||||
. /etc/environment
|
. /etc/environment
|
||||||
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
|
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
|
||||||
jq -r ".target_directory"))
|
jq -r ".target_directory"))
|
||||||
|
@ -156,6 +168,7 @@ EOF
|
||||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||||
--mount=type=cache,target=/usr/local/cargo/git/db \
|
--mount=type=cache,target=/usr/local/cargo/git/db \
|
||||||
bash <<'EOF'
|
bash <<'EOF'
|
||||||
|
set -o xtrace
|
||||||
mkdir /out/sbom
|
mkdir /out/sbom
|
||||||
typeset -A PACKAGES
|
typeset -A PACKAGES
|
||||||
for BINARY in /out/sbin/*; do
|
for BINARY in /out/sbin/*; do
|
||||||
|
@ -174,6 +187,7 @@ EOF
|
||||||
|
|
||||||
# Extract dynamically linked dependencies
|
# Extract dynamically linked dependencies
|
||||||
RUN <<EOF
|
RUN <<EOF
|
||||||
|
set -o xtrace
|
||||||
mkdir /out/libs
|
mkdir /out/libs
|
||||||
mkdir /out/libs-root
|
mkdir /out/libs-root
|
||||||
for BINARY in /out/sbin/*; do
|
for BINARY in /out/sbin/*; do
|
||||||
|
|
|
@ -20,3 +20,4 @@
|
||||||
- [Testing](development/testing.md)
|
- [Testing](development/testing.md)
|
||||||
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
|
- [Hot Reloading ("Live" Development)](development/hot_reload.md)
|
||||||
- [Community (and Guidelines)](community.md)
|
- [Community (and Guidelines)](community.md)
|
||||||
|
- [Security](security.md)
|
||||||
|
|
|
@@ -7,30 +7,30 @@ services:
     image: forgejo.ellis.link/continuwuation/continuwuity:latest
     restart: unless-stopped
    volumes:
-      - db:/var/lib/conduwuit
+      - db:/var/lib/continuwuity
       - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
-      #- ./conduwuit.toml:/etc/conduwuit.toml
+      #- ./continuwuity.toml:/etc/continuwuity.toml
     networks:
       - proxy
     environment:
-      CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS
-      CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
-      CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label
-      CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
-      CONDUWUIT_ALLOW_REGISTRATION: 'true'
-      CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
-      #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
-      CONDUWUIT_ALLOW_FEDERATION: 'true'
-      CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
-      CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
-      #CONDUWUIT_LOG: warn,state_res=warn
-      CONDUWUIT_ADDRESS: 0.0.0.0
-      #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
+      CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
+      CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
+      CONTINUWUITY_PORT: 6167 # should match the loadbalancer traefik label
+      CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
+      CONTINUWUITY_ALLOW_REGISTRATION: 'true'
+      CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
+      #CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
+      CONTINUWUITY_ALLOW_FEDERATION: 'true'
+      CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
+      CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
+      #CONTINUWUITY_LOG: warn,state_res=warn
+      CONTINUWUITY_ADDRESS: 0.0.0.0
+      #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above

-      # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN
-      # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a seperate
+      # We need some way to serve the client and server .well-known json. The simplest way is via the CONTINUWUITY_WELL_KNOWN
+      # variable / config option, there are multiple ways to do this, e.g. in the continuwuity.toml file, and in a separate
       # see the override file for more information about delegation
-      CONDUWUIT_WELL_KNOWN: |
+      CONTINUWUITY_WELL_KNOWN: |
         {
           client=https://your.server.name.example,
           server=your.server.name.example:443
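For comparison, the same settings expressed as a config file rather than environment variables, assuming the usual `CONTINUWUITY_*` env var to `[global]` key mapping; values mirror the compose example above:

```toml
[global]
server_name = "your.server.name.example"  # EDIT THIS
address = "0.0.0.0"
port = 6167
database_path = "/var/lib/continuwuity"
max_request_size = 20_000_000  # in bytes, ~20 MB
allow_registration = true
registration_token = "YOUR_TOKEN"
allow_federation = true
allow_check_for_updates = true
trusted_servers = ["matrix.org"]
```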
@ -6,11 +6,11 @@ services:
|
||||||
- "traefik.enable=true"
|
- "traefik.enable=true"
|
||||||
- "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
|
- "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
|
||||||
|
|
||||||
- "traefik.http.routers.to-conduwuit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Continuwuity is hosted
|
- "traefik.http.routers.to-continuwuity.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Continuwuity is hosted
|
||||||
- "traefik.http.routers.to-conduwuit.tls=true"
|
- "traefik.http.routers.to-continuwuity.tls=true"
|
||||||
- "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt"
|
- "traefik.http.routers.to-continuwuity.tls.certresolver=letsencrypt"
|
||||||
- "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker"
|
- "traefik.http.routers.to-continuwuity.middlewares=cors-headers@docker"
|
||||||
- "traefik.http.services.to_conduwuit.loadbalancer.server.port=6167"
|
- "traefik.http.services.to_continuwuity.loadbalancer.server.port=6167"
|
||||||
|
|
||||||
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
|
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
|
||||||
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
|
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
|
||||||
|
|
|
@ -25,23 +25,23 @@ services:
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/conduwuit
|
- db:/var/lib/continuwuity
|
||||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||||
#- ./conduwuit.toml:/etc/conduwuit.toml
|
#- ./continuwuity.toml:/etc/continuwuity.toml
|
||||||
environment:
|
environment:
|
||||||
CONDUWUIT_SERVER_NAME: example.com # EDIT THIS
|
CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS
|
||||||
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
|
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
|
||||||
CONDUWUIT_PORT: 6167
|
CONTINUWUITY_PORT: 6167
|
||||||
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
|
CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
|
||||||
CONDUWUIT_ALLOW_REGISTRATION: 'true'
|
CONTINUWUITY_ALLOW_REGISTRATION: 'true'
|
||||||
CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
|
CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
|
||||||
#CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
|
#CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
|
||||||
CONDUWUIT_ALLOW_FEDERATION: 'true'
|
CONTINUWUITY_ALLOW_FEDERATION: 'true'
|
||||||
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
|
||||||
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
#CONDUWUIT_LOG: warn,state_res=warn
|
#CONTINUWUITY_LOG: warn,state_res=warn
|
||||||
CONDUWUIT_ADDRESS: 0.0.0.0
|
CONTINUWUITY_ADDRESS: 0.0.0.0
|
||||||
#CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
|
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
|
||||||
networks:
|
networks:
|
||||||
- caddy
|
- caddy
|
||||||
labels:
|
labels:
|
||||||
|
|
|
@ -7,38 +7,38 @@ services:
|
||||||
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
image: forgejo.ellis.link/continuwuation/continuwuity:latest
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/conduwuit
|
- db:/var/lib/continuwuity
|
||||||
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
- /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's.
|
||||||
#- ./conduwuit.toml:/etc/conduwuit.toml
|
#- ./continuwuity.toml:/etc/continuwuity.toml
|
||||||
networks:
|
networks:
|
||||||
- proxy
|
- proxy
|
||||||
environment:
|
environment:
|
||||||
CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS
|
CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
|
||||||
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
CONDUWUIT_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this
|
CONTINUWUITY_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this
|
||||||
CONDUWUIT_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server
|
CONTINUWUITY_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server
|
||||||
#CONDUWUIT_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read
|
#CONTINUWUITY_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read
|
||||||
CONDUWUIT_ADDRESS: 0.0.0.0
|
CONTINUWUITY_ADDRESS: 0.0.0.0
|
||||||
CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it
|
CONTINUWUITY_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it
|
||||||
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
|
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
|
||||||
#CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above
|
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
|
||||||
### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example example config too
|
### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example example config too
|
||||||
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
|
||||||
# CONDUWUIT_LOG: info # default is: "warn,state_res=warn"
|
# CONTINUWUITY_LOG: info # default is: "warn,state_res=warn"
|
||||||
# CONDUWUIT_ALLOW_ENCRYPTION: 'true'
|
# CONTINUWUITY_ALLOW_ENCRYPTION: 'true'
|
||||||
# CONDUWUIT_ALLOW_FEDERATION: 'true'
|
# CONTINUWUITY_ALLOW_FEDERATION: 'true'
|
||||||
# CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
# CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
|
||||||
# CONDUWUIT_ALLOW_INCOMING_PRESENCE: true
|
# CONTINUWUITY_ALLOW_INCOMING_PRESENCE: true
|
||||||
# CONDUWUIT_ALLOW_OUTGOING_PRESENCE: true
|
# CONTINUWUITY_ALLOW_OUTGOING_PRESENCE: true
|
||||||
# CONDUWUIT_ALLOW_LOCAL_PRESENCE: true
|
# CONTINUWUITY_ALLOW_LOCAL_PRESENCE: true
|
||||||
# CONDUWUIT_WORKERS: 10
|
# CONTINUWUITY_WORKERS: 10
|
||||||
# CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
|
# CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
|
||||||
# CONDUWUIT_NEW_USER_DISPLAYNAME_SUFFIX = "🏳<200d>⚧"
|
# CONTINUWUITY_NEW_USER_DISPLAYNAME_SUFFIX = "🏳<200d>⚧"
|
||||||
|
|
||||||
# We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN
|
# We need some way to serve the client and server .well-known json. The simplest way is via the CONTINUWUITY_WELL_KNOWN
|
||||||
# variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a seperate
|
# variable / config option, there are multiple ways to do this, e.g. in the continuwuity.toml file, and in a separate
|
||||||
# reverse proxy, but since you do not have a reverse proxy and following this guide, this example is included
|
# reverse proxy, but since you do not have a reverse proxy and following this guide, this example is included
|
||||||
CONDUWUIT_WELL_KNOWN: |
|
CONTINUWUITY_WELL_KNOWN: |
|
||||||
{
|
{
|
||||||
client=https://your.server.name.example,
|
client=https://your.server.name.example,
|
||||||
server=your.server.name.example:443
|
server=your.server.name.example:443
|
||||||
|
|
|
@ -9,22 +9,22 @@ services:
|
||||||
ports:
|
ports:
|
||||||
- 8448:6167
|
- 8448:6167
|
||||||
volumes:
|
volumes:
|
||||||
- db:/var/lib/conduwuit
|
- db:/var/lib/continuwuity
|
||||||
#- ./conduwuit.toml:/etc/conduwuit.toml
|
#- ./continuwuity.toml:/etc/continuwuity.toml
|
||||||
environment:
|
environment:
|
||||||
CONDUWUIT_SERVER_NAME: your.server.name # EDIT THIS
|
CONTINUWUITY_SERVER_NAME: your.server.name # EDIT THIS
|
||||||
CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit
|
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
|
||||||
CONDUWUIT_PORT: 6167
|
CONTINUWUITY_PORT: 6167
|
||||||
CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
|
CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
|
||||||
CONDUWUIT_ALLOW_REGISTRATION: 'true'
|
CONTINUWUITY_ALLOW_REGISTRATION: 'true'
|
||||||
CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
|
CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
|
||||||
#CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
|
#CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
|
||||||
CONDUWUIT_ALLOW_FEDERATION: 'true'
|
CONTINUWUITY_ALLOW_FEDERATION: 'true'
|
||||||
CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
|
CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true'
|
||||||
CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
#CONDUWUIT_LOG: warn,state_res=warn
|
#CONTINUWUITY_LOG: warn,state_res=warn
|
||||||
CONDUWUIT_ADDRESS: 0.0.0.0
|
CONTINUWUITY_ADDRESS: 0.0.0.0
|
||||||
#CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above
|
#CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above
|
||||||
#
|
#
|
||||||
### Uncomment if you want to use your own Element-Web App.
|
### Uncomment if you want to use your own Element-Web App.
|
||||||
### Note: You need to provide a config.json for Element and you also need a second
|
### Note: You need to provide a config.json for Element and you also need a second
|
||||||
|
|
|
@ -30,16 +30,16 @@ When you have the image you can simply run it with
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run -d -p 8448:6167 \
|
docker run -d -p 8448:6167 \
|
||||||
-v db:/var/lib/conduwuit/ \
|
-v db:/var/lib/continuwuity/ \
|
||||||
-e CONDUWUIT_SERVER_NAME="your.server.name" \
|
-e CONTINUWUITY_SERVER_NAME="your.server.name" \
|
||||||
-e CONDUWUIT_ALLOW_REGISTRATION=false \
|
-e CONTINUWUITY_ALLOW_REGISTRATION=false \
|
||||||
--name conduwuit $LINK
|
--name continuwuity $LINK
|
||||||
```
|
```
|
||||||
|
|
||||||
or you can use [docker compose](#docker-compose).
|
or you can use [docker compose](#docker-compose).
|
||||||
|
|
||||||
The `-d` flag lets the container run in detached mode. You may supply an
|
The `-d` flag lets the container run in detached mode. You may supply an
|
||||||
optional `conduwuit.toml` config file, the example config can be found
|
optional `continuwuity.toml` config file, the example config can be found
|
||||||
[here](../configuration/examples.md). You can pass in different env vars to
|
[here](../configuration/examples.md). You can pass in different env vars to
|
||||||
change config values on the fly. You can even configure Continuwuity completely by
|
change config values on the fly. You can even configure Continuwuity completely by
|
||||||
using env vars. For an overview of possible values, please take a look at the
|
using env vars. For an overview of possible values, please take a look at the
|
||||||
|
|
|
@ -115,7 +115,7 @@ ReadWritePaths=/path/to/custom/database/path
|
||||||
## Creating the Continuwuity configuration file
|
## Creating the Continuwuity configuration file
|
||||||
|
|
||||||
Now we need to create the Continuwuity's config file in
|
Now we need to create the Continuwuity's config file in
|
||||||
`/etc/conduwuit/conduwuit.toml`. The example config can be found at
|
`/etc/continuwuity/continuwuity.toml`. The example config can be found at
|
||||||
[conduwuit-example.toml](../configuration/examples.md).
|
[conduwuit-example.toml](../configuration/examples.md).
|
||||||
|
|
||||||
**Please take a moment to read the config. You need to change at least the
|
**Please take a moment to read the config. You need to change at least the
|
||||||
|
|
|
@ -190,7 +190,7 @@ The initial implementation PR is available [here][1].
|
||||||
- [Workspace-level metadata
|
- [Workspace-level metadata
|
||||||
(cargo-deb)](https://github.com/kornelski/cargo-deb/issues/68)
|
(cargo-deb)](https://github.com/kornelski/cargo-deb/issues/68)
|
||||||
|
|
||||||
[1]: https://github.com/girlbossceo/conduwuit/pull/387
|
[1]: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/387
|
||||||
[2]: https://wiki.musl-libc.org/functional-differences-from-glibc.html#Unloading-libraries
|
[2]: https://wiki.musl-libc.org/functional-differences-from-glibc.html#Unloading-libraries
|
||||||
[3]: https://github.com/rust-lang/rust/issues/28794
|
[3]: https://github.com/rust-lang/rust/issues/28794
|
||||||
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049
|
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049
|
||||||
|
|
|
@ -24,8 +24,9 @@ and run the script.
|
||||||
If you're on macOS and need to build an image, run `nix build .#linux-complement`.
|
If you're on macOS and need to build an image, run `nix build .#linux-complement`.
|
||||||
|
|
||||||
We have a Complement fork as some tests have needed to be fixed. This can be found
|
We have a Complement fork as some tests have needed to be fixed. This can be found
|
||||||
at: <https://github.com/girlbossceo/complement>
|
at: <https://forgejo.ellis.link/continuwuation/complement>
|
||||||
|
|
||||||
[ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo
|
[ci-workflows]:
|
||||||
|
https://forgejo.ellis.link/continuwuation/continuwuity/actions/?workflow=ci.yml&actor=0&status=1
|
||||||
[complement]: https://github.com/matrix-org/complement
|
[complement]: https://github.com/matrix-org/complement
|
||||||
[direnv]: https://direnv.net/docs/hook.html
|
[direnv]: https://direnv.net/docs/hook.html
|
||||||
|
|
|
@@ -71,7 +71,7 @@ related to WAL tracking.

 The only safe files that can be deleted are the `LOG` files (all caps). These
 are the real RocksDB telemetry/log files, however Continuwuity has already
-configured to only store up to 3 RocksDB `LOG` files due to generall being
+configured to only store up to 3 RocksDB `LOG` files due to generally being
 useless for average users unless troubleshooting something low-level. If you
 would like to store nearly none at all, see the `rocksdb_max_log_files`
 config option.
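If you do want to keep almost no RocksDB log files, the option mentioned above can be set explicitly; the value here is illustrative, assuming the `[global]` table of the example config:

```toml
[global]
# Keep only a single RocksDB LOG file (illustrative value).
rocksdb_max_log_files = 1
```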

docs/security.md (new file): 1 change
@@ -0,0 +1 @@
+{{#include ../SECURITY.md}}
@@ -75,9 +75,9 @@ dockerTools.buildImage {
     else [];

   Env = [
-    "CONDUWUIT_TLS__KEY=${./private_key.key}"
-    "CONDUWUIT_TLS__CERTS=${./certificate.crt}"
-    "CONDUWUIT_CONFIG=${./config.toml}"
+    "CONTINUWUITY_TLS__KEY=${./private_key.key}"
+    "CONTINUWUITY_TLS__CERTS=${./certificate.crt}"
+    "CONTINUWUITY_CONFIG=${./config.toml}"
     "RUST_BACKTRACE=full"
   ];

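The double-underscore in `CONTINUWUITY_TLS__KEY` presumably maps to a nested table in the TOML config, so the same TLS material could be supplied file-based roughly like this; the paths are placeholders, not paths used by this repository:

```toml
# Placeholder paths; point these at your real key and certificate.
[global.tls]
key = "/etc/continuwuity/private_key.key"
certs = "/etc/continuwuity/certificate.crt"
```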
@ -130,7 +130,8 @@ buildDepsOnlyEnv =
|
||||||
});
|
});
|
||||||
|
|
||||||
buildPackageEnv = {
|
buildPackageEnv = {
|
||||||
CONDUWUIT_VERSION_EXTRA = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
|
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
|
||||||
|
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
|
||||||
} // buildDepsOnlyEnv // {
|
} // buildDepsOnlyEnv // {
|
||||||
# Only needed in static stdenv because these are transitive dependencies of rocksdb
|
# Only needed in static stdenv because these are transitive dependencies of rocksdb
|
||||||
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
|
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
|
||||||
|
|
|
@ -33,13 +33,13 @@ dockerTools.buildLayeredImage {
|
||||||
<jason@zemos.net>";
|
<jason@zemos.net>";
|
||||||
"org.opencontainers.image.created" ="@${toString inputs.self.lastModified}";
|
"org.opencontainers.image.created" ="@${toString inputs.self.lastModified}";
|
||||||
"org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust";
|
"org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust";
|
||||||
"org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/";
|
"org.opencontainers.image.documentation" = "https://continuwuity.org/";
|
||||||
"org.opencontainers.image.licenses" = "Apache-2.0";
|
"org.opencontainers.image.licenses" = "Apache-2.0";
|
||||||
"org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or "";
|
"org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or "";
|
||||||
"org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit";
|
"org.opencontainers.image.source" = "https://forgejo.ellis.link/continuwuation/continuwuity";
|
||||||
"org.opencontainers.image.title" = main.pname;
|
"org.opencontainers.image.title" = main.pname;
|
||||||
"org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/";
|
"org.opencontainers.image.url" = "https://continuwuity.org/";
|
||||||
"org.opencontainers.image.vendor" = "girlbossceo";
|
"org.opencontainers.image.vendor" = "continuwuation";
|
||||||
"org.opencontainers.image.version" = main.version;
|
"org.opencontainers.image.version" = main.version;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
|
@ -94,7 +94,7 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
|
||||||
#[allow(clippy::result_large_err)]
|
#[allow(clippy::result_large_err)]
|
||||||
fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
|
fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
|
||||||
let link =
|
let link =
|
||||||
"Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺";
|
"Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 🥺";
|
||||||
let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}");
|
let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}");
|
||||||
let content = RoomMessageEventContent::notice_markdown(msg);
|
let content = RoomMessageEventContent::notice_markdown(msg);
|
||||||
error!("Panic while processing command: {error:?}");
|
error!("Panic while processing command: {error:?}");
|
||||||
|
|
|
@ -15,7 +15,7 @@ use crate::Ruma;
|
||||||
|
|
||||||
/// # `GET /_matrix/client/v3/capabilities`
|
/// # `GET /_matrix/client/v3/capabilities`
|
||||||
///
|
///
|
||||||
/// Get information on the supported feature set and other relevent capabilities
|
/// Get information on the supported feature set and other relevant capabilities
|
||||||
/// of this server.
|
/// of this server.
|
||||||
pub(crate) async fn get_capabilities_route(
|
pub(crate) async fn get_capabilities_route(
|
 	State(services): State<crate::State>,

@@ -2162,6 +2162,109 @@ async fn knock_room_by_id_helper(
 		}
 	}

+	// For knock_restricted rooms, check if the user meets the restricted conditions
+	// If they do, attempt to join instead of knock
+	// This is not mentioned in the spec, but should be allowable (we're allowed to
+	// auto-join invites to knocked rooms)
+	let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
+	if let JoinRule::KnockRestricted(restricted) = &join_rule {
+		let restriction_rooms: Vec<_> = restricted
+			.allow
+			.iter()
+			.filter_map(|a| match a {
+				| AllowRule::RoomMembership(r) => Some(&r.room_id),
+				| _ => None,
+			})
+			.collect();
+
+		// Check if the user is in any of the allowed rooms
+		let mut user_meets_restrictions = false;
+		for restriction_room_id in &restriction_rooms {
+			if services
+				.rooms
+				.state_cache
+				.is_joined(sender_user, restriction_room_id)
+				.await
+			{
+				user_meets_restrictions = true;
+				break;
+			}
+		}
+
+		// If the user meets the restrictions, try joining instead
+		if user_meets_restrictions {
+			debug_info!(
+				"{sender_user} meets the restricted criteria in knock_restricted room \
+				 {room_id}, attempting to join instead of knock"
+			);
+			// For this case, we need to drop the state lock and get a new one in
+			// join_room_by_id_helper We need to release the lock here and let
+			// join_room_by_id_helper acquire it again
+			drop(state_lock);
+			match join_room_by_id_helper(
+				services,
+				sender_user,
+				room_id,
+				reason.clone(),
+				servers,
+				None,
+				&None,
+			)
+			.await
+			{
+				| Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())),
+				| Err(e) => {
+					debug_warn!(
+						"Failed to convert knock to join for {sender_user} in {room_id}: {e:?}"
+					);
+					// Get a new state lock for the remaining knock logic
+					let new_state_lock = services.rooms.state.mutex.lock(room_id).await;
+
+					let server_in_room = services
+						.rooms
+						.state_cache
+						.server_in_room(services.globals.server_name(), room_id)
+						.await;
+
+					let local_knock = server_in_room
+						|| servers.is_empty()
+						|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
+
+					if local_knock {
+						knock_room_helper_local(
+							services,
+							sender_user,
+							room_id,
+							reason,
+							servers,
+							new_state_lock,
+						)
+						.boxed()
+						.await?;
+					} else {
+						knock_room_helper_remote(
+							services,
+							sender_user,
+							room_id,
+							reason,
+							servers,
+							new_state_lock,
+						)
+						.boxed()
+						.await?;
+					}
+
+					return Ok(knock_room::v3::Response::new(room_id.to_owned()));
+				},
+			}
+		}
+	} else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
+		debug_warn!(
+			"{sender_user} attempted to knock on room {room_id} but its join rule is \
+			 {join_rule:?}, not knock or knock_restricted"
+		);
+	}
+
 	let server_in_room = services
 		.rooms
 		.state_cache
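For context, the `KnockRestricted` branch above keys off the room's `m.room.join_rules` state event. Below is a minimal sketch of the content shape it inspects, assuming only `serde_json`; the room ID is invented for illustration:

```rust
use serde_json::json;

fn main() {
	// Roughly the m.room.join_rules content the new branch inspects.
	// The knocking user is only auto-joined when they are already a member
	// of one of the rooms referenced by the `allow` entries.
	let join_rules = json!({
		"join_rule": "knock_restricted",
		"allow": [
			{ "type": "m.room_membership", "room_id": "!members-only:example.org" }
		]
	});

	let allowed_rooms: Vec<&str> = join_rules["allow"]
		.as_array()
		.expect("allow is an array")
		.iter()
		.filter_map(|entry| entry["room_id"].as_str())
		.collect();

	assert_eq!(allowed_rooms, ["!members-only:example.org"]);
}
```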
@@ -2209,6 +2312,12 @@ async fn knock_room_helper_local(
 		return Err!(Request(Forbidden("This room does not support knocking.")));
 	}

+	// Verify that this room has a valid knock or knock_restricted join rule
+	let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await;
+	if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) {
+		return Err!(Request(Forbidden("This room's join rule does not allow knocking.")));
+	}
+
 	let content = RoomMemberEventContent {
 		displayname: services.users.displayname(sender_user).await.ok(),
 		avatar_url: services.users.avatar_url(sender_user).await.ok(),
@@ -143,7 +143,10 @@ pub(crate) async fn get_message_events_route(
 			if let Some(registration) = body.appservice_info.as_ref() {
 				<&DeviceId>::from(registration.registration.id.as_str())
 			} else {
-				panic!("No device_id provided and no appservice registration found, this should be unreachable");
+				panic!(
+					"No device_id provided and no appservice registration found, this \
+					 should be unreachable"
+				);
 			},
 		},
 		room_id,
@@ -1,5 +1,6 @@
 use axum::{Json, extract::State, response::IntoResponse};
 use conduwuit::{Error, Result};
+use futures::StreamExt;
 use ruma::api::client::{
 	discovery::{
 		discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo},

@@ -17,7 +18,7 @@ pub(crate) async fn well_known_client(
 	State(services): State<crate::State>,
 	_body: Ruma<discover_homeserver::Request>,
 ) -> Result<discover_homeserver::Response> {
-	let client_url = match services.server.config.well_known.client.as_ref() {
+	let client_url = match services.config.well_known.client.as_ref() {
 		| Some(url) => url.to_string(),
 		| None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
 	};
@@ -33,44 +34,63 @@ pub(crate) async fn well_known_client(
 /// # `GET /.well-known/matrix/support`
 ///
 /// Server support contact and support page of a homeserver's domain.
+/// Implements MSC1929 for server discovery.
+/// If no configuration is set, uses admin users as contacts.
 pub(crate) async fn well_known_support(
 	State(services): State<crate::State>,
 	_body: Ruma<discover_support::Request>,
 ) -> Result<discover_support::Response> {
 	let support_page = services
-		.server
 		.config
 		.well_known
 		.support_page
 		.as_ref()
 		.map(ToString::to_string);

-	let role = services.server.config.well_known.support_role.clone();
-
-	// support page or role must be either defined for this to be valid
-	if support_page.is_none() && role.is_none() {
-		return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
-	}
-
-	let email_address = services.server.config.well_known.support_email.clone();
-	let matrix_id = services.server.config.well_known.support_mxid.clone();
-
-	// if a role is specified, an email address or matrix id is required
-	if role.is_some() && (email_address.is_none() && matrix_id.is_none()) {
-		return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
-	}
-
-	// TOOD: support defining multiple contacts in the config
+	let email_address = services.config.well_known.support_email.clone();
+	let matrix_id = services.config.well_known.support_mxid.clone();
+
+	// TODO: support defining multiple contacts in the config
 	let mut contacts: Vec<Contact> = vec![];

-	if let Some(role) = role {
-		let contact = Contact { role, email_address, matrix_id };
-
-		contacts.push(contact);
+	let role_value = services
+		.config
+		.well_known
+		.support_role
+		.clone()
+		.unwrap_or_else(|| "m.role.admin".to_owned().into());
+
+	// Add configured contact if at least one contact method is specified
+	if email_address.is_some() || matrix_id.is_some() {
+		contacts.push(Contact {
+			role: role_value.clone(),
+			email_address: email_address.clone(),
+			matrix_id: matrix_id.clone(),
+		});
+	}
+
+	// Try to add admin users as contacts if no contacts are configured
+	if contacts.is_empty() {
+		if let Ok(admin_room) = services.admin.get_admin_room().await {
+			let admin_users = services.rooms.state_cache.room_members(&admin_room);
+			let mut stream = admin_users;
+
+			while let Some(user_id) = stream.next().await {
+				// Skip server user
+				if *user_id == services.globals.server_user {
+					break;
+				}
+				contacts.push(Contact {
+					role: role_value.clone(),
+					email_address: None,
+					matrix_id: Some(user_id.to_owned()),
+				});
+			}
+		}
 	}

-	// support page or role+contacts must be either defined for this to be valid
 	if contacts.is_empty() && support_page.is_none() {
+		// No admin room, no configured contacts, and no support page
 		return Err(Error::BadRequest(ErrorKind::NotFound, "Not found."));
 	}

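As a rough illustration of what the rewritten handler serves, here is a sketch of an MSC1929-style `/.well-known/matrix/support` body when no contacts are configured and an admin-room member is picked up as the fallback. Only `serde_json` is assumed, and the user ID and support page are invented:

```rust
use serde_json::json;

fn main() {
	// Approximate response shape produced by the handler above when the
	// configured contacts are empty and an admin-room member is used instead.
	let support = json!({
		"contacts": [
			{ "matrix_id": "@admin:example.org", "role": "m.role.admin" }
		],
		"support_page": "https://example.org/support"
	});

	assert_eq!(support["contacts"][0]["role"], "m.role.admin");
	// No email was configured, so the fallback contact carries none.
	assert!(support["contacts"][0].get("email_address").is_none());
}
```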
@@ -84,9 +104,9 @@ pub(crate) async fn well_known_support(
 pub(crate) async fn syncv3_client_server_json(
 	State(services): State<crate::State>,
 ) -> Result<impl IntoResponse> {
-	let server_url = match services.server.config.well_known.client.as_ref() {
+	let server_url = match services.config.well_known.client.as_ref() {
 		| Some(url) => url.to_string(),
-		| None => match services.server.config.well_known.server.as_ref() {
+		| None => match services.config.well_known.server.as_ref() {
 			| Some(url) => url.to_string(),
 			| None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
 		},
@@ -3,7 +3,6 @@ mod auth;
 mod handler;
 mod request;
 mod response;
-pub mod state;

 use std::str::FromStr;

@@ -13,10 +12,11 @@ use axum::{
 	routing::{any, get, post},
 };
 use conduwuit::{Server, err};
+pub(super) use conduwuit_service::state::State;
 use http::{Uri, uri};

 use self::handler::RouterExt;
-pub(super) use self::{args::Args as Ruma, response::RumaResponse, state::State};
+pub(super) use self::{args::Args as Ruma, response::RumaResponse};
 use crate::{client, server};

 pub fn build(router: Router<State>, server: &Server) -> Router<State> {
src/build_metadata/Cargo.toml (new file, 34 lines)
@@ -0,0 +1,34 @@
[package]
name = "conduwuit_build_metadata"
categories.workspace = true
description.workspace = true
edition.workspace = true
keywords.workspace = true
license.workspace = true
readme.workspace = true
repository.workspace = true
version.workspace = true


build = "build.rs"
# [[bin]]
# path = "main.rs"
# name = "conduwuit_build_metadata"

[lib]
path = "mod.rs"
crate-type = [
	"rlib",
	# "dylib",
]

[features]


[dependencies]

[build-dependencies]
built = { version = "0.8", features = [] }

[lints]
workspace = true
src/build_metadata/build.rs (new file, 93 lines)
@@ -0,0 +1,93 @@
use std::process::Command;

fn run_git_command(args: &[&str]) -> Option<String> {
	Command::new("git")
		.args(args)
		.output()
		.ok()
		.filter(|output| output.status.success())
		.and_then(|output| String::from_utf8(output.stdout).ok())
		.map(|s| s.trim().to_owned())
		.filter(|s| !s.is_empty())
}
fn get_env(env_var: &str) -> Option<String> {
	match std::env::var(env_var) {
		| Ok(val) if !val.is_empty() => Some(val),
		| _ => None,
	}
}
fn main() {
	// built gets the default crate from the workspace. Not sure if this is intended
	// behavior, but it's what we want.
	built::write_built_file().expect("Failed to acquire build-time information");

	// --- Git Information ---
	let mut commit_hash = None;
	let mut commit_hash_short = None;
	let mut remote_url_web = None;

	// Get full commit hash
	if let Some(hash) =
		get_env("GIT_COMMIT_HASH").or_else(|| run_git_command(&["rev-parse", "HEAD"]))
	{
		println!("cargo:rustc-env=GIT_COMMIT_HASH={hash}");
		commit_hash = Some(hash);
	}

	// Get short commit hash
	if let Some(short_hash) = get_env("GIT_COMMIT_HASH_SHORT")
		.or_else(|| run_git_command(&["rev-parse", "--short", "HEAD"]))
	{
		println!("cargo:rustc-env=GIT_COMMIT_HASH_SHORT={short_hash}");
		commit_hash_short = Some(short_hash);
	}

	// Get remote URL and convert to web URL
	if let Some(remote_url_raw) = get_env("GIT_REMOTE_URL")
		.or_else(|| run_git_command(&["config", "--get", "remote.origin.url"]))
	{
		println!("cargo:rustc-env=GIT_REMOTE_URL={remote_url_raw}");
		let web_url = if remote_url_raw.starts_with("https://") {
			remote_url_raw.trim_end_matches(".git").to_owned()
		} else if remote_url_raw.starts_with("git@") {
			remote_url_raw
				.trim_end_matches(".git")
				.replacen(':', "/", 1)
				.replacen("git@", "https://", 1)
		} else if remote_url_raw.starts_with("ssh://") {
			remote_url_raw
				.trim_end_matches(".git")
				.replacen("git@", "", 1)
				.replacen("ssh:", "https:", 1)
		} else {
			// Assume it's already a web URL or unknown format
			remote_url_raw
		};
		println!("cargo:rustc-env=GIT_REMOTE_WEB_URL={web_url}");
		remote_url_web = Some(web_url);
	}

	// Construct remote commit URL
	if let Some(remote_commit_url) = get_env("GIT_REMOTE_COMMIT_URL") {
		println!("cargo:rustc-env=GIT_REMOTE_COMMIT_URL={remote_commit_url}");
	} else if let (Some(base_url), Some(hash)) =
		(&remote_url_web, commit_hash.as_ref().or(commit_hash_short.as_ref()))
	{
		let commit_page = format!("{base_url}/commit/{hash}");
		println!("cargo:rustc-env=GIT_REMOTE_COMMIT_URL={commit_page}");
	}

	// --- Rerun Triggers ---
	// TODO: The git rerun triggers seem to always run
	// // Rerun if the git HEAD changes
	// println!("cargo:rerun-if-changed=.git/HEAD");
	// // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch)
	// if let Some(ref_path) = run_git_command(&["symbolic-ref", "--quiet", "HEAD"])
	// { println!("cargo:rerun-if-changed=.git/{ref_path}");
	// }

	println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH");
	println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH_SHORT");
	println!("cargo:rerun-if-env-changed=GIT_REMOTE_URL");
	println!("cargo:rerun-if-env-changed=GIT_REMOTE_COMMIT_URL");
}
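To make the remote-URL handling in `build.rs` above easier to follow, here is a standalone restatement of the same normalisation with example remotes; the host and repository names are invented:

```rust
// Mirrors the web-URL conversion in build.rs: strip a trailing ".git" and
// rewrite scp-style (git@host:path) and ssh:// remotes into https:// URLs.
fn to_web_url(remote: &str) -> String {
	if remote.starts_with("https://") {
		remote.trim_end_matches(".git").to_owned()
	} else if remote.starts_with("git@") {
		remote
			.trim_end_matches(".git")
			.replacen(':', "/", 1)
			.replacen("git@", "https://", 1)
	} else if remote.starts_with("ssh://") {
		remote
			.trim_end_matches(".git")
			.replacen("git@", "", 1)
			.replacen("ssh:", "https:", 1)
	} else {
		// Assume it's already a web URL or an unknown format.
		remote.to_owned()
	}
}

fn main() {
	assert_eq!(
		to_web_url("https://forge.example.com/owner/repo.git"),
		"https://forge.example.com/owner/repo"
	);
	assert_eq!(
		to_web_url("git@forge.example.com:owner/repo.git"),
		"https://forge.example.com/owner/repo"
	);
	assert_eq!(
		to_web_url("ssh://git@forge.example.com/owner/repo.git"),
		"https://forge.example.com/owner/repo"
	);
}
```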
src/build_metadata/mod.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
pub mod built {
	include!(concat!(env!("OUT_DIR"), "/built.rs"));
}

pub static GIT_COMMIT_HASH: Option<&str> = option_env!("GIT_COMMIT_HASH");

pub static GIT_COMMIT_HASH_SHORT: Option<&str> = option_env!("GIT_COMMIT_HASH_SHORT");

// this would be a lot better if Option::or was const.
pub static VERSION_EXTRA: Option<&str> =
	if let v @ Some(_) = option_env!("CONTINUWUITY_VERSION_EXTRA") {
		v
	} else if let v @ Some(_) = option_env!("CONDUWUIT_VERSION_EXTRA") {
		v
	} else {
		option_env!("CONDUIT_VERSION_EXTRA")
	};

#[must_use]
pub fn version_tag() -> Option<&'static str> {
	VERSION_EXTRA
		.filter(|s| !s.is_empty())
		.or(GIT_COMMIT_HASH_SHORT)
}

pub static GIT_REMOTE_WEB_URL: Option<&str> = option_env!("GIT_REMOTE_WEB_URL");
pub static GIT_REMOTE_COMMIT_URL: Option<&str> = option_env!("GIT_REMOTE_COMMIT_URL");

// TODO: Mark dirty builds within the version string
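A small sketch of the precedence `version_tag()` implements: a non-empty `*_VERSION_EXTRA` value wins, otherwise the short commit hash is used. The values below are invented for illustration:

```rust
// Mirrors the logic in mod.rs above: prefer a non-empty VERSION_EXTRA,
// then fall back to GIT_COMMIT_HASH_SHORT.
fn version_tag<'a>(
	version_extra: Option<&'a str>,
	short_hash: Option<&'a str>,
) -> Option<&'a str> {
	version_extra.filter(|s| !s.is_empty()).or(short_hash)
}

fn main() {
	assert_eq!(version_tag(Some("custom-build"), Some("abc1234")), Some("custom-build"));
	assert_eq!(version_tag(Some(""), Some("abc1234")), Some("abc1234"));
	assert_eq!(version_tag(None, None), None);
}
```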
@@ -67,6 +67,7 @@ checked_ops.workspace = true
 chrono.workspace = true
 clap.workspace = true
 conduwuit-macros.workspace = true
+conduwuit-build-metadata.workspace = true
 const-str.workspace = true
 core_affinity.workspace = true
 ctor.workspace = true
@@ -274,6 +274,10 @@ pub fn set_dirty_decay<I: Into<Option<usize>>>(arena: I, decay_ms: isize) -> Res
 	}
 }

+pub fn background_thread_enable(enable: bool) -> Result<bool> {
+	set::<u8>(&mallctl!("background_thread"), enable.into()).map(is_nonzero!())
+}
+
 #[inline]
 #[must_use]
 pub fn is_affine_arena() -> bool { is_percpu_arena() || is_phycpu_arena() }
@@ -118,7 +118,7 @@ pub fn check(config: &Config) -> Result {
 	if cfg!(not(debug_assertions)) && config.server_name == "your.server.name" {
 		return Err!(Config(
 			"server_name",
-			"You must specify a valid server name for production usage of conduwuit."
+			"You must specify a valid server name for production usage of continuwuity."
 		));
 	}

@@ -290,7 +290,7 @@ fn warn_deprecated(config: &Config) {

 	if was_deprecated {
 		warn!(
-			"Read conduwuit config documentation at https://conduwuit.puppyirl.gay/configuration.html and check your \
+			"Read continuwuity config documentation at https://continuwuity.org/configuration.html and check your \
 			 configuration if any new configuration parameters should be adjusted"
 		);
 	}
@@ -27,7 +27,7 @@ use self::proxy::ProxyConfig;
 pub use self::{check::check, manager::Manager};
 use crate::{Result, err, error::Error, utils::sys};

-/// All the config options for conduwuit.
+/// All the config options for continuwuity.
 #[allow(clippy::struct_excessive_bools)]
 #[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)]
 #[derive(Clone, Debug, Deserialize)]

@@ -35,7 +35,7 @@ use crate::{Result, err, error::Error, utils::sys};
 	filename = "conduwuit-example.toml",
 	section = "global",
 	undocumented = "# This item is undocumented. Please contribute documentation for it.",
-	header = r#"### conduwuit Configuration
+	header = r#"### continuwuity Configuration
 ###
 ### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE
 ### OVERWRITTEN!

@@ -50,7 +50,7 @@ use crate::{Result, err, error::Error, utils::sys};
 ### that say "YOU NEED TO EDIT THIS".
 ###
 ### For more information, see:
-### https://conduwuit.puppyirl.gay/configuration.html
+### https://continuwuity.org/configuration.html
 "#,
 	ignore = "catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure"
 )]

@@ -59,7 +59,7 @@ pub struct Config {
 	/// suffix for user and room IDs/aliases.
 	///
 	/// See the docs for reverse proxying and delegation:
-	/// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy
+	/// https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy
 	///
 	/// Also see the `[global.well_known]` config section at the very bottom.
 	///

@@ -70,10 +70,10 @@ pub struct Config {
 	/// YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE
 	/// WIPE.
 	///
-	/// example: "conduwuit.woof"
+	/// example: "continuwuity.org"
 	pub server_name: OwnedServerName,

-	/// The default address (IPv4 or IPv6) conduwuit will listen on.
+	/// The default address (IPv4 or IPv6) continuwuity will listen on.
 	///
 	/// If you are using Docker or a container NAT networking setup, this must
 	/// be "0.0.0.0".

@@ -85,10 +85,10 @@ pub struct Config {
 	#[serde(default = "default_address")]
 	address: ListeningAddr,

-	/// The port(s) conduwuit will listen on.
+	/// The port(s) continuwuity will listen on.
 	///
 	/// For reverse proxying, see:
-	/// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy
+	/// https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy
 	///
 	/// If you are using Docker, don't change this, you'll need to map an
 	/// external port to this.
@@ -103,16 +103,17 @@ pub struct Config {
 	#[serde(default)]
 	pub tls: TlsConfig,

-	/// The UNIX socket conduwuit will listen on.
+	/// The UNIX socket continuwuity will listen on.
 	///
-	/// conduwuit cannot listen on both an IP address and a UNIX socket. If
+	/// continuwuity cannot listen on both an IP address and a UNIX socket. If
 	/// listening on a UNIX socket, you MUST remove/comment the `address` key.
 	///
 	/// Remember to make sure that your reverse proxy has access to this socket
-	/// file, either by adding your reverse proxy to the 'conduwuit' group or
-	/// granting world R/W permissions with `unix_socket_perms` (666 minimum).
+	/// file, either by adding your reverse proxy to the appropriate user group
+	/// or granting world R/W permissions with `unix_socket_perms` (666
+	/// minimum).
 	///
-	/// example: "/run/conduwuit/conduwuit.sock"
+	/// example: "/run/continuwuity/continuwuity.sock"
 	pub unix_socket_path: Option<PathBuf>,

 	/// The default permissions (in octal) to create the UNIX socket with.

@@ -121,22 +122,22 @@ pub struct Config {
 	#[serde(default = "default_unix_socket_perms")]
 	pub unix_socket_perms: u32,

-	/// This is the only directory where conduwuit will save its data, including
-	/// media. Note: this was previously "/var/lib/matrix-conduit".
+	/// This is the only directory where continuwuity will save its data,
+	/// including media. Note: this was previously "/var/lib/matrix-conduit".
 	///
 	/// YOU NEED TO EDIT THIS.
 	///
-	/// example: "/var/lib/conduwuit"
+	/// example: "/var/lib/continuwuity"
 	pub database_path: PathBuf,

-	/// conduwuit supports online database backups using RocksDB's Backup engine
-	/// API. To use this, set a database backup path that conduwuit can write
-	/// to.
+	/// continuwuity supports online database backups using RocksDB's Backup
+	/// engine API. To use this, set a database backup path that continuwuity
+	/// can write to.
 	///
 	/// For more information, see:
-	/// https://conduwuit.puppyirl.gay/maintenance.html#backups
+	/// https://continuwuity.org/maintenance.html#backups
 	///
-	/// example: "/opt/conduwuit-db-backups"
+	/// example: "/opt/continuwuity-db-backups"
 	pub database_backup_path: Option<PathBuf>,

 	/// The amount of online RocksDB database backups to keep/retain, if using

@@ -160,7 +161,7 @@ pub struct Config {
 	#[serde(default = "default_new_user_displayname_suffix")]
 	pub new_user_displayname_suffix: String,

-	/// If enabled, conduwuit will send a simple GET request periodically to
+	/// If enabled, continuwuity will send a simple GET request periodically to
 	/// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new
 	/// announcements or major updates. This is not an update check endpoint.
 	///

@@ -168,8 +169,8 @@ pub struct Config {
 	#[serde(alias = "allow_check_for_updates", default = "true_fn")]
 	pub allow_announcements_check: bool,

-	/// Set this to any float value to multiply conduwuit's in-memory LRU caches
-	/// with such as "auth_chain_cache_capacity".
+	/// Set this to any float value to multiply continuwuity's in-memory LRU
+	/// caches with such as "auth_chain_cache_capacity".
 	///
 	/// May be useful if you have significant memory to spare to increase
 	/// performance.

@@ -186,7 +187,7 @@ pub struct Config {
 	)]
 	pub cache_capacity_modifier: f64,

-	/// Set this to any float value in megabytes for conduwuit to tell the
+	/// Set this to any float value in megabytes for continuwuity to tell the
 	/// database engine that this much memory is available for database read
 	/// caches.
 	///

@@ -202,7 +203,7 @@ pub struct Config {
 	#[serde(default = "default_db_cache_capacity_mb")]
 	pub db_cache_capacity_mb: f64,

-	/// Set this to any float value in megabytes for conduwuit to tell the
+	/// Set this to any float value in megabytes for continuwuity to tell the
 	/// database engine that this much memory is available for database write
 	/// caches.
 	///

@@ -319,9 +320,9 @@ pub struct Config {
 	/// Enable using *only* TCP for querying your specified nameservers instead
 	/// of UDP.
 	///
-	/// If you are running conduwuit in a container environment, this config
+	/// If you are running continuwuity in a container environment, this config
 	/// option may need to be enabled. For more details, see:
-	/// https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker
+	/// https://continuwuity.org/troubleshooting.html#potential-dns-issues-when-using-docker
 	#[serde(default)]
 	pub query_over_tcp_only: bool,
@@ -534,9 +535,9 @@ pub struct Config {
 	/// tokens. Multiple tokens can be added if you separate them with
 	/// whitespace
 	///
-	/// conduwuit must be able to access the file, and it must not be empty
+	/// continuwuity must be able to access the file, and it must not be empty
 	///
-	/// example: "/etc/conduwuit/.reg_token"
+	/// example: "/etc/continuwuity/.reg_token"
 	pub registration_token_file: Option<PathBuf>,

 	/// Controls whether encrypted rooms and events are allowed.

@@ -627,16 +628,16 @@ pub struct Config {
 	pub allow_room_creation: bool,

 	/// Set to false to disable users from joining or creating room versions
-	/// that aren't officially supported by conduwuit.
+	/// that aren't officially supported by continuwuity.
 	///
-	/// conduwuit officially supports room versions 6 - 11.
+	/// continuwuity officially supports room versions 6 - 11.
 	///
-	/// conduwuit has slightly experimental (though works fine in practice)
+	/// continuwuity has slightly experimental (though works fine in practice)
 	/// support for versions 3 - 5.
 	#[serde(default = "true_fn")]
 	pub allow_unstable_room_versions: bool,

-	/// Default room version conduwuit will create rooms with.
+	/// Default room version continuwuity will create rooms with.
 	///
 	/// Per spec, room version 11 is the default.
 	///

@@ -710,7 +711,7 @@ pub struct Config {
 	/// Servers listed here will be used to gather public keys of other servers
 	/// (notary trusted key servers).
 	///
-	/// Currently, conduwuit doesn't support inbound batched key requests, so
+	/// Currently, continuwuity doesn't support inbound batched key requests, so
 	/// this list should only contain other Synapse servers.
 	///
 	/// example: ["matrix.org", "tchncs.de"]

@@ -755,7 +756,7 @@ pub struct Config {
 	#[serde(default = "default_trusted_server_batch_size")]
 	pub trusted_server_batch_size: usize,

-	/// Max log level for conduwuit. Allows debug, info, warn, or error.
+	/// Max log level for continuwuity. Allows debug, info, warn, or error.
 	///
 	/// See also:
 	/// https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives

@@ -780,8 +781,9 @@ pub struct Config {
 	#[serde(default = "default_log_span_events")]
 	pub log_span_events: String,

-	/// Configures whether CONDUWUIT_LOG EnvFilter matches values using regular
-	/// expressions. See the tracing_subscriber documentation on Directives.
+	/// Configures whether CONTINUWUITY_LOG EnvFilter matches values using
+	/// regular expressions. See the tracing_subscriber documentation on
+	/// Directives.
 	///
 	/// default: true
 	#[serde(default = "true_fn")]

@@ -863,7 +865,7 @@ pub struct Config {
 	/// This takes priority over "turn_secret" first, and falls back to
 	/// "turn_secret" if invalid or failed to open.
 	///
-	/// example: "/etc/conduwuit/.turn_secret"
+	/// example: "/etc/continuwuity/.turn_secret"
 	pub turn_secret_file: Option<PathBuf>,

 	/// TURN TTL, in seconds.

@@ -872,12 +874,12 @@ pub struct Config {
 	#[serde(default = "default_turn_ttl")]
 	pub turn_ttl: u64,

-	/// List/vector of room IDs or room aliases that conduwuit will make newly
-	/// registered users join. The rooms specified must be rooms that you have
-	/// joined at least once on the server, and must be public.
+	/// List/vector of room IDs or room aliases that continuwuity will make
+	/// newly registered users join. The rooms specified must be rooms that you
+	/// have joined at least once on the server, and must be public.
 	///
-	/// example: ["#conduwuit:puppygock.gay",
-	/// "!eoIzvAvVwY23LPDay8:puppygock.gay"]
+	/// example: ["#continuwuity:continuwuity.org",
+	/// "!main-1:continuwuity.org"]
 	///
 	/// default: []
 	#[serde(default = "Vec::new")]

@@ -902,10 +904,10 @@ pub struct Config {
 	#[serde(default)]
 	pub auto_deactivate_banned_room_attempts: bool,

-	/// RocksDB log level. This is not the same as conduwuit's log level. This
-	/// is the log level for the RocksDB engine/library which show up in your
-	/// database folder/path as `LOG` files. conduwuit will log RocksDB errors
-	/// as normal through tracing or panics if severe for safety.
+	/// RocksDB log level. This is not the same as continuwuity's log level.
+	/// This is the log level for the RocksDB engine/library which show up in
+	/// your database folder/path as `LOG` files. continuwuity will log RocksDB
+	/// errors as normal through tracing or panics if severe for safety.
 	///
 	/// default: "error"
 	#[serde(default = "default_rocksdb_log_level")]
@@ -930,7 +932,7 @@ pub struct Config {
 	/// Set this to true to use RocksDB config options that are tailored to HDDs
 	/// (slower device storage).
 	///
-	/// It is worth noting that by default, conduwuit will use RocksDB with
+	/// It is worth noting that by default, continuwuity will use RocksDB with
 	/// Direct IO enabled. *Generally* speaking this improves performance as it
 	/// bypasses buffered I/O (system page cache). However there is a potential
 	/// chance that Direct IO may cause issues with database operations if your

@@ -938,7 +940,7 @@ pub struct Config {
 	/// possibly ZFS filesystem. RocksDB generally deals/corrects these issues
 	/// but it cannot account for all setups. If you experience any weird
 	/// RocksDB issues, try enabling this option as it turns off Direct IO and
-	/// feel free to report in the conduwuit Matrix room if this option fixes
+	/// feel free to report in the continuwuity Matrix room if this option fixes
 	/// your DB issues.
 	///
 	/// For more information, see:

@@ -999,7 +1001,7 @@ pub struct Config {
 	/// as they all differ. See their `kDefaultCompressionLevel`.
 	///
 	/// Note when using the default value we may override it with a setting
-	/// tailored specifically conduwuit.
+	/// tailored specifically for continuwuity.
 	///
 	/// default: 32767
 	#[serde(default = "default_rocksdb_compression_level")]

@@ -1017,7 +1019,7 @@ pub struct Config {
 	/// algorithm.
 	///
 	/// Note when using the default value we may override it with a setting
-	/// tailored specifically conduwuit.
+	/// tailored specifically for continuwuity.
 	///
 	/// default: 32767
 	#[serde(default = "default_rocksdb_bottommost_compression_level")]

@@ -1059,13 +1061,13 @@ pub struct Config {
 	/// 0 = AbsoluteConsistency
 	/// 1 = TolerateCorruptedTailRecords (default)
 	/// 2 = PointInTime (use me if trying to recover)
-	/// 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty)
+	/// 3 = SkipAnyCorruptedRecord (you now voided your Continuwuity warranty)
 	///
 	/// For more information on these modes, see:
 	/// https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes
 	///
 	/// For more details on recovering a corrupt database, see:
-	/// https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption
+	/// https://continuwuity.org/troubleshooting.html#database-corruption
 	///
 	/// default: 1
 	#[serde(default = "default_rocksdb_recovery_mode")]

@@ -1109,7 +1111,7 @@ pub struct Config {
 	/// - Disabling repair mode and restarting the server is recommended after
 	/// running the repair.
 	///
-	/// See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database.
+	/// See https://continuwuity.org/troubleshooting.html#database-corruption for more details on recovering a corrupt database.
 	#[serde(default)]
 	pub rocksdb_repair: bool,

@@ -1134,7 +1136,7 @@ pub struct Config {
 	/// Enables RocksDB compaction. You should never ever have to set this
 	/// option to false. If you for some reason find yourself needing to use
 	/// this option as part of troubleshooting or a bug, please reach out to us
-	/// in the conduwuit Matrix room with information and details.
+	/// in the continuwuity Matrix room with information and details.
 	///
 	/// Disabling compaction will lead to a significantly bloated and
 	/// explosively large database, gradually poor performance, unnecessarily

@@ -1162,7 +1164,7 @@ pub struct Config {
 	/// purposes such as recovering/recreating your admin room, or inviting
 	/// yourself back.
 	///
-	/// See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room.
+	/// See https://continuwuity.org/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room.
 	///
 	/// Once this password is unset, all sessions will be logged out for
 	/// security purposes.

@@ -1178,8 +1180,8 @@ pub struct Config {

 	/// Allow local (your server only) presence updates/requests.
 	///
-	/// Note that presence on conduwuit is very fast unlike Synapse's. If using
-	/// outgoing presence, this MUST be enabled.
+	/// Note that presence on continuwuity is very fast unlike Synapse's. If
+	/// using outgoing presence, this MUST be enabled.
 	#[serde(default = "true_fn")]
 	pub allow_local_presence: bool,

@@ -1187,7 +1189,7 @@ pub struct Config {
 	///
 	/// This option receives presence updates from other servers, but does not
 	/// send any unless `allow_outgoing_presence` is true. Note that presence on
-	/// conduwuit is very fast unlike Synapse's.
+	/// continuwuity is very fast unlike Synapse's.
 	#[serde(default = "true_fn")]
 	pub allow_incoming_presence: bool,

@@ -1195,8 +1197,8 @@ pub struct Config {
 	///
 	/// This option sends presence updates to other servers, but does not
 	/// receive any unless `allow_incoming_presence` is true. Note that presence
-	/// on conduwuit is very fast unlike Synapse's. If using outgoing presence,
-	/// you MUST enable `allow_local_presence` as well.
+	/// on continuwuity is very fast unlike Synapse's. If using outgoing
+	/// presence, you MUST enable `allow_local_presence` as well.
 	#[serde(default = "true_fn")]
 	pub allow_outgoing_presence: bool,
@@ -1259,8 +1261,8 @@ pub struct Config {
 	#[serde(default = "default_typing_client_timeout_max_s")]
 	pub typing_client_timeout_max_s: u64,

-	/// Set this to true for conduwuit to compress HTTP response bodies using
-	/// zstd. This option does nothing if conduwuit was not built with
+	/// Set this to true for continuwuity to compress HTTP response bodies using
+	/// zstd. This option does nothing if continuwuity was not built with
 	/// `zstd_compression` feature. Please be aware that enabling HTTP
 	/// compression may weaken TLS. Most users should not need to enable this.
 	/// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH

@@ -1268,8 +1270,8 @@ pub struct Config {
 	#[serde(default)]
 	pub zstd_compression: bool,

-	/// Set this to true for conduwuit to compress HTTP response bodies using
-	/// gzip. This option does nothing if conduwuit was not built with
+	/// Set this to true for continuwuity to compress HTTP response bodies using
+	/// gzip. This option does nothing if continuwuity was not built with
 	/// `gzip_compression` feature. Please be aware that enabling HTTP
 	/// compression may weaken TLS. Most users should not need to enable this.
 	/// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before

@@ -1280,8 +1282,8 @@ pub struct Config {
 	#[serde(default)]
 	pub gzip_compression: bool,

-	/// Set this to true for conduwuit to compress HTTP response bodies using
-	/// brotli. This option does nothing if conduwuit was not built with
+	/// Set this to true for continuwuity to compress HTTP response bodies using
+	/// brotli. This option does nothing if continuwuity was not built with
 	/// `brotli_compression` feature. Please be aware that enabling HTTP
 	/// compression may weaken TLS. Most users should not need to enable this.
 	/// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH

@@ -1342,7 +1344,7 @@ pub struct Config {
 	/// Otherwise setting this to false reduces filesystem clutter and overhead
 	/// for managing these symlinks in the directory. This is now disabled by
 	/// default. You may still return to upstream Conduit but you have to run
-	/// conduwuit at least once with this set to true and allow the
+	/// continuwuity at least once with this set to true and allow the
 	/// media_startup_check to take place before shutting down to return to
 	/// Conduit.
 	#[serde(default)]

@@ -1391,8 +1393,8 @@ pub struct Config {
 	#[serde(default, with = "serde_regex")]
 	pub allowed_remote_server_names: RegexSet,

-	/// Vector list of regex patterns of server names that conduwuit will refuse
-	/// to download remote media from.
+	/// Vector list of regex patterns of server names that continuwuity will
+	/// refuse to download remote media from.
 	///
 	/// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"]
 	///

@@ -1410,7 +1412,7 @@ pub struct Config {
 	#[serde(default, with = "serde_regex")]
 	pub forbidden_remote_room_directory_server_names: RegexSet,

-	/// Vector list of regex patterns of server names that conduwuit will not
+	/// Vector list of regex patterns of server names that continuwuity will not
 	/// send messages to the client from.
 	///
 	/// Note that there is no way for clients to receive messages once a server

@@ -1436,7 +1438,7 @@ pub struct Config {
 	pub send_messages_from_ignored_users_to_client: bool,

 	/// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you
-	/// do not want conduwuit to send outbound requests to. Defaults to
+	/// do not want continuwuity to send outbound requests to. Defaults to
 	/// RFC1918, unroutable, loopback, multicast, and testnet addresses for
 	/// security.
 	///
@@ -1604,26 +1606,26 @@ pub struct Config {

 	/// Allow admins to enter commands in rooms other than "#admins" (admin
 	/// room) by prefixing your message with "\!admin" or "\\!admin" followed up
-	/// a normal conduwuit admin command. The reply will be publicly visible to
-	/// the room, originating from the sender.
+	/// a normal continuwuity admin command. The reply will be publicly visible
+	/// to the room, originating from the sender.
 	///
 	/// example: \\!admin debug ping puppygock.gay
 	#[serde(default = "true_fn")]
 	pub admin_escape_commands: bool,

-	/// Automatically activate the conduwuit admin room console / CLI on
-	/// startup. This option can also be enabled with `--console` conduwuit
+	/// Automatically activate the continuwuity admin room console / CLI on
+	/// startup. This option can also be enabled with `--console` continuwuity
 	/// argument.
 	#[serde(default)]
 	pub admin_console_automatic: bool,

 	/// List of admin commands to execute on startup.
 	///
-	/// This option can also be configured with the `--execute` conduwuit
+	/// This option can also be configured with the `--execute` continuwuity
 	/// argument and can take standard shell commands and environment variables
 	///
-	/// For example: `./conduwuit --execute "server admin-notice conduwuit has
-	/// started up at $(date)"`
+	/// For example: `./continuwuity --execute "server admin-notice continuwuity
+	/// has started up at $(date)"`
 	///
 	/// example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]`
 	///

@@ -1633,7 +1635,7 @@ pub struct Config {

 	/// Ignore errors in startup commands.
 	///
-	/// If false, conduwuit will error and fail to start if an admin execute
+	/// If false, continuwuity will error and fail to start if an admin execute
 	/// command (`--execute` / `admin_execute`) fails.
 	#[serde(default)]
 	pub admin_execute_errors_ignore: bool,

@@ -1658,17 +1660,16 @@ pub struct Config {
 	/// The default room tag to apply on the admin room.
 	///
 	/// On some clients like Element, the room tag "m.server_notice" is a
-	/// special pinned room at the very bottom of your room list. The conduwuit
-	/// admin room can be pinned here so you always have an easy-to-access
-	/// shortcut dedicated to your admin room.
+	/// special pinned room at the very bottom of your room list. The
+	/// continuwuity admin room can be pinned here so you always have an
+	/// easy-to-access shortcut dedicated to your admin room.
 	///
 	/// default: "m.server_notice"
 	#[serde(default = "default_admin_room_tag")]
 	pub admin_room_tag: String,

 	/// Sentry.io crash/panic reporting, performance monitoring/metrics, etc.
-	/// This is NOT enabled by default. conduwuit's default Sentry reporting
-	/// endpoint domain is `o4506996327251968.ingest.us.sentry.io`.
+	/// This is NOT enabled by default.
 	#[serde(default)]
 	pub sentry: bool,

@@ -1679,7 +1680,7 @@ pub struct Config {
 	#[serde(default = "default_sentry_endpoint")]
 	pub sentry_endpoint: Option<Url>,

-	/// Report your conduwuit server_name in Sentry.io crash reports and
+	/// Report your continuwuity server_name in Sentry.io crash reports and
 	/// metrics.
 	#[serde(default)]
 	pub sentry_send_server_name: bool,

@@ -1720,7 +1721,7 @@ pub struct Config {
 	/// Enable the tokio-console. This option is only relevant to developers.
 	///
 	/// For more information, see:
-	/// https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console
+	/// https://continuwuity.org/development.html#debugging-with-tokio-console
 	#[serde(default)]
 	pub tokio_console: bool,
@ -1896,12 +1897,28 @@ pub struct WellKnownConfig {
|
||||||
/// example: "matrix.example.com:443"
|
/// example: "matrix.example.com:443"
|
||||||
pub server: Option<OwnedServerName>,
|
pub server: Option<OwnedServerName>,
|
||||||
|
|
||||||
|
/// URL to a support page for the server, which will be served as part of
|
||||||
|
/// the MSC1929 server support endpoint at /.well-known/matrix/support.
|
||||||
|
/// Will be included alongside any contact information
|
||||||
pub support_page: Option<Url>,
|
pub support_page: Option<Url>,
|
||||||
|
|
||||||
|
/// Role string for server support contacts, to be served as part of the
|
||||||
|
/// MSC1929 server support endpoint at /.well-known/matrix/support.
|
||||||
|
///
|
||||||
|
/// default: "m.role.admin"
|
||||||
pub support_role: Option<ContactRole>,
|
pub support_role: Option<ContactRole>,
|
||||||
|
|
||||||
|
/// Email address for server support contacts, to be served as part of the
|
||||||
|
/// MSC1929 server support endpoint.
|
||||||
|
/// This will be used along with support_mxid if specified.
|
||||||
pub support_email: Option<String>,
|
pub support_email: Option<String>,
|
||||||
|
|
||||||
|
/// Matrix ID for server support contacts, to be served as part of the
|
||||||
|
/// MSC1929 server support endpoint.
|
||||||
|
/// This will be used along with support_email if specified.
|
||||||
|
///
|
||||||
|
/// If no email or mxid is specified, all of the server's admins will be
|
||||||
|
/// listed.
|
||||||
pub support_mxid: Option<OwnedUserId>,
|
pub support_mxid: Option<OwnedUserId>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
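For context on the new `WellKnownConfig` fields above: MSC1929 serves a JSON document at `/.well-known/matrix/support` listing contacts and an optional support page. A minimal sketch of the shape that document could take, assuming the standard MSC1929 field names (`contacts`, `matrix_id`, `email_address`, `role`, `support_page`) rather than continuwuity's actual serializer:

```rust
use serde_json::json;

// Hypothetical illustration of the /.well-known/matrix/support payload that the
// support_mxid / support_email / support_role / support_page options feed into.
// Field names follow MSC1929; the real response is assembled by the server itself.
fn example_support_document() -> serde_json::Value {
    json!({
        "contacts": [{
            "matrix_id": "@admin:example.com",      // support_mxid
            "email_address": "admin@example.com",   // support_email
            "role": "m.role.admin"                  // support_role (the default)
        }],
        "support_page": "https://example.com/support" // support_page
    })
}
```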
@@ -1962,7 +1979,11 @@ impl Config {
    where
        I: Iterator<Item = &'a Path>,
    {
-        let envs = [Env::var("CONDUIT_CONFIG"), Env::var("CONDUWUIT_CONFIG")];
+        let envs = [
+            Env::var("CONDUIT_CONFIG"),
+            Env::var("CONDUWUIT_CONFIG"),
+            Env::var("CONTINUWUITY_CONFIG"),
+        ];

        let config = envs
            .into_iter()

@@ -1971,7 +1992,8 @@ impl Config {
            .chain(paths.map(Toml::file))
            .fold(Figment::new(), |config, file| config.merge(file.nested()))
            .merge(Env::prefixed("CONDUIT_").global().split("__"))
-            .merge(Env::prefixed("CONDUWUIT_").global().split("__"));
+            .merge(Env::prefixed("CONDUWUIT_").global().split("__"))
+            .merge(Env::prefixed("CONTINUWUITY_").global().split("__"));

        Ok(config)
    }
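The load path above layers Figment providers, and a later `merge` takes precedence for any key it also defines, so the new `CONTINUWUITY_` prefix wins over the legacy ones. A minimal sketch of the same pattern with only the environment providers (file providers omitted for brevity):

```rust
use figment::{Figment, providers::Env};

// Sketch of the layered environment merge: CONDUIT_*, then CONDUWUIT_*, then
// CONTINUWUITY_* variables are merged in order, so the newest prefix overrides
// the older ones when the same key is set under several prefixes. "__" separates
// nested keys, e.g. CONTINUWUITY_WELL_KNOWN__SERVER.
fn env_only_figment() -> Figment {
    Figment::new()
        .merge(Env::prefixed("CONDUIT_").global().split("__"))
        .merge(Env::prefixed("CONDUWUIT_").global().split("__"))
        .merge(Env::prefixed("CONTINUWUITY_").global().split("__"))
}
```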
@@ -36,7 +36,7 @@ const MAIN_MANIFEST: &'static str = ();
/// For *enabled* features see the info::rustc module instead.
static FEATURES: OnceLock<Vec<String>> = OnceLock::new();

-/// Processed list of dependencies. This is generated from the datas captured in
+/// Processed list of dependencies. This is generated from the data captured in
/// the MANIFEST.
static DEPENDENCIES: OnceLock<DepsSet> = OnceLock::new();
@@ -26,13 +26,6 @@ pub fn user_agent() -> &'static str { USER_AGENT.get_or_init(init_user_agent) }
fn init_user_agent() -> String { format!("{}/{}", name(), version()) }

fn init_version() -> String {
-    option_env!("CONDUWUIT_VERSION_EXTRA")
-        .or(option_env!("CONDUIT_VERSION_EXTRA"))
-        .map_or(SEMANTIC.to_owned(), |extra| {
-            if extra.is_empty() {
-                SEMANTIC.to_owned()
-            } else {
-                format!("{SEMANTIC} ({extra})")
-            }
-        })
+    conduwuit_build_metadata::version_tag()
+        .map_or(SEMANTIC.to_owned(), |extra| format!("{SEMANTIC} ({extra})"))
}
@@ -16,9 +16,9 @@ use crate::{Result, error};
/// pulling in a version of tracing that's incompatible with the rest of our
/// deps.
///
-/// To work around this, we define an trait without the S paramter that forwards
-/// to the reload::Handle::reload method, and then store the handle as a trait
-/// object.
+/// To work around this, we define an trait without the S parameter that
+/// forwards to the reload::Handle::reload method, and then store the handle as
+/// a trait object.
///
/// [1]: <https://github.com/tokio-rs/tracing/pull/1035/commits/8a87ea52425098d3ef8f56d92358c2f6c144a28f>
pub trait ReloadHandle<L> {
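A minimal sketch of the workaround that comment describes: erase the subscriber parameter `S` behind a single-parameter trait so the handle can be stored as a trait object. The names below are illustrative, not the crate's actual definitions.

```rust
use tracing_subscriber::reload;

// Illustrative only: a trait with just the layer parameter L, forwarded to
// reload::Handle::reload, so callers can hold Box<dyn ReloadHandleExample<L>>
// without ever naming the subscriber type S.
pub trait ReloadHandleExample<L> {
    fn reload(&self, new_value: L) -> Result<(), reload::Error>;
}

impl<L, S> ReloadHandleExample<L> for reload::Handle<L, S> {
    fn reload(&self, new_value: L) -> Result<(), reload::Error> {
        // Forward to the inherent method; the S parameter is erased at the trait level.
        reload::Handle::reload(self, new_value)
    }
}
```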
@@ -1,18 +1,10 @@
-use std::{
-    borrow::Borrow,
-    fmt::{Debug, Display},
-    hash::Hash,
-};
-
use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType};
use serde_json::value::RawValue as RawJsonValue;

/// Abstraction of a PDU so users can have their own PDU types.
pub trait Event {
-    type Id: Clone + Debug + Display + Eq + Ord + Hash + Send + Borrow<EventId>;
-
    /// The `EventId` of this event.
-    fn event_id(&self) -> &Self::Id;
+    fn event_id(&self) -> &EventId;

    /// The `RoomId` of this event.
    fn room_id(&self) -> &RoomId;

@@ -34,20 +26,18 @@ pub trait Event {

    /// The events before this event.
    // Requires GATs to avoid boxing (and TAIT for making it convenient).
-    fn prev_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_;
+    fn prev_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_;

    /// All the authenticating events for this event.
    // Requires GATs to avoid boxing (and TAIT for making it convenient).
-    fn auth_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_;
+    fn auth_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_;

    /// If this event is a redaction event this is the event it redacts.
-    fn redacts(&self) -> Option<&Self::Id>;
+    fn redacts(&self) -> Option<&EventId>;
}

impl<T: Event> Event for &T {
-    type Id = T::Id;
-
-    fn event_id(&self) -> &Self::Id { (*self).event_id() }
+    fn event_id(&self) -> &EventId { (*self).event_id() }

    fn room_id(&self) -> &RoomId { (*self).room_id() }

@@ -61,13 +51,13 @@ impl<T: Event> Event for &T {

    fn state_key(&self) -> Option<&str> { (*self).state_key() }

-    fn prev_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
+    fn prev_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_ {
        (*self).prev_events()
    }

-    fn auth_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
+    fn auth_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_ {
        (*self).auth_events()
    }

-    fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() }
+    fn redacts(&self) -> Option<&EventId> { (*self).redacts() }
}
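With the associated `Id` type gone, code that is generic over `Event` borrows plain `&EventId`s and owns them explicitly when it needs to. A small illustrative helper (not from the diff) showing how a caller might collect a PDU's previous events under the revised trait, assuming the `Event` re-export shown further down in this comparison:

```rust
use conduwuit_core::Event; // path assumed from the crate's re-exports
use ruma::OwnedEventId;

// Hypothetical helper: gather owned copies of an event's prev_events now that
// prev_events() yields &EventId instead of &Self::Id.
fn collect_prev_events<E: Event>(event: &E) -> Vec<OwnedEventId> {
    event.prev_events().map(ToOwned::to_owned).collect()
}
```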
@@ -79,9 +79,7 @@ impl Pdu {
}

impl Event for Pdu {
-    type Id = OwnedEventId;
-
-    fn event_id(&self) -> &Self::Id { &self.event_id }
+    fn event_id(&self) -> &EventId { &self.event_id }

    fn room_id(&self) -> &RoomId { &self.room_id }

@@ -97,15 +95,15 @@ impl Event for Pdu {

    fn state_key(&self) -> Option<&str> { self.state_key.as_deref() }

-    fn prev_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
-        self.prev_events.iter()
+    fn prev_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_ {
+        self.prev_events.iter().map(AsRef::as_ref)
    }

-    fn auth_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
-        self.auth_events.iter()
+    fn auth_events(&self) -> impl DoubleEndedIterator<Item = &EventId> + Send + '_ {
+        self.auth_events.iter().map(AsRef::as_ref)
    }

-    fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() }
+    fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() }
}

/// Prevent derived equality which wouldn't limit itself to event_id
@@ -1,8 +1,8 @@
use ruma::{
    events::{
-        AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent,
-        AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent,
-        room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent,
+        AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent,
+        AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, room::member::RoomMemberEventContent,
+        space::child::HierarchySpaceChildEvent,
    },
    serde::Raw,
};

@@ -10,41 +10,6 @@ use serde_json::{json, value::Value as JsonValue};

use crate::implement;

-/// This only works for events that are also AnyRoomEvents.
-#[must_use]
-#[implement(super::Pdu)]
-pub fn into_any_event(self) -> Raw<AnyEphemeralRoomEvent> {
-    serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works")
-}
-
-/// This only works for events that are also AnyRoomEvents.
-#[implement(super::Pdu)]
-#[must_use]
-#[inline]
-pub fn into_any_event_value(self) -> JsonValue {
-    let (redacts, content) = self.copy_redacts();
-    let mut json = json!({
-        "content": content,
-        "type": self.kind,
-        "event_id": self.event_id,
-        "sender": self.sender,
-        "origin_server_ts": self.origin_server_ts,
-        "room_id": self.room_id,
-    });
-
-    if let Some(unsigned) = &self.unsigned {
-        json["unsigned"] = json!(unsigned);
-    }
-    if let Some(state_key) = &self.state_key {
-        json["state_key"] = json!(state_key);
-    }
-    if let Some(redacts) = &redacts {
-        json["redacts"] = json!(redacts);
-    }
-
-    json
-}
-
#[implement(super::Pdu)]
#[must_use]
#[inline]

@@ -53,7 +18,8 @@ pub fn into_room_event(self) -> Raw<AnyTimelineEvent> { self.to_room_event() }
#[implement(super::Pdu)]
#[must_use]
pub fn to_room_event(&self) -> Raw<AnyTimelineEvent> {
-    serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works")
+    let value = self.to_room_event_value();
+    serde_json::from_value(value).expect("Failed to serialize Event value")
}

#[implement(super::Pdu)]

@@ -91,8 +57,8 @@ pub fn into_message_like_event(self) -> Raw<AnyMessageLikeEvent> { self.to_messa
#[implement(super::Pdu)]
#[must_use]
pub fn to_message_like_event(&self) -> Raw<AnyMessageLikeEvent> {
-    serde_json::from_value(self.to_message_like_event_value())
-        .expect("Raw::from_value always works")
+    let value = self.to_message_like_event_value();
+    serde_json::from_value(value).expect("Failed to serialize Event value")
}

#[implement(super::Pdu)]

@@ -130,7 +96,8 @@ pub fn into_sync_room_event(self) -> Raw<AnySyncTimelineEvent> { self.to_sync_ro
#[implement(super::Pdu)]
#[must_use]
pub fn to_sync_room_event(&self) -> Raw<AnySyncTimelineEvent> {
-    serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works")
+    let value = self.to_sync_room_event_value();
+    serde_json::from_value(value).expect("Failed to serialize Event value")
}

#[implement(super::Pdu)]

@@ -162,7 +129,8 @@ pub fn to_sync_room_event_value(&self) -> JsonValue {
#[implement(super::Pdu)]
#[must_use]
pub fn into_state_event(self) -> Raw<AnyStateEvent> {
-    serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works")
+    let value = self.into_state_event_value();
+    serde_json::from_value(value).expect("Failed to serialize Event value")
}

#[implement(super::Pdu)]

@@ -189,8 +157,8 @@ pub fn into_state_event_value(self) -> JsonValue {
#[implement(super::Pdu)]
#[must_use]
pub fn into_sync_state_event(self) -> Raw<AnySyncStateEvent> {
-    serde_json::from_value(self.into_sync_state_event_value())
-        .expect("Raw::from_value always works")
+    let value = self.into_sync_state_event_value();
+    serde_json::from_value(value).expect("Failed to serialize Event value")
}

#[implement(super::Pdu)]

@@ -223,8 +191,8 @@ pub fn into_stripped_state_event(self) -> Raw<AnyStrippedStateEvent> {
#[implement(super::Pdu)]
#[must_use]
pub fn to_stripped_state_event(&self) -> Raw<AnyStrippedStateEvent> {
-    serde_json::from_value(self.to_stripped_state_event_value())
-        .expect("Raw::from_value always works")
+    let value = self.to_stripped_state_event_value();
+    serde_json::from_value(value).expect("Failed to serialize Event value")
}

#[implement(super::Pdu)]

@@ -242,8 +210,8 @@ pub fn to_stripped_state_event_value(&self) -> JsonValue {
#[implement(super::Pdu)]
#[must_use]
pub fn into_stripped_spacechild_state_event(self) -> Raw<HierarchySpaceChildEvent> {
-    serde_json::from_value(self.into_stripped_spacechild_state_event_value())
-        .expect("Raw::from_value always works")
+    let value = self.into_stripped_spacechild_state_event_value();
+    serde_json::from_value(value).expect("Failed to serialize Event value")
}

#[implement(super::Pdu)]

@@ -262,7 +230,8 @@ pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue {
#[implement(super::Pdu)]
#[must_use]
pub fn into_member_event(self) -> Raw<StateEvent<RoomMemberEventContent>> {
-    serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works")
+    let value = self.into_member_event_value();
+    serde_json::from_value(value).expect("Failed to serialize Event value")
}

#[implement(super::Pdu)]
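All of the conversion helpers above now share the same two-step shape: build the `JsonValue`, then convert it into a `Raw<T>`. A hedged sketch of that shared pattern in isolation (generic over the target type, not the crate's actual helper):

```rust
use ruma::serde::Raw;
use serde_json::value::Value as JsonValue;

// Illustrative version of the repeated `let value = ...; serde_json::from_value(value)`
// pattern: a JsonValue can be reinterpreted as Raw<T> because Raw<T> only wraps the
// raw JSON text; the expect mirrors the "Failed to serialize Event value" message.
fn json_to_raw<T>(value: JsonValue) -> Raw<T> {
    serde_json::from_value(value).expect("Failed to serialize Event value")
}
```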
@@ -52,7 +52,6 @@ fn lexico_topo_sort(c: &mut test::Bencher) {
#[cfg(conduwuit_bench)]
#[cfg_attr(conduwuit_bench, bench)]
fn resolution_shallow_auth_chain(c: &mut test::Bencher) {
-    let parallel_fetches = 32;
    let mut store = TestStore(hashmap! {});

    // build up the DAG

@@ -78,7 +77,6 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) {
            &auth_chain_sets,
            &fetch,
            &exists,
-            parallel_fetches,
        )
        .await
        {

@@ -91,7 +89,6 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) {
#[cfg(conduwuit_bench)]
#[cfg_attr(conduwuit_bench, bench)]
fn resolve_deeper_event_set(c: &mut test::Bencher) {
-    let parallel_fetches = 32;
    let mut inner = INITIAL_EVENTS();
    let ban = BAN_STATE_SET();

@@ -153,7 +150,6 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) {
            &auth_chain_sets,
            &fetch,
            &exists,
-            parallel_fetches,
        )
        .await
        {

@@ -190,7 +186,11 @@ impl<E: Event + Clone> TestStore<E> {
    }

    /// Returns a Vec of the related auth events to the given `event`.
-    fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec<E::Id>) -> Result<HashSet<E::Id>> {
+    fn auth_event_ids(
+        &self,
+        room_id: &RoomId,
+        event_ids: Vec<OwnedEventId>,
+    ) -> Result<HashSet<OwnedEventId>> {
        let mut result = HashSet::new();
        let mut stack = event_ids;

@@ -216,8 +216,8 @@ impl<E: Event + Clone> TestStore<E> {
    fn auth_chain_diff(
        &self,
        room_id: &RoomId,
-        event_ids: Vec<Vec<E::Id>>,
-    ) -> Result<Vec<E::Id>> {
+        event_ids: Vec<Vec<OwnedEventId>>,
+    ) -> Result<Vec<OwnedEventId>> {
        let mut auth_chain_sets = vec![];
        for ids in event_ids {
            // TODO state store `auth_event_ids` returns self in the event ids list

@@ -238,7 +238,7 @@ impl<E: Event + Clone> TestStore<E> {
            Ok(auth_chain_sets
                .into_iter()
                .flatten()
-                .filter(|id| !common.contains(id.borrow()))
+                .filter(|id| !common.contains(id))
                .collect())
        } else {
            Ok(vec![])

@@ -565,7 +565,7 @@ impl EventTypeExt for &TimelineEventType {

mod event {
    use ruma::{
-        MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
+        EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
        events::{TimelineEventType, pdu::Pdu},
    };
    use serde::{Deserialize, Serialize};

@@ -574,9 +574,7 @@ mod event {
    use super::Event;

    impl Event for PduEvent {
-        type Id = OwnedEventId;
-
-        fn event_id(&self) -> &Self::Id { &self.event_id }
+        fn event_id(&self) -> &EventId { &self.event_id }

        fn room_id(&self) -> &RoomId {
            match &self.rest {

@@ -632,28 +630,30 @@ mod event {
            }
        }

-        fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
+        fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + Send + '_> {
            match &self.rest {
-                | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)),
-                | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()),
+                | Pdu::RoomV1Pdu(ev) =>
+                    Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())),
+                | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)),
                #[cfg(not(feature = "unstable-exhaustive-types"))]
                | _ => unreachable!("new PDU version"),
            }
        }

-        fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
+        fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + Send + '_> {
            match &self.rest {
-                | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)),
-                | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()),
+                | Pdu::RoomV1Pdu(ev) =>
+                    Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())),
+                | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)),
                #[cfg(not(feature = "unstable-exhaustive-types"))]
                | _ => unreachable!("new PDU version"),
            }
        }

-        fn redacts(&self) -> Option<&Self::Id> {
+        fn redacts(&self) -> Option<&EventId> {
            match &self.rest {
-                | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(),
-                | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(),
+                | Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(),
+                | Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(),
                #[cfg(not(feature = "unstable-exhaustive-types"))]
                | _ => unreachable!("new PDU version"),
            }
@@ -133,7 +133,7 @@ pub fn auth_types_for_event(
    level = "debug",
    skip_all,
    fields(
-        event_id = incoming_event.event_id().borrow().as_str()
+        event_id = incoming_event.event_id().as_str(),
    )
)]
pub async fn auth_check<F, Fut, Fetched, Incoming>(

@@ -259,7 +259,7 @@ where
    // 3. If event does not have m.room.create in auth_events reject
    if !incoming_event
        .auth_events()
-        .any(|id| id.borrow() == room_create_event.event_id().borrow())
+        .any(|id| id == room_create_event.event_id())
    {
        warn!("no m.room.create event in auth events");
        return Ok(false);

@@ -638,7 +638,7 @@ fn valid_membership_change(
        warn!(?target_user_membership_event_id, "Banned user can't join");
        false
    } else if (join_rules == JoinRule::Invite
-        || room_version.allow_knocking && join_rules == JoinRule::Knock)
+        || room_version.allow_knocking && (join_rules == JoinRule::Knock || matches!(join_rules, JoinRule::KnockRestricted(_))))
        // If the join_rule is invite then allow if membership state is invite or join
        && (target_user_current_membership == MembershipState::Join
            || target_user_current_membership == MembershipState::Invite)

@@ -1021,11 +1021,11 @@ fn check_redaction(

    // If the domain of the event_id of the event being redacted is the same as the
    // domain of the event_id of the m.room.redaction, allow
-    if redaction_event.event_id().borrow().server_name()
+    if redaction_event.event_id().server_name()
        == redaction_event
            .redacts()
            .as_ref()
-            .and_then(|&id| id.borrow().server_name())
+            .and_then(|&id| id.server_name())
    {
        debug!("redaction event allowed via room version 1 rules");
        return Ok(true);
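The membership check above now also accepts knocks under the `knock_restricted` join rule. A small illustrative predicate (not the crate's code) restating the combined condition, assuming ruma's `JoinRule` enum:

```rust
use ruma::events::room::join_rules::JoinRule;

// Hypothetical helper mirroring the new check: knocking is acceptable for both the
// plain Knock rule and KnockRestricted, provided the room version allows knocking.
fn knock_allowed(allow_knocking: bool, join_rule: &JoinRule) -> bool {
    allow_knocking && matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_))
}
```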
@@ -20,7 +20,7 @@ use std::{

use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future};
use ruma::{
-    EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId,
+    EventId, Int, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId,
    events::{
        StateEventType, TimelineEventType,
        room::member::{MembershipState, RoomMemberEventContent},

@@ -39,9 +39,7 @@ use crate::{
    debug, debug_error,
    matrix::{event::Event, pdu::StateKey},
    trace,
-    utils::stream::{
-        BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryReadyExt, WidebandExt,
-    },
+    utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, WidebandExt},
    warn,
};

@@ -69,9 +67,6 @@ type Result<T, E = Error> = crate::Result<T, E>;
/// * `event_fetch` - Any event not found in the `event_map` will defer to this
/// closure to find the event.
///
-/// * `parallel_fetches` - The number of asynchronous fetch requests in-flight
-/// for any given operation.
-///
/// ## Invariants
///
/// The caller of `resolve` must ensure that all the events are from the same

@@ -82,21 +77,19 @@ type Result<T, E = Error> = crate::Result<T, E>;
pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>(
    room_version: &RoomVersionId,
    state_sets: Sets,
-    auth_chain_sets: &'a [HashSet<E::Id, Hasher>],
+    auth_chain_sets: &'a [HashSet<OwnedEventId, Hasher>],
    event_fetch: &Fetch,
    event_exists: &Exists,
-    parallel_fetches: usize,
-) -> Result<StateMap<E::Id>>
+) -> Result<StateMap<OwnedEventId>>
where
-    Fetch: Fn(E::Id) -> FetchFut + Sync,
+    Fetch: Fn(OwnedEventId) -> FetchFut + Sync,
    FetchFut: Future<Output = Option<E>> + Send,
-    Exists: Fn(E::Id) -> ExistsFut + Sync,
+    Exists: Fn(OwnedEventId) -> ExistsFut + Sync,
    ExistsFut: Future<Output = bool> + Send,
    Sets: IntoIterator<IntoIter = SetIter> + Send,
-    SetIter: Iterator<Item = &'a StateMap<E::Id>> + Clone + Send,
+    SetIter: Iterator<Item = &'a StateMap<OwnedEventId>> + Clone + Send,
    Hasher: BuildHasher + Send + Sync,
    E: Event + Clone + Send + Sync,
-    E::Id: Borrow<EventId> + Send + Sync,
    for<'b> &'b E: Send,
{
    debug!("State resolution starting");

@@ -147,13 +140,8 @@ where

    // Sort the control events based on power_level/clock/event_id and
    // outgoing/incoming edges
-    let sorted_control_levels = reverse_topological_power_sort(
-        control_events,
-        &all_conflicted,
-        &event_fetch,
-        parallel_fetches,
-    )
-    .await?;
+    let sorted_control_levels =
+        reverse_topological_power_sort(control_events, &all_conflicted, &event_fetch).await?;

    debug!(count = sorted_control_levels.len(), "power events");
    trace!(list = ?sorted_control_levels, "sorted power events");

@@ -162,7 +150,7 @@ where
    // Sequentially auth check each control event.
    let resolved_control = iterative_auth_check(
        &room_version,
-        sorted_control_levels.iter().stream(),
+        sorted_control_levels.iter().stream().map(AsRef::as_ref),
        clean.clone(),
        &event_fetch,
    )

@@ -179,7 +167,7 @@ where
    // that failed auth
    let events_to_resolve: Vec<_> = all_conflicted
        .iter()
-        .filter(|&id| !deduped_power_ev.contains(id.borrow()))
+        .filter(|&id| !deduped_power_ev.contains(id))
        .cloned()
        .collect();

@@ -199,7 +187,7 @@ where

    let mut resolved_state = iterative_auth_check(
        &room_version,
-        sorted_left_events.iter().stream(),
+        sorted_left_events.iter().stream().map(AsRef::as_ref),
        resolved_control, // The control events are added to the final resolved state
        &event_fetch,
    )

@@ -292,16 +280,14 @@ where
/// earlier (further back in time) origin server timestamp.
#[tracing::instrument(level = "debug", skip_all)]
async fn reverse_topological_power_sort<E, F, Fut>(
-    events_to_sort: Vec<E::Id>,
-    auth_diff: &HashSet<E::Id>,
+    events_to_sort: Vec<OwnedEventId>,
+    auth_diff: &HashSet<OwnedEventId>,
    fetch_event: &F,
-    parallel_fetches: usize,
-) -> Result<Vec<E::Id>>
+) -> Result<Vec<OwnedEventId>>
where
-    F: Fn(E::Id) -> Fut + Sync,
+    F: Fn(OwnedEventId) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E: Event + Send + Sync,
-    E::Id: Borrow<EventId> + Send + Sync,
{
    debug!("reverse topological sort of power events");

@@ -311,35 +297,36 @@ where
    }

    // This is used in the `key_fn` passed to the lexico_topo_sort fn
-    let event_to_pl = graph
+    let event_to_pl: HashMap<_, _> = graph
        .keys()
+        .cloned()
        .stream()
-        .map(|event_id| {
-            get_power_level_for_sender(event_id.clone(), fetch_event)
-                .map(move |res| res.map(|pl| (event_id, pl)))
+        .broad_filter_map(async |event_id| {
+            let pl = get_power_level_for_sender(&event_id, fetch_event)
+                .await
+                .ok()?;
+            Some((event_id, pl))
        })
-        .buffer_unordered(parallel_fetches)
-        .ready_try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| {
+        .inspect(|(event_id, pl)| {
            debug!(
-                event_id = event_id.borrow().as_str(),
-                power_level = i64::from(pl),
+                event_id = event_id.as_str(),
+                power_level = i64::from(*pl),
                "found the power level of an event's sender",
            );

-            event_to_pl.insert(event_id.clone(), pl);
-            Ok(event_to_pl)
        })
+        .collect()
        .boxed()
-        .await?;
+        .await;

-    let event_to_pl = &event_to_pl;
-    let fetcher = |event_id: E::Id| async move {
+    let fetcher = async |event_id: OwnedEventId| {
        let pl = *event_to_pl
-            .get(event_id.borrow())
+            .get(&event_id)
            .ok_or_else(|| Error::NotFound(String::new()))?;

        let ev = fetch_event(event_id)
            .await
            .ok_or_else(|| Error::NotFound(String::new()))?;

        Ok((pl, ev.origin_server_ts()))
    };
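The rework above replaces the `buffer_unordered`/`ready_try_fold` accumulation with a filter-map that skips events whose sender power level cannot be fetched and collects straight into a map. `broad_filter_map` is a conduwuit stream extension; a rough equivalent using plain `futures` combinators, for illustration only:

```rust
use std::collections::HashMap;

use futures::{StreamExt, stream};
use ruma::{Int, OwnedEventId};

// Sketch: resolve each event id to its sender's power level, drop failures,
// and collect the survivors into a HashMap keyed by event id.
async fn power_levels_by_event<F, Fut>(ids: Vec<OwnedEventId>, get_pl: F) -> HashMap<OwnedEventId, Int>
where
    F: Fn(OwnedEventId) -> Fut,
    Fut: std::future::Future<Output = Option<Int>>,
{
    stream::iter(ids)
        .filter_map(|id| {
            let fut = get_pl(id.clone());
            async move { Some((id, fut.await?)) }
        })
        .collect()
        .await
}
```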
@@ -476,18 +463,17 @@ where
/// the eventId at the eventId's generation (we walk backwards to `EventId`s
/// most recent previous power level event).
async fn get_power_level_for_sender<E, F, Fut>(
-    event_id: E::Id,
+    event_id: &EventId,
    fetch_event: &F,
) -> serde_json::Result<Int>
where
-    F: Fn(E::Id) -> Fut + Sync,
+    F: Fn(OwnedEventId) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E: Event + Send,
-    E::Id: Borrow<EventId> + Send,
{
    debug!("fetch event ({event_id}) senders power level");

-    let event = fetch_event(event_id).await;
+    let event = fetch_event(event_id.to_owned()).await;

    let auth_events = event.as_ref().map(Event::auth_events);

@@ -495,7 +481,7 @@ where
        .into_iter()
        .flatten()
        .stream()
-        .broadn_filter_map(5, |aid| fetch_event(aid.clone()))
+        .broadn_filter_map(5, |aid| fetch_event(aid.to_owned()))
        .ready_find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, ""))
        .await;

@@ -528,14 +514,13 @@ where
async fn iterative_auth_check<'a, E, F, Fut, S>(
    room_version: &RoomVersion,
    events_to_check: S,
-    unconflicted_state: StateMap<E::Id>,
+    unconflicted_state: StateMap<OwnedEventId>,
    fetch_event: &F,
-) -> Result<StateMap<E::Id>>
+) -> Result<StateMap<OwnedEventId>>
where
-    F: Fn(E::Id) -> Fut + Sync,
+    F: Fn(OwnedEventId) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
-    E::Id: Borrow<EventId> + Clone + Eq + Ord + Send + Sync + 'a,
-    S: Stream<Item = &'a E::Id> + Send + 'a,
+    S: Stream<Item = &'a EventId> + Send + 'a,
    E: Event + Clone + Send + Sync,
{
    debug!("starting iterative auth check");

@@ -543,7 +528,7 @@ where
    let events_to_check: Vec<_> = events_to_check
        .map(Result::Ok)
        .broad_and_then(async |event_id| {
-            fetch_event(event_id.clone())
+            fetch_event(event_id.to_owned())
                .await
                .ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}")))
        })

@@ -551,16 +536,16 @@ where
        .boxed()
        .await?;

-    let auth_event_ids: HashSet<E::Id> = events_to_check
+    let auth_event_ids: HashSet<OwnedEventId> = events_to_check
        .iter()
-        .flat_map(|event: &E| event.auth_events().map(Clone::clone))
+        .flat_map(|event: &E| event.auth_events().map(ToOwned::to_owned))
        .collect();

-    let auth_events: HashMap<E::Id, E> = auth_event_ids
+    let auth_events: HashMap<OwnedEventId, E> = auth_event_ids
        .into_iter()
        .stream()
        .broad_filter_map(fetch_event)
-        .map(|auth_event| (auth_event.event_id().clone(), auth_event))
+        .map(|auth_event| (auth_event.event_id().to_owned(), auth_event))
        .collect()
        .boxed()
        .await;

@@ -581,7 +566,7 @@ where

        let mut auth_state = StateMap::new();
        for aid in event.auth_events() {
-            if let Some(ev) = auth_events.get(aid.borrow()) {
+            if let Some(ev) = auth_events.get(aid) {
                //TODO: synapse checks "rejected_reason" which is most likely related to
                // soft-failing
                auth_state.insert(

@@ -592,7 +577,7 @@ where
                    ev.clone(),
                );
            } else {
-                warn!(event_id = aid.borrow().as_str(), "missing auth event");
+                warn!(event_id = aid.as_str(), "missing auth event");
            }
        }

@@ -601,7 +586,7 @@ where
            .stream()
            .ready_filter_map(|key| Some((key, resolved_state.get(key)?)))
            .filter_map(|(key, ev_id)| async move {
-                if let Some(event) = auth_events.get(ev_id.borrow()) {
+                if let Some(event) = auth_events.get(ev_id) {
                    Some((key, event.clone()))
                } else {
                    Some((key, fetch_event(ev_id.clone()).await?))

@@ -633,7 +618,7 @@ where
                // add event to resolved state map
                resolved_state.insert(
                    event.event_type().with_state_key(state_key),
-                    event.event_id().clone(),
+                    event.event_id().to_owned(),
                );
            },
            | Ok(false) => {

@@ -660,15 +645,14 @@ where
/// level as a parent) will be marked as depth 1. depth 1 is "older" than depth
/// 0.
async fn mainline_sort<E, F, Fut>(
-    to_sort: &[E::Id],
-    resolved_power_level: Option<E::Id>,
+    to_sort: &[OwnedEventId],
+    resolved_power_level: Option<OwnedEventId>,
    fetch_event: &F,
-) -> Result<Vec<E::Id>>
+) -> Result<Vec<OwnedEventId>>
where
-    F: Fn(E::Id) -> Fut + Sync,
+    F: Fn(OwnedEventId) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E: Event + Clone + Send + Sync,
-    E::Id: Borrow<EventId> + Clone + Send + Sync,
{
    debug!("mainline sort of events");

@@ -688,7 +672,7 @@ where

            pl = None;
            for aid in event.auth_events() {
-                let ev = fetch_event(aid.clone())
+                let ev = fetch_event(aid.to_owned())
                    .await
                    .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?;

@@ -734,26 +718,25 @@ where
/// that has an associated mainline depth.
async fn get_mainline_depth<E, F, Fut>(
    mut event: Option<E>,
-    mainline_map: &HashMap<E::Id, usize>,
+    mainline_map: &HashMap<OwnedEventId, usize>,
    fetch_event: &F,
) -> Result<usize>
where
-    F: Fn(E::Id) -> Fut + Sync,
+    F: Fn(OwnedEventId) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E: Event + Send + Sync,
-    E::Id: Borrow<EventId> + Send + Sync,
{
    while let Some(sort_ev) = event {
-        debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline");
+        debug!(event_id = sort_ev.event_id().as_str(), "mainline");

        let id = sort_ev.event_id();
-        if let Some(depth) = mainline_map.get(id.borrow()) {
+        if let Some(depth) = mainline_map.get(id) {
            return Ok(*depth);
        }

        event = None;
        for aid in sort_ev.auth_events() {
-            let aev = fetch_event(aid.clone())
+            let aev = fetch_event(aid.to_owned())
                .await
                .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?;

@@ -768,15 +751,14 @@ where
}

async fn add_event_and_auth_chain_to_graph<E, F, Fut>(
-    graph: &mut HashMap<E::Id, HashSet<E::Id>>,
-    event_id: E::Id,
-    auth_diff: &HashSet<E::Id>,
+    graph: &mut HashMap<OwnedEventId, HashSet<OwnedEventId>>,
+    event_id: OwnedEventId,
+    auth_diff: &HashSet<OwnedEventId>,
    fetch_event: &F,
) where
-    F: Fn(E::Id) -> Fut + Sync,
+    F: Fn(OwnedEventId) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E: Event + Send + Sync,
-    E::Id: Borrow<EventId> + Clone + Send + Sync,
{
    let mut state = vec![event_id];
    while let Some(eid) = state.pop() {

@@ -786,26 +768,27 @@ async fn add_event_and_auth_chain_to_graph<E, F, Fut>(

        // Prefer the store to event as the store filters dedups the events
        for aid in auth_events {
-            if auth_diff.contains(aid.borrow()) {
-                if !graph.contains_key(aid.borrow()) {
+            if auth_diff.contains(aid) {
+                if !graph.contains_key(aid) {
                    state.push(aid.to_owned());
                }

-                // We just inserted this at the start of the while loop
-                graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned());
+                graph
+                    .get_mut(&eid)
+                    .expect("We just inserted this at the start of the while loop")
+                    .insert(aid.to_owned());
            }
        }
    }
}

-async fn is_power_event_id<E, F, Fut>(event_id: &E::Id, fetch: &F) -> bool
+async fn is_power_event_id<E, F, Fut>(event_id: &EventId, fetch: &F) -> bool
where
-    F: Fn(E::Id) -> Fut + Sync,
+    F: Fn(OwnedEventId) -> Fut + Sync,
    Fut: Future<Output = Option<E>> + Send,
    E: Event + Send,
-    E::Id: Borrow<EventId> + Send + Sync,
{
-    match fetch(event_id.clone()).await.as_ref() {
+    match fetch(event_id.to_owned()).await.as_ref() {
        | Some(state) => is_power_event(state),
        | _ => false,
    }

@@ -909,13 +892,13 @@ mod tests {

    let fetcher = |id| ready(events.get(&id).cloned());
    let sorted_power_events =
-        super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1)
+        super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher)
            .await
            .unwrap();

    let resolved_power = super::iterative_auth_check(
        &RoomVersion::V6,
-        sorted_power_events.iter().stream(),
+        sorted_power_events.iter().map(AsRef::as_ref).stream(),
        HashMap::new(), // unconflicted events
        &fetcher,
    )

@@ -1300,7 +1283,7 @@ mod tests {
    let ev_map = store.0.clone();
    let fetcher = |id| ready(ev_map.get(&id).cloned());

-    let exists = |id: <PduEvent as Event>::Id| ready(ev_map.get(&*id).is_some());
+    let exists = |id: OwnedEventId| ready(ev_map.get(&*id).is_some());

    let state_sets = [state_at_bob, state_at_charlie];
    let auth_chain: Vec<_> = state_sets

@@ -1312,14 +1295,8 @@ mod tests {
        })
        .collect();

-    let resolved = match super::resolve(
-        &RoomVersionId::V2,
-        &state_sets,
-        &auth_chain,
-        &fetcher,
-        &exists,
-        1,
-    )
+    let resolved =
+        match super::resolve(&RoomVersionId::V2, &state_sets, &auth_chain, &fetcher, &exists)
    .await
    {
        | Ok(state) => state,

@@ -1429,16 +1406,10 @@ mod tests {
        })
        .collect();

-    let fetcher = |id: <PduEvent as Event>::Id| ready(ev_map.get(&id).cloned());
-    let exists = |id: <PduEvent as Event>::Id| ready(ev_map.get(&id).is_some());
-    let resolved = match super::resolve(
-        &RoomVersionId::V6,
-        &state_sets,
-        &auth_chain,
-        &fetcher,
-        &exists,
-        1,
-    )
+    let fetcher = |id: OwnedEventId| ready(ev_map.get(&id).cloned());
+    let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some());
+    let resolved =
+        match super::resolve(&RoomVersionId::V6, &state_sets, &auth_chain, &fetcher, &exists)
    .await
    {
        | Ok(state) => state,
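The updated tests also show the new call shape: `resolve` now takes five arguments and the fetch/exists closures operate on `OwnedEventId` directly. A condensed, illustrative call site, with `PduEvent`, `StateMap` and `Result` standing in for the crate's own types (they are placeholders here, not new API):

```rust
use std::collections::{HashMap, HashSet};

use futures::future::ready;
use ruma::{OwnedEventId, RoomVersionId};

// Illustrative only: event_map stands in for whatever store the caller holds,
// and PduEvent / StateMap / Result for the crate's usual aliases.
async fn resolve_sketch(
    event_map: &HashMap<OwnedEventId, PduEvent>,
    state_sets: &[StateMap<OwnedEventId>],
    auth_chain_sets: &[HashSet<OwnedEventId>],
) -> Result<StateMap<OwnedEventId>> {
    let fetch = |id: OwnedEventId| ready(event_map.get(&id).cloned());
    let exists = |id: OwnedEventId| ready(event_map.get(&id).is_some());

    state_res::resolve(&RoomVersionId::V6, state_sets, auth_chain_sets, &fetch, &exists).await
}
```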
@@ -133,16 +133,10 @@ pub(crate) async fn do_check(
    .collect();

    let event_map = &event_map;
-    let fetch = |id: <PduEvent as Event>::Id| ready(event_map.get(&id).cloned());
-    let exists = |id: <PduEvent as Event>::Id| ready(event_map.get(&id).is_some());
-    let resolved = super::resolve(
-        &RoomVersionId::V6,
-        state_sets,
-        &auth_chain_sets,
-        &fetch,
-        &exists,
-        1,
-    )
+    let fetch = |id: OwnedEventId| ready(event_map.get(&id).cloned());
+    let exists = |id: OwnedEventId| ready(event_map.get(&id).is_some());
+    let resolved =
+        super::resolve(&RoomVersionId::V6, state_sets, &auth_chain_sets, &fetch, &exists)
    .await;

    match resolved {

@@ -247,8 +241,8 @@ impl<E: Event + Clone> TestStore<E> {
    pub(crate) fn auth_event_ids(
        &self,
        room_id: &RoomId,
-        event_ids: Vec<E::Id>,
-    ) -> Result<HashSet<E::Id>> {
+        event_ids: Vec<OwnedEventId>,
+    ) -> Result<HashSet<OwnedEventId>> {
        let mut result = HashSet::new();
        let mut stack = event_ids;

@@ -584,7 +578,7 @@ pub(crate) fn INITIAL_EDGES() -> Vec<OwnedEventId> {

pub(crate) mod event {
    use ruma::{
-        MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
+        EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId,
        events::{TimelineEventType, pdu::Pdu},
    };
    use serde::{Deserialize, Serialize};

@@ -593,9 +587,7 @@ pub(crate) mod event {
    use crate::Event;

    impl Event for PduEvent {
-        type Id = OwnedEventId;
-
-        fn event_id(&self) -> &Self::Id { &self.event_id }
+        fn event_id(&self) -> &EventId { &self.event_id }

        fn room_id(&self) -> &RoomId {
            match &self.rest {

@@ -652,29 +644,31 @@ pub(crate) mod event {
            }
        }

        #[allow(refining_impl_trait)]
-        fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
+        fn prev_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + Send + '_> {
            match &self.rest {
-                | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)),
-                | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()),
+                | Pdu::RoomV1Pdu(ev) =>
+                    Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())),
+                | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)),
                #[allow(unreachable_patterns)]
                | _ => unreachable!("new PDU version"),
            }
        }

        #[allow(refining_impl_trait)]
-        fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &Self::Id> + Send + '_> {
+        fn auth_events(&self) -> Box<dyn DoubleEndedIterator<Item = &EventId> + Send + '_> {
            match &self.rest {
-                | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)),
-                | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()),
+                | Pdu::RoomV1Pdu(ev) =>
+                    Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())),
+                | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)),
                #[allow(unreachable_patterns)]
                | _ => unreachable!("new PDU version"),
            }
        }

-        fn redacts(&self) -> Option<&Self::Id> {
+        fn redacts(&self) -> Option<&EventId> {
            match &self.rest {
-                | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(),
-                | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(),
+                | Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(),
+                | Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(),
                #[allow(unreachable_patterns)]
                | _ => unreachable!("new PDU version"),
            }
@@ -21,7 +21,10 @@ pub use ::toml;
pub use ::tracing;
pub use config::Config;
pub use error::Error;
-pub use info::{rustc_flags_capture, version, version::version};
+pub use info::{
+    rustc_flags_capture, version,
+    version::{name, version},
+};
pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res};
pub use server::Server;
pub use utils::{ctor, dtor, implement, result, result::Result};
@@ -1,12 +1,12 @@
type Delim<'a> = (&'a str, &'a str);

-/// Slice a string between a pair of delimeters.
+/// Slice a string between a pair of delimiters.
pub trait Between<'a> {
-    /// Extract a string between the delimeters. If the delimeters were not
+    /// Extract a string between the delimiters. If the delimiters were not
    /// found None is returned, otherwise the first extraction is returned.
    fn between(&self, delim: Delim<'_>) -> Option<&'a str>;

-    /// Extract a string between the delimeters. If the delimeters were not
+    /// Extract a string between the delimiters. If the delimiters were not
    /// found the original string is returned; take note of this behavior,
    /// if an empty slice is desired for this case use the fallible version and
    /// unwrap to EMPTY.
@@ -193,7 +193,7 @@ fn get_cache(ctx: &Context, desc: &Descriptor) -> Option<Cache> {
        return None;
    }

-    // Some cache capacities are overriden by server config in a strange but
+    // Some cache capacities are overridden by server config in a strange but
    // legacy-compat way
    let config = &ctx.server.config;
    let cap = match desc.name {
@@ -15,7 +15,7 @@ use conduwuit_core::{
 #[clap(
 	about,
 	long_about = None,
-	name = "conduwuit",
+	name = conduwuit_core::name(),
 	version = conduwuit_core::version(),
 )]
 pub(crate) struct Args {
@@ -74,17 +74,30 @@ pub(crate) struct Args {
 	/// with the exception of the last bucket, try increasing this value to e.g.
 	/// 50 or 100. Inversely, decrease to 10 etc if the histogram lacks
 	/// resolution.
-	#[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_INTERVAL", default_value = "25")]
+	#[arg(
+		long,
+		hide(true),
+		env = "CONTINUWUITY_RUNTIME_HISTOGRAM_INTERVAL",
+		env = "CONDUWUIT_RUNTIME_HISTOGRAM_INTERVAL",
+		default_value = "25"
+	)]
 	pub(crate) worker_histogram_interval: u64,

 	/// Set the histogram bucket count (tokio_unstable). Default is 20.
-	#[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS", default_value = "20")]
+	#[arg(
+		long,
+		hide(true),
+		env = "CONTINUWUITY_RUNTIME_HISTOGRAM_BUCKETS",
+		env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS",
+		default_value = "20"
+	)]
 	pub(crate) worker_histogram_buckets: usize,

 	/// Toggles worker affinity feature.
 	#[arg(
 		long,
 		hide(true),
+		env = "CONTINUWUITY_RUNTIME_WORKER_AFFINITY",
 		env = "CONDUWUIT_RUNTIME_WORKER_AFFINITY",
 		action = ArgAction::Set,
 		num_args = 0..=1,
@@ -99,6 +112,7 @@ pub(crate) struct Args {
 	#[arg(
 		long,
 		hide(true),
+		env = "CONTINUWUITY_RUNTIME_GC_ON_PARK",
 		env = "CONDUWUIT_RUNTIME_GC_ON_PARK",
 		action = ArgAction::Set,
 		num_args = 0..=1,
@@ -73,7 +73,7 @@ async fn async_main(server: &Arc<Server>) -> Result<(), Error> {
 			.lock()
 			.await
 			.take()
-			.expect("services initialied"),
+			.expect("services initialized"),
 	)
 	.await
 	{
@@ -13,8 +13,8 @@ pub(super) fn restart() -> ! {
 	//
 	// We can (and do) prevent that panic by checking the result of current_exe()
 	// prior to committing to restart, returning an error to the user without any
-	// unexpected shutdown. In a nutshell that is the execuse for this unsafety.
-	// Nevertheless, we still want a way to override the restart preventation (i.e.
+	// unexpected shutdown. In a nutshell that is the excuse for this unsafety.
+	// Nevertheless, we still want a way to override the restart presentation (i.e.
 	// admin server restart --force).
 	let exe = unsafe { utils::sys::current_exe().expect("program path must be available") };
 	let envs = env::vars();
@@ -98,12 +98,7 @@ pub(super) fn shutdown(server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
 		Level::INFO
 	};

-	debug!(
-		timeout = ?SHUTDOWN_TIMEOUT,
-		"Waiting for runtime..."
-	);
-
-	runtime.shutdown_timeout(SHUTDOWN_TIMEOUT);
+	wait_shutdown(server, runtime);
 	let runtime_metrics = server.server.metrics.runtime_interval().unwrap_or_default();

 	event!(LEVEL, ?runtime_metrics, "Final runtime metrics");
@@ -111,13 +106,23 @@ pub(super) fn shutdown(server: &Arc<Server>, runtime: tokio::runtime::Runtime) {

 #[cfg(not(tokio_unstable))]
 #[tracing::instrument(name = "stop", level = "info", skip_all)]
-pub(super) fn shutdown(_server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
+pub(super) fn shutdown(server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
+	wait_shutdown(server, runtime);
+}
+
+fn wait_shutdown(_server: &Arc<Server>, runtime: tokio::runtime::Runtime) {
 	debug!(
 		timeout = ?SHUTDOWN_TIMEOUT,
 		"Waiting for runtime..."
 	);

 	runtime.shutdown_timeout(SHUTDOWN_TIMEOUT);
+
+	// Join any jemalloc threads so they don't appear in use at exit.
+	#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))]
+	conduwuit_core::alloc::je::background_thread_enable(false)
+		.log_debug_err()
+		.ok();
 }

 #[tracing::instrument(
@@ -103,6 +103,7 @@ conduwuit-admin.workspace = true
 conduwuit-api.workspace = true
 conduwuit-core.workspace = true
 conduwuit-service.workspace = true
+conduwuit-web.workspace = true
 const-str.workspace = true
 futures.workspace = true
 http.workspace = true
@@ -6,8 +6,7 @@ use axum::{
 };
 use axum_client_ip::SecureClientIpSource;
 use conduwuit::{Result, Server, debug, error};
-use conduwuit_api::router::state::Guard;
-use conduwuit_service::Services;
+use conduwuit_service::{Services, state::Guard};
 use http::{
 	HeaderValue, Method, StatusCode,
 	header::{self, HeaderName},
@@ -1,9 +1,8 @@
 use std::sync::Arc;

-use axum::{Router, response::IntoResponse, routing::get};
+use axum::{Router, response::IntoResponse};
 use conduwuit::Error;
-use conduwuit_api::router::{state, state::Guard};
-use conduwuit_service::Services;
+use conduwuit_service::{Services, state, state::Guard};
 use http::{StatusCode, Uri};
 use ruma::api::client::error::ErrorKind;

@@ -11,7 +10,7 @@ pub(crate) fn build(services: &Arc<Services>) -> (Router, Guard) {
 	let router = Router::<state::State>::new();
 	let (state, guard) = state::create(services.clone());
 	let router = conduwuit_api::router::build(router, &services.server)
-		.route("/", get(it_works))
+		.merge(conduwuit_web::build())
 		.fallback(not_found)
 		.with_state(state);

@@ -21,5 +20,3 @@ pub(crate) fn build(services: &Arc<Services>) -> (Router, Guard) {
 async fn not_found(_uri: Uri) -> impl IntoResponse {
 	Error::Request(ErrorKind::Unrecognized, "Not Found".into(), StatusCode::NOT_FOUND)
 }
-
-async fn it_works() -> &'static str { "hewwo from conduwuit woof!" }
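For context on the `.merge(conduwuit_web::build())` change above: the inline `it_works` route is replaced by a sub-router supplied by the new web crate, using axum's standard router-composition pattern. A simplified sketch of that pattern (the `/health` route and the `compose` name are illustrative, not taken from the codebase):

    // Sketch: composing two axum routers that share the same state type `S`.
    use axum::{Router, routing::get};

    fn compose<S: Clone + Send + Sync + 'static>(web: Router<S>) -> Router<S> {
        // Routes defined on `web` (e.g. "/") are folded into the parent router;
        // a fallback registered later still covers any path neither router matches.
        Router::new()
            .merge(web)
            .route("/health", get(|| async { "ok" }))
    }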
@@ -165,7 +165,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 		.timeline
 		.build_and_append_pdu(
 			PduBuilder::state(String::new(), &RoomTopicEventContent {
-				topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://conduwuit.puppyirl.gay/", services.config.server_name),
+				topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://continuwuity.org/", services.config.server_name),
 			}),
 			server_user,
 			&room_id,
@@ -25,7 +25,7 @@ pub(super) async fn console_auto_stop(&self) {
 /// Execute admin commands after startup
 #[implement(super::Service)]
 pub(super) async fn startup_execute(&self) -> Result {
-	// List of comamnds to execute
+	// List of commands to execute
 	let commands = &self.services.server.config.admin_execute;

 	// Determine if we're running in smoketest-mode which will change some behaviors
@@ -64,7 +64,7 @@ pub(super) async fn startup_execute(&self) -> Result {
 /// Execute admin commands after signal
 #[implement(super::Service)]
 pub(super) async fn signal_execute(&self) -> Result {
-	// List of comamnds to execute
+	// List of commands to execute
 	let commands = self.services.server.config.admin_signal_execute.clone();

 	// When true, errors are ignored and execution continues.
@@ -166,7 +166,7 @@ impl Service {
 			.map_err(|e| err!("Failed to enqueue admin command: {e:?}"))
 	}

-	/// Dispatches a comamnd to the processor on the current task and waits for
+	/// Dispatches a command to the processor on the current task and waits for
 	/// completion.
 	pub async fn command_in_place(
 		&self,
@@ -26,7 +26,7 @@ impl NamespaceRegex {
 		false
 	}

-	/// Checks if this namespace has exlusive rights to a namespace
+	/// Checks if this namespace has exclusive rights to a namespace
 	#[inline]
 	#[must_use]
 	pub fn is_exclusive_match(&self, heystack: &str) -> bool {
@@ -338,7 +338,7 @@ fn handle_federation_error(
 		return fallback();
 	}

-	// Reached for 5xx errors. This is where we don't fallback given the likelyhood
+	// Reached for 5xx errors. This is where we don't fallback given the likelihood
 	// the other endpoint will also be a 5xx and we're wasting time.
 	error
 }
@@ -356,7 +356,7 @@ pub async fn fetch_remote_thumbnail_legacy(

 	self.check_legacy_freeze()?;
 	self.check_fetch_authorized(&mxc)?;
-	let reponse = self
+	let response = self
 		.services
 		.sending
 		.send_federation_request(mxc.server_name, media::get_content_thumbnail::v3::Request {
@@ -373,10 +373,17 @@ pub async fn fetch_remote_thumbnail_legacy(
 		.await?;

 	let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?;
-	self.upload_thumbnail(&mxc, None, None, reponse.content_type.as_deref(), &dim, &reponse.file)
+	self.upload_thumbnail(
+		&mxc,
+		None,
+		None,
+		response.content_type.as_deref(),
+		&dim,
+		&response.file,
+	)
 	.await?;

-	Ok(reponse)
+	Ok(response)
 }

 #[implement(super::Service)]
@@ -5,6 +5,7 @@ mod manager;
 mod migrations;
 mod service;
 pub mod services;
+pub mod state;

 pub mod account_data;
 pub mod admin;
@@ -296,7 +296,7 @@ impl super::Service {
 			expire: CachedOverride::default_expire(),
 			overriding: (hostname != untername)
 				.then_some(hostname.into())
-				.inspect(|_| debug_info!("{untername:?} overriden by {hostname:?}")),
+				.inspect(|_| debug_info!("{untername:?} overridden by {hostname:?}")),
 		});

 	Ok(())
@@ -8,7 +8,7 @@ use conduwuit_core::{
 	Error, Result, err, implement,
 	state_res::{self, StateMap},
 	trace,
-	utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt, automatic_width},
+	utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt},
 };
 use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join};
 use ruma::{OwnedEventId, RoomId, RoomVersionId};

@@ -112,14 +112,7 @@
 {
 	let event_fetch = |event_id| self.event_fetch(event_id);
 	let event_exists = |event_id| self.event_exists(event_id);
-	state_res::resolve(
-		room_version,
-		state_sets,
-		auth_chain_sets,
-		&event_fetch,
-		&event_exists,
-		automatic_width(),
-	)
+	state_res::resolve(room_version, state_sets, auth_chain_sets, &event_fetch, &event_exists)
 	.map_err(|e| err!(error!("State resolution failed: {e:?}")))
 	.await
 }
@@ -399,7 +399,7 @@ async fn get_room_summary(
 	Ok(summary)
 }

-/// With the given identifier, checks if a room is accessable
+/// With the given identifier, checks if a room is accessible
 #[implement(Service)]
 async fn is_accessible_child<'a, I>(
 	&self,
@@ -267,15 +267,15 @@ impl Service {
 	///
 	/// Returns pdu id
 	#[tracing::instrument(level = "debug", skip_all)]
-	pub async fn append_pdu<'a, Leafs>(
+	pub async fn append_pdu<'a, Leaves>(
 		&'a self,
 		pdu: &'a PduEvent,
 		mut pdu_json: CanonicalJsonObject,
-		leafs: Leafs,
+		leaves: Leaves,
 		state_lock: &'a RoomMutexGuard,
 	) -> Result<RawPduId>
 	where
-		Leafs: Iterator<Item = &'a EventId> + Send + 'a,
+		Leaves: Iterator<Item = &'a EventId> + Send + 'a,
 	{
 		// Coalesce database writes for the remainder of this scope.
 		let _cork = self.db.db.cork_and_flush();

@@ -344,7 +344,7 @@

 		self.services
 			.state
-			.set_forward_extremities(&pdu.room_id, leafs, state_lock)
+			.set_forward_extremities(&pdu.room_id, leaves, state_lock)
 			.await;

 		let insert_lock = self.mutex_insert.lock(&pdu.room_id).await;

@@ -951,17 +951,17 @@ impl Service {
 	/// Append the incoming event setting the state snapshot to the state from
 	/// the server that sent the event.
 	#[tracing::instrument(level = "debug", skip_all)]
-	pub async fn append_incoming_pdu<'a, Leafs>(
+	pub async fn append_incoming_pdu<'a, Leaves>(
 		&'a self,
 		pdu: &'a PduEvent,
 		pdu_json: CanonicalJsonObject,
-		new_room_leafs: Leafs,
+		new_room_leaves: Leaves,
 		state_ids_compressed: Arc<CompressedState>,
 		soft_fail: bool,
 		state_lock: &'a RoomMutexGuard,
 	) -> Result<Option<RawPduId>>
 	where
-		Leafs: Iterator<Item = &'a EventId> + Send + 'a,
+		Leaves: Iterator<Item = &'a EventId> + Send + 'a,
 	{
 		// We append to state before appending the pdu, so we don't have a moment in
 		// time with the pdu without it's state. This is okay because append_pdu can't

@@ -978,14 +978,14 @@

 			self.services
 				.state
-				.set_forward_extremities(&pdu.room_id, new_room_leafs, state_lock)
+				.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)
 				.await;

 			return Ok(None);
 		}

 		let pdu_id = self
-			.append_pdu(pdu, pdu_json, new_room_leafs, state_lock)
+			.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)
 			.await?;

 		Ok(Some(pdu_id))
@@ -1,6 +1,6 @@
 use std::{ops::Deref, sync::Arc};

-use conduwuit_service::Services;
+use crate::Services;

 #[derive(Clone, Copy)]
 pub struct State {
src/web/Cargo.toml (new file, +35 lines)
@@ -0,0 +1,35 @@
+[package]
+name = "conduwuit_web"
+categories.workspace = true
+description.workspace = true
+edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+version.workspace = true
+
+[lib]
+path = "mod.rs"
+crate-type = [
+	"rlib",
+#	"dylib",
+]
+
+[features]
+
+
+[dependencies]
+conduwuit-build-metadata.workspace = true
+conduwuit-service.workspace = true
+
+askama = "0.14.0"
+
+axum.workspace = true
+futures.workspace = true
+tracing.workspace = true
+rand.workspace = true
+thiserror.workspace = true
+
+[lints]
+workspace = true
src/web/css/index.css (new file, +68 lines)
@@ -0,0 +1,68 @@
+:root {
+	color-scheme: light;
+	--font-stack: sans-serif;
+
+	--background-color: #fff;
+	--text-color: #000;
+
+	--bg: oklch(0.76 0.0854 317.27);
+	--panel-bg: oklch(0.91 0.042 317.27);
+
+	--name-lightness: 0.45;
+
+	@media (prefers-color-scheme: dark) {
+		color-scheme: dark;
+		--text-color: #fff;
+		--bg: oklch(0.15 0.042 317.27);
+		--panel-bg: oklch(0.24 0.03 317.27);
+
+		--name-lightness: 0.8;
+	}
+
+	--c1: oklch(0.44 0.177 353.06);
+	--c2: oklch(0.59 0.158 150.88);
+
+	--normal-font-size: 1rem;
+	--small-font-size: 0.8rem;
+}
+
+body {
+	color: var(--text-color);
+	font-family: var(--font-stack);
+	margin: 0;
+	padding: 0;
+	display: grid;
+	place-items: center;
+	min-height: 100vh;
+}
+
+html {
+	background-color: var(--bg);
+	background-image: linear-gradient(
+		70deg,
+		oklch(from var(--bg) l + 0.2 c h),
+		oklch(from var(--bg) l - 0.2 c h)
+	);
+	font-size: 16px;
+}
+
+.panel {
+	width: min(clamp(24rem, 12rem + 40vw, 48rem), 100vw);
+	border-radius: 15px;
+	background-color: var(--panel-bg);
+	padding-inline: 1.5rem;
+	padding-block: 1rem;
+	box-shadow: 0 0.25em 0.375em hsla(0, 0%, 0%, 0.1);
+}
+
+.project-name {
+	text-decoration: none;
+	background: linear-gradient(
+		130deg,
+		oklch(from var(--c1) var(--name-lightness) c h),
+		oklch(from var(--c2) var(--name-lightness) c h)
+	);
+	background-clip: text;
+	color: transparent;
+	filter: brightness(1.2);
+}
src/web/mod.rs (new file, +73 lines)
@@ -0,0 +1,73 @@
+use askama::Template;
+use axum::{
+	Router,
+	extract::State,
+	http::{StatusCode, header},
+	response::{Html, IntoResponse, Response},
+	routing::get,
+};
+use conduwuit_build_metadata::{GIT_REMOTE_COMMIT_URL, GIT_REMOTE_WEB_URL, version_tag};
+use conduwuit_service::state;
+
+pub fn build() -> Router<state::State> {
+	let router = Router::<state::State>::new();
+	router.route("/", get(index_handler))
+}
+
+async fn index_handler(
+	State(services): State<state::State>,
+) -> Result<impl IntoResponse, WebError> {
+	#[derive(Debug, Template)]
+	#[template(path = "index.html.j2")]
+	struct Tmpl<'a> {
+		nonce: &'a str,
+		server_name: &'a str,
+	}
+	let nonce = rand::random::<u64>().to_string();
+
+	let template = Tmpl {
+		nonce: &nonce,
+		server_name: services.config.server_name.as_str(),
+	};
+	Ok((
+		[(header::CONTENT_SECURITY_POLICY, format!("default-src 'none' 'nonce-{nonce}';"))],
+		Html(template.render()?),
+	))
+}
+
+#[derive(Debug, thiserror::Error)]
+enum WebError {
+	#[error("Failed to render template: {0}")]
+	Render(#[from] askama::Error),
+}
+
+impl IntoResponse for WebError {
+	fn into_response(self) -> Response {
+		#[derive(Debug, Template)]
+		#[template(path = "error.html.j2")]
+		struct Tmpl<'a> {
+			nonce: &'a str,
+			err: WebError,
+		}
+
+		let nonce = rand::random::<u64>().to_string();
+
+		let status = match &self {
+			| Self::Render(_) => StatusCode::INTERNAL_SERVER_ERROR,
+		};
+		let tmpl = Tmpl { nonce: &nonce, err: self };
+		if let Ok(body) = tmpl.render() {
+			(
+				status,
+				[(
+					header::CONTENT_SECURITY_POLICY,
+					format!("default-src 'none' 'nonce-{nonce}';"),
+				)],
+				Html(body),
+			)
+				.into_response()
+		} else {
+			(status, "Something went wrong").into_response()
+		}
+	}
+}
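One detail worth noting in the handler above: the same randomly generated nonce is sent in the Content-Security-Policy header and interpolated into the layout template's <style nonce="..."> attribute, which is what allows the inlined stylesheet while `default-src 'none'` blocks everything else. A minimal sketch of that header construction (illustrative only; it simply mirrors the format! call shown above):

    // Illustrative helper: build the CSP value the way the handler above does.
    fn csp_for(nonce: &str) -> String {
        format!("default-src 'none' 'nonce-{nonce}';")
    }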
src/web/templates/_layout.html.j2 (new file, +32 lines)
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+	<meta charset="UTF-8" />
+	<title>{% block title %}Continuwuity{% endblock %}</title>
+	<meta name="viewport" content="width=device-width, initial-scale=1" />
+
+	<style type="text/css" nonce="{{ nonce }}">
+		/*<![CDATA[*/
+		{{ include_str !("css/index.css") | safe }}
+		/*]]>*/
+	</style>
+</head>
+
+<body>
+	<main>{%~ block content %}{% endblock ~%}</main>
+	{%~ block footer ~%}
+	<footer>
+		<p>Powered by <a href="https://continuwuity.org">Continuwuity</a>
+			{%~ if let Some(version_info) = self::version_tag() ~%}
+			{%~ if let Some(url) = GIT_REMOTE_COMMIT_URL.or(GIT_REMOTE_WEB_URL) ~%}
+			(<a href="{{ url }}">{{ version_info }}</a>)
+			{%~ else ~%}
+			({{ version_info }})
+			{%~ endif ~%}
+			{%~ endif ~%}</p>
+	</footer>
+	{%~ endblock ~%}
+</body>
+
+</html>
src/web/templates/error.html.j2 (new file, +20 lines)
@@ -0,0 +1,20 @@
+{% extends "_layout.html.j2" %}
+
+{%- block title -%}
+Server Error
+{%- endblock -%}
+
+{%- block content -%}
+<h1>
+	{%- match err -%}
+	{% else -%} 500: Internal Server Error
+	{%- endmatch -%}
+</h1>
+
+{%- match err -%}
+{% when WebError::Render(err) -%}
+<pre>{{ err }}</pre>
+{% else -%} <p>An error occurred</p>
+{%- endmatch -%}
+
+{%- endblock -%}
src/web/templates/index.html.j2 (new file, +16 lines)
@@ -0,0 +1,16 @@
+{% extends "_layout.html.j2" %}
+{%- block content -%}
+<div class="orb"></div>
+<div class="panel">
+	<h1>Welcome to <a class="project-name" href="https://continuwuity.org">Continuwuity</a>!</h1>
+	<p>Continuwuity is successfully installed and working.</p>
+	<p>To get started, you can:</p>
+	<ul>
+		<li>Read the <a href="https://continuwuity.org/introduction">documentation</a></li>
+		<li>Join the <a href="https://matrix.to/#/#continuwuity:continuwuity.org">Continuwuity Matrix room</a> or <a href="https://matrix.to/#/#space:continuwuity.org">space</a></li>
+		<li>Log in with a <a href="https://matrix.org/ecosystem/clients/">client</a></li>
+		<li>Ensure <a href="https://federationtester.matrix.org/#{{ server_name }}">federation</a> works</li>
+	</ul>
+</div>
+
+{%- endblock content -%}
@@ -495,7 +495,7 @@ ul#searchresults span.teaser em {

 .chapter li {
 	display: flex;
-	color: var(--sidebar-non-existant);
+	color: var(--sidebar-non-existent);
 }
 .chapter li a {
 	display: block;

@@ -20,7 +20,7 @@

 	--sidebar-bg: #14191f;
 	--sidebar-fg: #c8c9db;
-	--sidebar-non-existant: #5c6773;
+	--sidebar-non-existent: #5c6773;
 	--sidebar-active: #ffb454;
 	--sidebar-spacer: #2d334f;


@@ -64,7 +64,7 @@

 	--sidebar-bg: #292c2f;
 	--sidebar-fg: #a1adb8;
-	--sidebar-non-existant: #505254;
+	--sidebar-non-existent: #505254;
 	--sidebar-active: #3473ad;
 	--sidebar-spacer: #393939;


@@ -108,7 +108,7 @@

 	--sidebar-bg: #fafafa;
 	--sidebar-fg: #AE518E;
-	--sidebar-non-existant: #aaaaaa;
+	--sidebar-non-existent: #aaaaaa;
 	--sidebar-active: #2F7E86;
 	--sidebar-spacer: #f4f4f4;


@@ -152,7 +152,7 @@

 	--sidebar-bg: #282d3f;
 	--sidebar-fg: #fdcbec;
-	--sidebar-non-existant: #505274;
+	--sidebar-non-existent: #505274;
 	--sidebar-active: #5BCEFA;
 	--sidebar-spacer: #2d334f;


@@ -196,7 +196,7 @@

 	--sidebar-bg: #3b2e2a;
 	--sidebar-fg: #c8c9db;
-	--sidebar-non-existant: #505254;
+	--sidebar-non-existent: #505254;
 	--sidebar-active: #e69f67;
 	--sidebar-spacer: #45373a;


@@ -241,7 +241,7 @@

 	--sidebar-bg: #292c2f;
 	--sidebar-fg: #a1adb8;
-	--sidebar-non-existant: #505254;
+	--sidebar-non-existent: #505254;
 	--sidebar-active: #3473ad;
 	--sidebar-spacer: #393939;