diff --git a/Cargo.lock b/Cargo.lock
index 65a89d7f..3f5b7f7e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -581,7 +581,6 @@ dependencies = [
  "console-subscriber",
  "hardened_malloc-rs",
  "log",
- "num_cpus",
  "opentelemetry",
  "opentelemetry-jaeger",
  "opentelemetry_sdk",
@@ -712,7 +711,6 @@ dependencies = [
  "futures-util",
  "log",
  "lru-cache",
- "num_cpus",
  "parking_lot",
  "ruma",
  "rusqlite",
diff --git a/Cargo.toml b/Cargo.toml
index 04f943b5..0c85b80a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -258,9 +258,6 @@ version = "0.1.80"
 [workspace.dependencies.lru-cache]
 version = "0.1.2"
 
-[workspace.dependencies.num_cpus]
-version = "1.16.0"
-
 # Used for matrix spec type definitions and helpers
 [workspace.dependencies.ruma]
 git = "https://github.com/girlbossceo/ruwuma"
diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs
index a91e6447..5d4e5cba 100644
--- a/src/core/utils/mod.rs
+++ b/src/core/utils/mod.rs
@@ -267,3 +267,15 @@ pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> {
 
 	Ok(())
 }
+
+/// Get the number of threads which can execute in parallel, based on the
+/// hardware and administrative constraints of this system. Use this value as
+/// a hint for sizing thread-pools and divide-and-conquer algorithms.
+///
+/// * <https://doc.rust-lang.org/std/thread/fn.available_parallelism.html>
+#[must_use]
+pub fn available_parallelism() -> usize {
+	std::thread::available_parallelism()
+		.expect("Unable to query for available parallelism.")
+		.get()
+}
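
For reference, std::thread::available_parallelism returns io::Result<NonZeroUsize>, so the expect above turns a failed query into a panic at startup and .get() unwraps the non-zero count into a plain usize. A rough caller-side sketch of using the new helper (the cap of 64 jobs is purely illustrative and not part of this change):

	// Split some hypothetical workload into one chunk per available thread,
	// but never more than 64 chunks.
	let jobs = std::cmp::min(64, conduit::utils::available_parallelism());
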
diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml
index 990a303b..1855cbff 100644
--- a/src/database/Cargo.toml
+++ b/src/database/Cargo.toml
@@ -56,7 +56,6 @@ conduit-core.workspace = true
 futures-util.workspace = true
 log.workspace = true
 lru-cache.workspace = true
-num_cpus.workspace = true
 parking_lot.optional = true
 parking_lot.workspace = true
 ruma.workspace = true
diff --git a/src/database/rocksdb/opts.rs b/src/database/rocksdb/opts.rs
index b417b126..afed79a5 100644
--- a/src/database/rocksdb/opts.rs
+++ b/src/database/rocksdb/opts.rs
@@ -1,5 +1,7 @@
 #![allow(dead_code)]
-use std::collections::HashMap;
+use std::{cmp, collections::HashMap};
+
+use conduit::utils;
 
 use super::{
 	rust_rocksdb::{
@@ -21,10 +23,11 @@ pub(crate) fn db_options(config: &Config, env: &mut Env, row_cache: &Cache, col_
 	set_logging_defaults(&mut opts, config);
 
 	// Processing
+	const MIN_PARALLELISM: usize = 2;
 	let threads = if config.rocksdb_parallelism_threads == 0 {
-		std::cmp::max(2, num_cpus::get()) // max cores if user specified 0
+		cmp::max(MIN_PARALLELISM, utils::available_parallelism())
 	} else {
-		config.rocksdb_parallelism_threads
+		cmp::max(MIN_PARALLELISM, config.rocksdb_parallelism_threads)
 	};
 
 	opts.set_max_background_jobs(threads.try_into().unwrap());
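
With this change, rocksdb_parallelism_threads = 0 keeps its meaning of "follow the available parallelism", while explicit values are now also clamped up to MIN_PARALLELISM. A worked sketch of the clamping, assuming a hypothetical 8-thread host:

	// MIN_PARALLELISM is 2, as defined in the hunk above.
	assert_eq!(std::cmp::max(2, 8), 8);   // rocksdb_parallelism_threads == 0: follow the hardware
	assert_eq!(std::cmp::max(2, 1), 2);   // an explicit 1 is raised to the minimum
	assert_eq!(std::cmp::max(2, 16), 16); // larger explicit values pass through unchanged
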
diff --git a/src/database/sqlite/mod.rs b/src/database/sqlite/mod.rs
index 4e8c079e..61d78a9a 100644
--- a/src/database/sqlite/mod.rs
+++ b/src/database/sqlite/mod.rs
@@ -108,7 +108,8 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
 			clippy::cast_precision_loss,
 			clippy::cast_sign_loss
 		)]
-		let cache_size_per_thread = ((config.db_cache_capacity_mb * 1024.0) / ((num_cpus::get() as f64 * 2.0) + 1.0)) as u32;
+		let cache_size_per_thread = ((config.db_cache_capacity_mb * 1024.0)
+			/ ((conduit::utils::available_parallelism() as f64 * 2.0) + 1.0)) as u32;
 
 		let writer = Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?);
 
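The sqlite change only swaps the parallelism source; the arithmetic is unchanged: the configured cache, converted from MiB to KiB, is split across 2 × parallelism + 1 shares. A worked example with hypothetical figures (300 MiB is illustrative, not a claim about the default):

	// Hypothetical figures: a 300 MiB cache budget on an 8-thread host.
	let db_cache_capacity_mb = 300.0_f64;
	let parallelism = 8_usize;
	let cache_size_per_thread =
		((db_cache_capacity_mb * 1024.0) / ((parallelism as f64 * 2.0) + 1.0)) as u32;
	assert_eq!(cache_size_per_thread, 18_070); // ≈ 17.6 MiB of cache for each thread
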
diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml
index 2232701a..cecdeb58 100644
--- a/src/main/Cargo.toml
+++ b/src/main/Cargo.toml
@@ -78,7 +78,6 @@ log.workspace = true
 tracing.workspace = true
 tracing-subscriber.workspace = true
 clap.workspace = true
-num_cpus.workspace = true
 
 opentelemetry.workspace = true
 opentelemetry.optional = true
diff --git a/src/main/main.rs b/src/main/main.rs
index 68b7a1c2..a96945a7 100644
--- a/src/main/main.rs
+++ b/src/main/main.rs
@@ -6,7 +6,7 @@ extern crate conduit_core as conduit;
 
 use std::{cmp, sync::Arc, time::Duration};
 
-use conduit::{debug_info, error, Error, Result};
+use conduit::{debug_info, error, utils::available_parallelism, Error, Result};
 use server::Server;
 use tokio::runtime;
 
@@ -20,7 +20,7 @@ fn main() -> Result<(), Error> {
 		.enable_io()
 		.enable_time()
 		.thread_name(WORKER_NAME)
-		.worker_threads(cmp::max(WORKER_MIN, num_cpus::get()))
+		.worker_threads(cmp::max(WORKER_MIN, available_parallelism()))
 		.thread_keep_alive(Duration::from_millis(WORKER_KEEPALIVE_MS))
 		.build()
 		.expect("built runtime");
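
Unlike a plain core count, std::thread::available_parallelism also honours administrative limits such as the process CPU affinity mask and, where supported, cgroup quotas, so the tokio worker count above now tracks what the process is actually allowed to use. A minimal, illustrative check of the detected value from within main.rs:

	// Log the parallelism that the runtime builder above will size itself by.
	println!("available parallelism: {}", available_parallelism());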