improve some router tracing spans
Signed-off-by: Jason Volk <jason@zemos.net>
commit b1182fe8a4
parent 77d7e8a3ad
2 changed files with 83 additions and 36 deletions
@@ -12,6 +12,18 @@ use http::{Method, StatusCode, Uri};
 	parent = None,
 	level = "trace",
 	skip_all,
+	fields(
+		handled = %services
+			.server
+			.metrics
+			.requests_spawn_finished
+			.fetch_add(1, Ordering::Relaxed),
+		active = %services
+			.server
+			.metrics
+			.requests_spawn_active
+			.fetch_add(1, Ordering::Relaxed),
+	)
 )]
 pub(crate) async fn spawn(
 	State(services): State<Arc<Services>>,
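
This first file is the router middleware. The notable move in the hunk above is that the metrics updates ride along as span fields: expressions inside `fields(...)` are evaluated when `#[tracing::instrument]` creates the span, so a `fetch_add` there both bumps a counter and records its pre-increment value on the span. A minimal sketch of the pattern, with a hypothetical static standing in for `server.metrics` (not the repo's actual types):

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Hypothetical gauge standing in for metrics.requests_spawn_active.
    static ACTIVE: AtomicUsize = AtomicUsize::new(0);

    // The field expression runs once, at span creation: it bumps the
    // gauge and records the pre-increment value as the `active` field.
    #[tracing::instrument(level = "trace", skip_all, fields(
        active = %ACTIVE.fetch_add(1, Ordering::Relaxed),
    ))]
    async fn middleware_sketch() {
        // ... forward the request to the inner handler here ...
    }

One subtlety: the `span!` machinery only evaluates field expressions when the callsite is enabled, so under a subscriber that filters out `trace` spans these side effects never run and the counters stop advancing.
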
@@ -19,34 +31,56 @@ pub(crate) async fn spawn(
 	next: axum::middleware::Next,
 ) -> Result<Response, StatusCode> {
 	let server = &services.server;
 
+	#[cfg(debug_assertions)]
+	defer! {{
+		_ = server
+			.metrics
+			.requests_spawn_active
+			.fetch_sub(1, Ordering::Relaxed);
+	}};
+
 	if !server.running() {
 		debug_warn!("unavailable pending shutdown");
 		return Err(StatusCode::SERVICE_UNAVAILABLE);
 	}
 
-	let active = server
-		.metrics
-		.requests_spawn_active
-		.fetch_add(1, Ordering::Relaxed);
-	trace!(active, "enter");
-	defer! {{
-		let active = server.metrics.requests_spawn_active.fetch_sub(1, Ordering::Relaxed);
-		let finished = server.metrics.requests_spawn_finished.fetch_add(1, Ordering::Relaxed);
-		trace!(active, finished, "leave");
-	}};
-
 	let fut = next.run(req);
 	let task = server.runtime().spawn(fut);
 	task.await.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
 }
 
-#[tracing::instrument(level = "debug", skip_all)]
+#[tracing::instrument(
+	level = "debug",
+	skip_all,
+	fields(
+		handled = %services
+			.server
+			.metrics
+			.requests_handle_finished
+			.fetch_add(1, Ordering::Relaxed),
+		active = %services
+			.server
+			.metrics
+			.requests_handle_active
+			.fetch_add(1, Ordering::Relaxed),
+	)
+)]
 pub(crate) async fn handle(
 	State(services): State<Arc<Services>>,
 	req: http::Request<axum::body::Body>,
 	next: axum::middleware::Next,
 ) -> Result<Response, StatusCode> {
 	let server = &services.server;
 
+	#[cfg(debug_assertions)]
+	defer! {{
+		_ = server
+			.metrics
+			.requests_handle_active
+			.fetch_sub(1, Ordering::Relaxed);
+	}};
+
 	if !server.running() {
 		debug_warn!(
 			method = %req.method(),
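
With the counters sampled at span creation, the hand-written enter/leave bookkeeping becomes redundant; what remains is a debug-only decrement of the active gauge on scope exit (the matching removal in `handle` follows in the next hunk). A minimal sketch of that guard, assuming the repo's `defer!` behaves like `scopeguard::defer!`, with a hypothetical static gauge:

    use std::sync::atomic::{AtomicUsize, Ordering};

    use scopeguard::defer;

    // Hypothetical gauge standing in for metrics.requests_handle_active.
    static ACTIVE: AtomicUsize = AtomicUsize::new(0);

    async fn run_one_request() {
        ACTIVE.fetch_add(1, Ordering::Relaxed);

        // The guard fires on every exit path -- normal return, `?`, or
        // panic -- so the gauge is decremented exactly once per request.
        defer! {
            ACTIVE.fetch_sub(1, Ordering::Relaxed);
        }

        // ... await the inner handler here ...
    }

Note that in this commit the decrement sits behind `#[cfg(debug_assertions)]`, so the gauge is only kept balanced in debug builds.
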
@@ -57,17 +91,6 @@ pub(crate) async fn handle(
 		return Err(StatusCode::SERVICE_UNAVAILABLE);
 	}
 
-	let active = server
-		.metrics
-		.requests_handle_active
-		.fetch_add(1, Ordering::Relaxed);
-	trace!(active, "enter");
-	defer! {{
-		let active = server.metrics.requests_handle_active.fetch_sub(1, Ordering::Relaxed);
-		let finished = server.metrics.requests_handle_finished.fetch_add(1, Ordering::Relaxed);
-		trace!(active, finished, "leave");
-	}};
-
 	let uri = req.uri().clone();
 	let method = req.method().clone();
 	let result = next.run(req).await;
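
The second file is the Unix-socket listener. Its first hunk pulls in `os::fd::AsRawFd` so the new per-connection span added below can record the accepted socket's file descriptor:
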
@@ -2,6 +2,7 @@
 
 use std::{
 	net::{self, IpAddr, Ipv4Addr},
+	os::fd::AsRawFd,
 	path::Path,
 	sync::{atomic::Ordering, Arc},
 };
@@ -60,31 +61,54 @@ pub(super) async fn serve(
 	Ok(())
 }
 
+#[tracing::instrument(
+	level = "trace",
+	skip_all,
+	fields(
+		?listener,
+		socket = ?conn.0,
+	),
+)]
 async fn accept(
 	server: &Arc<Server>,
 	listener: &UnixListener,
 	tasks: &mut JoinSet<()>,
-	mut app: MakeService,
+	app: MakeService,
 	builder: server::conn::auto::Builder<TokioExecutor>,
 	conn: (UnixStream, SocketAddr),
 ) {
-	let (socket, remote) = conn;
-	let socket = TokioIo::new(socket);
-	trace!(?listener, ?socket, ?remote, "accepted");
-
-	let called = app.call(NULL_ADDR).await.unwrap_infallible();
-
-	let service = move |req: Request<Incoming>| called.clone().oneshot(req);
-	let handler = service_fn(service);
-	let task = async move {
-		// bug on darwin causes all results to be errors. do not unwrap this
-		_ = builder.serve_connection(socket, handler).await;
-	};
+	let (socket, _) = conn;
+	let server_ = server.clone();
+	let task = async move { accepted(server_, builder, socket, app).await };
 
 	_ = tasks.spawn_on(task, server.runtime());
 	while tasks.try_join_next().is_some() {}
 }
 
+#[tracing::instrument(
+	level = "trace",
+	skip_all,
+	fields(
+		fd = %socket.as_raw_fd(),
+		path = ?socket.local_addr(),
+	),
+)]
+async fn accepted(
+	server: Arc<Server>,
+	builder: server::conn::auto::Builder<TokioExecutor>,
+	socket: UnixStream,
+	mut app: MakeService,
+) {
+	let socket = TokioIo::new(socket);
+	let called = app.call(NULL_ADDR).await.unwrap_infallible();
+	let service = move |req: Request<Incoming>| called.clone().oneshot(req);
+	let handler = service_fn(service);
+	trace!(?socket, ?handler, "serving connection");
+
+	// bug on darwin causes all results to be errors. do not unwrap this
+	_ = builder.serve_connection(socket, handler).await;
+}
+
 async fn init(server: &Arc<Server>) -> Result<UnixListener> {
 	use std::os::unix::fs::PermissionsExt;
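
The effect of the split is that `accept` shrinks to naming and spawning a task, while the new `accepted` function owns the per-connection span, tagged with the socket's fd and local address. A self-contained sketch of that shape using plain tokio types; `accepted_sketch` and `accept_loop` are hypothetical names, not the repo's:

    use std::os::fd::AsRawFd;

    use tokio::net::{UnixListener, UnixStream};

    // Per-connection handler: the span is created inside the spawned
    // task, carrying the accepted socket's file descriptor as a field.
    #[tracing::instrument(level = "trace", skip_all, fields(
        fd = %socket.as_raw_fd(),
    ))]
    async fn accepted_sketch(socket: UnixStream) {
        // ... hand the socket to the HTTP connection builder here ...
    }

    // Accept loop: build the instrumented future first, then spawn it,
    // so the span and its fields travel with the task.
    async fn accept_loop(listener: UnixListener) -> std::io::Result<()> {
        loop {
            let (socket, _addr) = listener.accept().await?;
            tokio::spawn(accepted_sketch(socket));
        }
    }

Keeping the span on the spawned function rather than on the accept loop means each connection gets its own short-lived span instead of all connections sharing the listener's.
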