feat: add log-to-file

Jun Kurihara 2025-04-30 17:36:14 +09:00
commit d5b020dcfb
No known key found for this signature in database
GPG key ID: 48ADFD173ED22B03
12 changed files with 130 additions and 70 deletions

View file

@@ -32,3 +32,9 @@ pub const MAX_CACHE_EACH_SIZE: usize = 65_535;
pub const MAX_CACHE_EACH_SIZE_ON_MEMORY: usize = 4_096;
// TODO: max cache size in total
/// Logging event name TODO: Other separated logs?
pub mod log_event_names {
/// access log
pub const ACCESS_LOG: &str = "rpxy::access";
}
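
The constant above is intended as a tracing event name: an event emitted with an explicit name: argument carries that name in its static metadata, which is what later lets a subscriber treat access-log events separately from ordinary application logs. A minimal, self-contained sketch of the emitting side (the helper function is hypothetical):

use tracing::info;

const ACCESS_LOG: &str = "rpxy::access";

fn emit_access_log(line: &str) {
  // name: overrides the default event name ("event <file>:<line>"),
  // so a filter can recognize the event via Metadata::name().
  info!(name: ACCESS_LOG, "{}", line);
}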

View file

@@ -1,10 +1,10 @@
use super::cache_error::*;
use crate::{
globals::Globals,
hyper_ext::body::{full, BoxBody, ResponseBody, UnboundedStreamBody},
hyper_ext::body::{BoxBody, ResponseBody, UnboundedStreamBody, full},
log::*,
};
use base64::{engine::general_purpose, Engine as _};
use base64::{Engine as _, engine::general_purpose};
use bytes::{Buf, Bytes, BytesMut};
use futures::channel::mpsc;
use http::{Request, Response, Uri};
@@ -16,8 +16,8 @@ use sha2::{Digest, Sha256};
use std::{
path::{Path, PathBuf},
sync::{
atomic::{AtomicUsize, Ordering},
Arc, Mutex,
atomic::{AtomicUsize, Ordering},
},
time::SystemTime,
};
@@ -59,9 +59,7 @@ impl RpxyCache {
let max_each_size = globals.proxy_config.cache_max_each_size;
let mut max_each_size_on_memory = globals.proxy_config.cache_max_each_size_on_memory;
if max_each_size < max_each_size_on_memory {
warn!(
"Maximum size of on memory cache per entry must be smaller than or equal to the maximum of each file cache"
);
warn!("Maximum size of on memory cache per entry must be smaller than or equal to the maximum of each file cache");
max_each_size_on_memory = max_each_size;
}
@@ -89,12 +87,7 @@ impl RpxyCache {
}
/// Put response into the cache
pub(crate) async fn put(
&self,
uri: &hyper::Uri,
mut body: Incoming,
policy: &CachePolicy,
) -> CacheResult<UnboundedStreamBody> {
pub(crate) async fn put(&self, uri: &hyper::Uri, mut body: Incoming, policy: &CachePolicy) -> CacheResult<UnboundedStreamBody> {
let cache_manager = self.inner.clone();
let mut file_store = self.file_store.clone();
let uri = uri.clone();
@@ -155,7 +148,7 @@ impl RpxyCache {
let mut hasher = Sha256::new();
hasher.update(buf.as_ref());
let hash_bytes = Bytes::copy_from_slice(hasher.finalize().as_ref());
debug!("Cached data: {} bytes, hash = {:?}", size, hash_bytes);
trace!("Cached data: {} bytes, hash = {:?}", size, hash_bytes);
// Create cache object
let cache_key = derive_cache_key_from_uri(&uri);
@@ -188,10 +181,7 @@ impl RpxyCache {
/// Get cached response
pub(crate) async fn get<R>(&self, req: &Request<R>) -> Option<Response<ResponseBody>> {
debug!(
"Current cache status: (total, on-memory, file) = {:?}",
self.count().await
);
trace!("Current cache status: (total, on-memory, file) = {:?}", self.count().await);
let cache_key = derive_cache_key_from_uri(req.uri());
// First check cache chance
@@ -282,11 +272,7 @@ impl FileStore {
};
}
/// Read a temporary file cache
async fn read(
&self,
path: impl AsRef<Path> + Send + Sync + 'static,
hash: &Bytes,
) -> CacheResult<UnboundedStreamBody> {
async fn read(&self, path: impl AsRef<Path> + Send + Sync + 'static, hash: &Bytes) -> CacheResult<UnboundedStreamBody> {
let inner = self.inner.read().await;
inner.read(path, hash).await
}
@@ -336,11 +322,7 @@ impl FileStoreInner {
}
/// Retrieve a stored temporary file cache
async fn read(
&self,
path: impl AsRef<Path> + Send + Sync + 'static,
hash: &Bytes,
) -> CacheResult<UnboundedStreamBody> {
async fn read(&self, path: impl AsRef<Path> + Send + Sync + 'static, hash: &Bytes) -> CacheResult<UnboundedStreamBody> {
let Ok(mut file) = File::open(&path).await else {
warn!("Cache file object cannot be opened");
return Err(CacheError::FailedToOpenCacheFile);

View file

@@ -27,6 +27,7 @@ use std::sync::Arc;
use tokio_util::sync::CancellationToken;
/* ------------------------------------------------ */
pub use crate::constants::log_event_names;
pub use crate::globals::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri};
pub mod reexports {
pub use hyper::Uri;

View file

@@ -1 +1 @@
pub use tracing::{debug, error, info, warn};
pub use tracing::{debug, error, info, trace, warn};

View file

@@ -71,7 +71,7 @@ where
Ok(v)
}
Err(e) => {
error!("{e}");
error!("{e}: {log_data}");
let code = StatusCode::from(e);
log_data.status_code(&code).output();
synthetic_error_response(code)

View file

@@ -1,11 +1,11 @@
use super::{handler_main::HandlerContext, utils_headers::*, utils_request::update_request_line, HttpMessageHandler};
use super::{HttpMessageHandler, handler_main::HandlerContext, utils_headers::*, utils_request::update_request_line};
use crate::{
backend::{BackendApp, UpstreamCandidates},
constants::RESPONSE_HEADER_SERVER,
log::*,
};
use anyhow::{anyhow, ensure, Result};
use http::{header, HeaderValue, Request, Response, Uri};
use anyhow::{Result, anyhow, ensure};
use http::{HeaderValue, Request, Response, Uri, header};
use hyper_util::client::legacy::connect::Connect;
use std::net::SocketAddr;
@@ -66,7 +66,7 @@ where
upstream_candidates: &UpstreamCandidates,
tls_enabled: bool,
) -> Result<HandlerContext> {
debug!("Generate request to be forwarded");
trace!("Generate request to be forwarded");
// Add te: trailer if contained in original request
let contains_te_trailers = {

View file

@@ -34,11 +34,7 @@ impl<T> From<&http::Request<T>> for HttpMessageLog {
client_addr: "".to_string(),
method: req.method().to_string(),
host: header_mapper(header::HOST),
p_and_q: req
.uri()
.path_and_query()
.map_or_else(|| "", |v| v.as_str())
.to_string(),
p_and_q: req.uri().path_and_query().map_or_else(|| "", |v| v.as_str()).to_string(),
version: req.version(),
uri_scheme: req.uri().scheme_str().unwrap_or("").to_string(),
uri_host: req.uri().host().unwrap_or("").to_string(),
@@ -50,6 +46,33 @@ impl<T> From<&http::Request<T>> for HttpMessageLog {
}
}
impl std::fmt::Display for HttpMessageLog {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{} <- {} -- {} {} {:?} -- {} -- {} \"{}\", \"{}\" \"{}\"",
if !self.host.is_empty() {
self.host.as_str()
} else {
self.uri_host.as_str()
},
self.client_addr,
self.method,
self.p_and_q,
self.version,
self.status,
if !self.uri_scheme.is_empty() && !self.uri_host.is_empty() {
format!("{}://{}", self.uri_scheme, self.uri_host)
} else {
"".to_string()
},
self.ua,
self.xff,
self.upstream
)
}
}
impl HttpMessageLog {
pub fn client_addr(&mut self, client_addr: &SocketAddr) -> &mut Self {
self.client_addr = client_addr.to_canonical().to_string();
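
For reference, a line rendered by the Display implementation above has the following shape (all values are illustrative, not taken from a real deployment):

example.com <- 192.0.2.1:54321 -- GET /api/v1/users?id=42 HTTP/1.1 -- 200 OK -- https://example.com "curl/8.6.0", "203.0.113.7" "https://backend.local:8443"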
@@ -74,26 +97,8 @@ impl HttpMessageLog {
pub fn output(&self) {
info!(
"{} <- {} -- {} {} {:?} -- {} -- {} \"{}\", \"{}\" \"{}\"",
if !self.host.is_empty() {
self.host.as_str()
} else {
self.uri_host.as_str()
},
self.client_addr,
self.method,
self.p_and_q,
self.version,
self.status,
if !self.uri_scheme.is_empty() && !self.uri_host.is_empty() {
format!("{}://{}", self.uri_scheme, self.uri_host)
} else {
"".to_string()
},
self.ua,
self.xff,
self.upstream,
// self.tls_server_name
name: crate::constants::log_event_names::ACCESS_LOG,
"{}", self
);
}
}
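
The hunks above name and format the access-log events but do not show how they reach a file. As a rough sketch of how a named event can be routed to its own file using tracing-subscriber and tracing-appender (crate choice, function name, and file path are illustrative assumptions, not taken from this commit):

use tracing_subscriber::{filter, fmt, prelude::*};

fn init_logging_with_access_file() -> tracing_appender::non_blocking::WorkerGuard {
  // Daily-rotated access log file; the directory and file name are placeholders.
  let file_appender = tracing_appender::rolling::daily("./log", "access.log");
  let (writer, guard) = tracing_appender::non_blocking(file_appender);

  // This layer accepts only events whose metadata name is the access-log event name.
  let access_layer = fmt::layer()
    .with_writer(writer)
    .with_filter(filter::filter_fn(|meta| meta.name() == "rpxy::access"));

  // All other events keep going to stdout as application logs.
  let app_layer = fmt::layer()
    .with_filter(filter::filter_fn(|meta| meta.name() != "rpxy::access"));

  tracing_subscriber::registry().with(access_layer).with(app_layer).init();

  // The guard must stay alive for as long as logging runs; dropping it
  // flushes and shuts down the background writer thread.
  guard
}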

View file

@@ -33,7 +33,7 @@ where
<<C as OpenStreams<Bytes>>::BidiStream as BidiStream<Bytes>>::SendStream: Send,
{
let mut h3_conn = h3::server::Connection::<_, Bytes>::new(quic_connection).await?;
info!(
debug!(
"QUIC/HTTP3 connection established from {:?} {}",
client_addr,
<&ServerName as TryInto<String>>::try_into(&tls_server_name).unwrap_or_default()
@@ -115,7 +115,7 @@ where
let mut sender = body_sender;
let mut size = 0usize;
while let Some(mut body) = recv_stream.recv_data().await? {
debug!("HTTP/3 incoming request body: remaining {}", body.remaining());
trace!("HTTP/3 incoming request body: remaining {}", body.remaining());
size += body.remaining();
if size > max_body_size {
error!(
@@ -131,7 +131,7 @@ where
// trailers: use inner for work around. (directly get trailer)
let trailers = futures_util::future::poll_fn(|cx| recv_stream.as_mut().poll_recv_trailers(cx)).await?;
if trailers.is_some() {
debug!("HTTP/3 incoming request trailers");
trace!("HTTP/3 incoming request trailers");
sender.send_trailers(trailers.unwrap()).await?;
}
Ok(()) as RpxyResult<()>
@@ -154,13 +154,13 @@ where
match send_stream.send_response(new_res).await {
Ok(_) => {
debug!("HTTP/3 response to connection successful");
trace!("HTTP/3 response to connection successful");
// on-demand body streaming to downstream without expanding the object onto memory.
loop {
let frame = match new_body.frame().await {
Some(frame) => frame,
None => {
debug!("Response body finished");
trace!("Response body finished");
break;
}
}

View file

@@ -2,8 +2,8 @@ use super::{proxy_main::Proxy, socket::bind_udp_socket};
use crate::{error::*, log::*, name_exp::ByteName};
use hyper_util::client::legacy::connect::Connect;
use quinn::{
crypto::rustls::{HandshakeData, QuicServerConfig},
Endpoint, TransportConfig,
crypto::rustls::{HandshakeData, QuicServerConfig},
};
use rpxy_certs::ServerCrypto;
use rustls::ServerConfig;
@@ -82,7 +82,7 @@ where
let client_addr = incoming.remote_address();
let quic_connection = match incoming.await {
Ok(new_conn) => {
info!("New connection established");
trace!("New connection established");
h3_quinn::Connection::new(new_conn)
},
Err(e) => {

View file

@@ -110,7 +110,7 @@ where
// quic event loop. this immediately cancels when crypto is updated by tokio::select!
while let Some(new_conn) = server.accept().await {
debug!("New QUIC connection established");
trace!("New QUIC connection established");
let Ok(Some(new_server_name)) = new_conn.server_name() else {
warn!("HTTP/3 no SNI is given");
continue;