Jun Kurihara 2025-05-20 21:09:20 +09:00
commit f3e5c478f1
No known key found for this signature in database
GPG key ID: D992B3E3DE1DED23
5 changed files with 200 additions and 131 deletions

View file

@@ -4,18 +4,26 @@ use ahash::HashMap;
use clap::Arg;
use hot_reload::{ReloaderReceiver, ReloaderService};
use rpxy_certs::{CryptoFileSourceBuilder, CryptoReloader, ServerCryptoBase, build_cert_reloader};
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig};
use rpxy_lib::{AppConfigList, ProxyConfig};
#[cfg(feature = "acme")]
use rpxy_acme::{ACME_DIR_URL, ACME_REGISTRY_PATH, AcmeManager};
/// Parsed options
/// Parsed options from CLI
/// Options for configuring the application.
///
/// # Fields
/// - `config_file_path`: Path to the configuration file.
/// - `log_dir_path`: Optional path to the log directory.
pub struct Opts {
pub config_file_path: String,
pub log_dir_path: Option<String>,
}
/// Parse arg values passed from cli
/// Parses command-line arguments into an [`Opts`](rpxy-bin/src/config/parse.rs:13) struct.
///
/// Returns a populated [`Opts`](rpxy-bin/src/config/parse.rs:13) on success, or an error if parsing fails.
/// Expects a required `--config` argument and an optional `--log-dir` argument.
pub fn parse_opts() -> Result<Opts, anyhow::Error> {
let _ = include_str!("../../Cargo.toml");
let options = clap::command!()
@@ -36,7 +44,6 @@ pub fn parse_opts() -> Result<Opts, anyhow::Error> {
);
let matches = options.get_matches();
///////////////////////////////////
let config_file_path = matches.get_one::<String>("config_file").unwrap().to_owned();
let log_dir_path = matches.get_one::<String>("log_dir").map(|v| v.to_owned());
@@ -46,63 +53,45 @@ pub fn parse_opts() -> Result<Opts, anyhow::Error> {
})
}
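The argument definitions themselves are collapsed between the two hunks above; for orientation, here is a minimal sketch of what they plausibly look like, given that the code looks up the ids `config_file` and `log_dir` via `get_one::<String>(..)`. Flag names and help text below are assumptions, not taken from the diff.
use clap::{Arg, Command};
// Hypothetical reconstruction of the elided argument setup; the `Arg` ids must
// match the `get_one::<String>(..)` lookups above, everything else is assumed.
fn cli() -> Command {
    clap::command!()
        .arg(
            Arg::new("config_file")
                .long("config")
                .short('c')
                .required(true)
                .help("Path to the TOML configuration file"),
        )
        .arg(
            Arg::new("log_dir")
                .long("log-dir")
                .short('l')
                .help("Directory for log files; logs go to stdout when omitted"),
        )
}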
pub fn build_settings(config: &ConfigToml) -> std::result::Result<(ProxyConfig, AppConfigList), anyhow::Error> {
// build proxy config
let proxy_config: ProxyConfig = config.try_into()?;
use super::toml::ConfigTomlExt;
// backend_apps
let apps = config.apps.clone().ok_or(anyhow!("Missing application spec"))?;
// assertions for all backend apps
ensure!(!apps.0.is_empty(), "Wrong application spec.");
// if only https_port is specified, tls must be configured for all apps
if proxy_config.http_port.is_none() {
ensure!(
apps.0.iter().all(|(_, app)| app.tls.is_some()),
"Some apps serves only plaintext HTTP"
);
}
// https redirection port must be configured only when both http_port and https_port are configured.
if proxy_config.https_redirection_port.is_some() {
ensure!(
proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
"https_redirection_port can be specified only when both http_port and https_port are specified"
);
}
// https redirection can be configured if both ports are active
if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
ensure!(
apps.0.iter().all(|(_, app)| {
if let Some(tls) = app.tls.as_ref() {
tls.https_redirection.is_none()
} else {
true
}
}),
"https_redirection can be specified only when both http_port and https_port are specified"
);
}
// build applications
let mut app_config_list_inner = Vec::<AppConfig>::new();
for (app_name, app) in apps.0.iter() {
let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
let registered_app_name = app_name.to_ascii_lowercase();
let app_config = app.build_app_config(&registered_app_name)?;
app_config_list_inner.push(app_config);
}
let app_config_list = AppConfigList {
inner: app_config_list_inner,
default_app: config.default_app.clone().map(|v| v.to_ascii_lowercase()), // default backend application for plaintext http requests
};
Ok((proxy_config, app_config_list))
/// Build proxy and app settings from config using ConfigTomlExt
pub fn build_settings(config: &ConfigToml) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
config.validate_and_build_settings()
}
/* ----------------------- */
/// Helper to build a CryptoFileSource for an app, handling ACME if enabled
#[cfg(feature = "acme")]
fn build_tls_for_app_acme(
tls: &mut super::toml::TlsOption,
acme_option: &Option<super::toml::AcmeOption>,
server_name: &str,
acme_registry_path: &str,
acme_dir_url: &str,
) -> Result<(), anyhow::Error> {
if let Some(true) = tls.acme {
ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none());
let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
let file_name =
rpxy_acme::DirCache::cached_cert_file_name(&[server_name.to_ascii_lowercase()], acme_dir_url.to_ascii_lowercase());
let cert_path = format!("{}/{}", subdir, file_name);
tls.tls_cert_key_path = Some(cert_path.clone());
tls.tls_cert_path = Some(cert_path);
}
Ok(())
}
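To make the path handling concrete, a small sketch of the directory shape this helper produces, using hypothetical values for the server name and registry path (the actual file name comes from rpxy_acme::DirCache::cached_cert_file_name and is not reproduced here).
#[test]
fn acme_cert_subdir_shape() {
    // Hypothetical inputs, for illustration only.
    let acme_registry_path = "./acme_registry";
    let server_name = "Example.COM";
    // The helper lowercases the server name and nests one subdirectory per host.
    let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
    assert_eq!(subdir, "./acme_registry/example.com");
    // tls_cert_key_path and tls_cert_path are then both pointed at the same
    // cached file in this subdirectory, since the ACME cache keeps key and
    // certificate in a single file (see the removed inline comment below).
}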
/// Build cert map
/// Builds the certificate manager for TLS applications.
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
///
/// # Returns
/// Returns an option containing a tuple of certificate reloader service and receiver, or `None` if TLS is not enabled.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_cert_manager(
config: &ConfigToml,
) -> Result<
@@ -139,19 +128,9 @@ pub async fn build_cert_manager(
ensure!(tls.tls_cert_key_path.is_some() && tls.tls_cert_path.is_some());
#[cfg(feature = "acme")]
let tls = {
let mut tls = tls.clone();
if let Some(true) = tls.acme {
ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none());
// Both of tls_cert_key_path and tls_cert_path must be the same for ACME since it's a single file
let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
let file_name =
rpxy_acme::DirCache::cached_cert_file_name(&[server_name.to_ascii_lowercase()], acme_dir_url.to_ascii_lowercase());
tls.tls_cert_key_path = Some(format!("{}/{}", subdir, file_name));
tls.tls_cert_path = Some(format!("{}/{}", subdir, file_name));
}
tls
};
let mut tls = tls.clone();
#[cfg(feature = "acme")]
build_tls_for_app_acme(&mut tls, &acme_option, server_name, acme_registry_path, acme_dir_url)?;
let crypto_file_source = CryptoFileSourceBuilder::default()
.tls_cert_path(tls.tls_cert_path.as_ref().unwrap())
@@ -168,24 +147,31 @@ pub async fn build_cert_manager(
/* ----------------------- */
#[cfg(feature = "acme")]
/// Build acme manager
/// Builds the ACME manager for automatic certificate management (enabled with the `acme` feature).
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
/// * `runtime_handle` - Tokio runtime handle for async operations.
///
/// # Returns
/// Returns an option containing an [`AcmeManager`](rpxy-bin/src/config/parse.rs:153) if ACME is configured, or `None` otherwise.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_acme_manager(
config: &ConfigToml,
runtime_handle: tokio::runtime::Handle,
) -> Result<Option<AcmeManager>, anyhow::Error> {
let acme_option = config.experimental.as_ref().and_then(|v| v.acme.clone());
if acme_option.is_none() {
let Some(acme_option) = acme_option else {
return Ok(None);
}
let acme_option = acme_option.unwrap();
};
let domains = config
let domains: Vec<String> = config
.apps
.as_ref()
.unwrap()
.0
.values()
.filter_map(|app| {
//
if let Some(tls) = app.tls.as_ref() {
if let Some(true) = tls.acme {
return Some(app.server_name.as_ref().unwrap().to_owned());
@@ -193,7 +179,7 @@ pub async fn build_acme_manager(
}
None
})
.collect::<Vec<_>>();
.collect();
if domains.is_empty() {
return Ok(None);
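For reference, a hedged sketch of the kind of app entry the filter above collects (its `server_name` ends up in `domains`). Only keys visible in this diff are used; an `experimental.acme` section must also be present in the config, as checked at the top of build_acme_manager, and its own keys are not shown here.
// Hedged sketch: an app whose server_name would be collected as an ACME domain.
// Per build_tls_for_app_acme above, such an app must not set tls_cert_path or
// tls_cert_key_path itself.
const ACME_APP_EXAMPLE: &str = r#"
[apps.acme_app]
server_name = "acme.example.com"

[apps.acme_app.tls]
acme = true
"#;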

View file

@@ -4,12 +4,25 @@ use crate::{
log::warn,
};
use ahash::HashMap;
use rpxy_lib::{AppConfig, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri, reexports::Uri};
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri, reexports::Uri};
use serde::Deserialize;
use std::{fs, net::SocketAddr};
use tokio::time::Duration;
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// Main configuration structure parsed from the TOML file.
///
/// # Fields
/// - `listen_port`: Optional TCP port for HTTP.
/// - `listen_port_tls`: Optional TCP port for HTTPS/TLS.
/// - `listen_ipv6`: Enable IPv6 listening.
/// - `https_redirection_port`: Optional port for HTTP to HTTPS redirection.
/// - `tcp_listen_backlog`: Optional TCP backlog size.
/// - `max_concurrent_streams`: Optional max concurrent streams.
/// - `max_clients`: Optional max client connections.
/// - `apps`: Optional application definitions.
/// - `default_app`: Optional default application name.
/// - `experimental`: Optional experimental features.
pub struct ConfigToml {
pub listen_port: Option<u16>,
pub listen_port_tls: Option<u16>,
@@ -23,8 +36,75 @@ pub struct ConfigToml {
pub experimental: Option<Experimental>,
}
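To ground the field list above, a hedged sketch of a small configuration using only keys that appear elsewhere in this diff. Both ports are set and the single app carries a server_name plus a TLS section, so the port-related checks implemented below would pass; the app-level build step may require further keys (e.g. upstream settings) that are not visible in this diff.
// Hedged sketch of a minimal configuration; key names limited to those
// appearing elsewhere in this commit.
const EXAMPLE_CONFIG: &str = r#"
listen_port = 80
listen_port_tls = 443
default_app = "app1"

[apps.app1]
server_name = "example.com"

[apps.app1.tls]
tls_cert_path = "/etc/rpxy/example.com.crt"
tls_cert_key_path = "/etc/rpxy/example.com.key"
https_redirection = true
"#;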
/// Extension trait for config validation and building
pub trait ConfigTomlExt {
fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error>;
}
impl ConfigTomlExt for ConfigToml {
fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
let proxy_config: ProxyConfig = self.try_into()?;
let apps = self.apps.as_ref().ok_or(anyhow!("Missing application spec"))?;
// Ensure at least one app is defined
ensure!(!apps.0.is_empty(), "Wrong application spec.");
// Helper: all apps have TLS
let all_apps_have_tls = apps.0.values().all(|app| app.tls.is_some());
// Helper: all apps have https_redirection unset
let all_apps_no_https_redirection = apps.0.values().all(|app| {
if let Some(tls) = app.tls.as_ref() {
tls.https_redirection.is_none()
} else {
true
}
});
if proxy_config.http_port.is_none() {
ensure!(all_apps_have_tls, "Some apps serve only plaintext HTTP");
}
if proxy_config.https_redirection_port.is_some() {
ensure!(
proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
"https_redirection_port can be specified only when both http_port and https_port are specified"
);
}
if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
ensure!(
all_apps_no_https_redirection,
"https_redirection can be specified only when both http_port and https_port are specified"
);
}
// Build AppConfigList
let mut app_config_list_inner = Vec::<AppConfig>::new();
for (app_name, app) in apps.0.iter() {
let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
let registered_app_name = app_name.to_ascii_lowercase();
let app_config = app.build_app_config(&registered_app_name)?;
app_config_list_inner.push(app_config);
}
let app_config_list = AppConfigList {
inner: app_config_list_inner,
default_app: self.default_app.clone().map(|v| v.to_ascii_lowercase()),
};
Ok((proxy_config, app_config_list))
}
}
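A hedged restatement of the three ensure! checks above over plain bools, purely for illustration; the function and parameter names below are not taken from the diff.
fn check_ports(
    http_port: bool,
    https_port: bool,
    https_redirection_port: bool,
    all_apps_have_tls: bool,
    any_app_sets_https_redirection: bool,
) -> Result<(), &'static str> {
    // Without a plaintext listener, every app must terminate TLS.
    if !http_port && !all_apps_have_tls {
        return Err("Some apps serve only plaintext HTTP");
    }
    // A global redirection port only makes sense with both listeners configured.
    if https_redirection_port && !(http_port && https_port) {
        return Err("https_redirection_port needs both http_port and https_port");
    }
    // Per-app https_redirection likewise requires both listeners.
    if !(http_port && https_port) && any_app_sets_https_redirection {
        return Err("https_redirection needs both http_port and https_port");
    }
    Ok(())
}

#[test]
fn redirection_needs_both_ports() {
    assert!(check_ports(true, true, true, true, true).is_ok());
    assert!(check_ports(false, true, false, true, true).is_err());
}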
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// HTTP/3 protocol options for server configuration.
///
/// # Fields
/// - `alt_svc_max_age`: Optional max age for Alt-Svc header.
/// - `request_max_body_size`: Optional maximum request body size.
/// - `max_concurrent_connections`: Optional maximum concurrent connections.
/// - `max_concurrent_bidistream`: Optional maximum concurrent bidirectional streams.
/// - `max_concurrent_unistream`: Optional maximum concurrent unidirectional streams.
/// - `max_idle_timeout`: Optional maximum idle timeout in milliseconds.
pub struct Http3Option {
pub alt_svc_max_age: Option<u32>,
pub request_max_body_size: Option<usize>,

View file

@@ -1,9 +1,12 @@
/// Default IPv4 listen addresses for the server.
pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"];
/// Default IPv6 listen addresses for the server.
pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"];
/// Delay in seconds before reloading the configuration after changes.
pub const CONFIG_WATCH_DELAY_SECS: u32 = 15;
#[cfg(feature = "cache")]
// Cache directory
/// Directory path for cache storage (enabled with "cache" feature).
pub const CACHE_DIR: &str = "./cache";
pub(crate) const ACCESS_LOG_FILE: &str = "access.log";

View file

@@ -8,92 +8,92 @@ pub use tracing::{debug, error, info, warn};
/// Initialize the logger with the RUST_LOG environment variable.
pub fn init_logger(log_dir_path: Option<&str>) {
let level_string = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string());
let level = tracing::Level::from_str(level_string.as_str()).unwrap_or(tracing::Level::INFO);
let level = std::env::var("RUST_LOG")
.ok()
.and_then(|s| tracing::Level::from_str(&s).ok())
.unwrap_or(tracing::Level::INFO);
match log_dir_path {
// log to stdout
None => init_stdio_logger(level),
// log to files
Some(log_dir_path) => init_file_logger(level, log_dir_path),
Some(path) => init_file_logger(level, path),
}
}
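A small sketch of how the refactored level parsing behaves (same fallback semantics as before, just without the intermediate String), assuming the standard tracing::Level::from_str.
use std::str::FromStr;
use tracing::Level;

// Mirrors the fallback above: use RUST_LOG when it parses as a level,
// otherwise default to INFO.
fn level_from_env(raw: Option<&str>) -> Level {
    raw.and_then(|s| Level::from_str(s).ok()).unwrap_or(Level::INFO)
}

#[test]
fn level_fallback() {
    assert_eq!(level_from_env(Some("debug")), Level::DEBUG);
    assert_eq!(level_from_env(Some("not-a-level")), Level::INFO);
    assert_eq!(level_from_env(None), Level::INFO);
}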
/// file logging
fn init_file_logger(level: tracing::Level, log_dir_path: &str) {
println!("Activate logging to files: {log_dir_path}");
let log_dir_path = std::path::PathBuf::from(log_dir_path);
// create the directory if it does not exist
if !log_dir_path.exists() {
println!("Directory does not exist, creating: {}", log_dir_path.display());
std::fs::create_dir_all(&log_dir_path).expect("Failed to create log directory");
println!("Activate logging to files: {}", log_dir_path);
let log_dir = std::path::Path::new(log_dir_path);
if !log_dir.exists() {
println!("Directory does not exist, creating: {}", log_dir.display());
std::fs::create_dir_all(log_dir).expect("Failed to create log directory");
}
let access_log_path = log_dir_path.join(ACCESS_LOG_FILE);
let system_log_path = log_dir_path.join(SYSTEM_LOG_FILE);
let access_log_path = log_dir.join(ACCESS_LOG_FILE);
let system_log_path = log_dir.join(SYSTEM_LOG_FILE);
println!("Access log: {}", access_log_path.display());
println!("System and error log: {}", system_log_path.display());
let access_log = open_log_file(&access_log_path);
let system_log = open_log_file(&system_log_path);
let reg = tracing_subscriber::registry();
let access_log_base = fmt::layer()
let access_layer = fmt::layer()
.with_line_number(false)
.with_thread_ids(false)
.with_thread_names(false)
.with_target(false)
.with_level(false)
.compact()
.with_ansi(false);
let reg = reg.with(access_log_base.with_writer(access_log).with_filter(AccessLogFilter));
.with_ansi(false)
.with_writer(access_log)
.with_filter(AccessLogFilter);
let system_log_base = fmt::layer()
let system_layer = fmt::layer()
.with_line_number(false)
.with_thread_ids(false)
.with_thread_names(false)
.with_target(false)
.with_level(true) // with level for system log
.with_level(true)
.compact()
.with_ansi(false);
let reg = reg.with(
system_log_base
.with_writer(system_log)
.with_filter(filter_fn(move |metadata| {
(is_cargo_pkg(metadata) && metadata.name() != log_event_names::ACCESS_LOG && metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::WARN.min(level)
})),
);
.with_ansi(false)
.with_writer(system_log)
.with_filter(filter_fn(move |metadata| {
(is_cargo_pkg(metadata) && metadata.name() != log_event_names::ACCESS_LOG && metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::WARN.min(level)
}));
reg.init();
tracing_subscriber::registry().with(access_layer).with(system_layer).init();
}
/// stdio logging
fn init_stdio_logger(level: tracing::Level) {
// This limits the logger to emits only this crate with any level above RUST_LOG, for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level.
let stdio_layer = fmt::layer().with_level(true).with_thread_ids(false);
if level <= tracing::Level::INFO {
// in normal deployment environment
let stdio_layer = stdio_layer
.with_target(false)
.compact()
.with_filter(filter_fn(move |metadata| {
(is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::WARN.min(level)
}));
tracing_subscriber::registry().with(stdio_layer).init();
} else {
// debugging
let stdio_layer = stdio_layer
// This limits the logger to emit only this crate with any level above RUST_LOG,
// for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level.
let base_layer = fmt::layer().with_level(true).with_thread_ids(false);
let debug = level > tracing::Level::INFO;
let filter = filter_fn(move |metadata| {
if debug {
(is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::INFO.min(level)
} else {
(is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::WARN.min(level)
}
});
let stdio_layer = if debug {
base_layer
.with_line_number(true)
.with_target(true)
.with_thread_names(true)
.with_target(true)
.compact()
.with_filter(filter_fn(move |metadata| {
(is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::INFO.min(level)
}));
tracing_subscriber::registry().with(stdio_layer).init();
.with_filter(filter)
} else {
base_layer.with_target(false).compact().with_filter(filter)
};
tracing_subscriber::registry().with(stdio_layer).init();
}
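One detail worth keeping in mind when reading the filter closures above: tracing orders levels by verbosity, so the comparisons read as "event is at most this verbose". A tiny check, assuming the standard tracing crate ordering.
use tracing::Level;

#[test]
fn level_ordering() {
    // More verbose levels compare as greater, so `metadata.level() <= &level`
    // keeps events that are no more verbose than the configured threshold.
    assert!(Level::TRACE > Level::DEBUG && Level::DEBUG > Level::INFO);
    // `WARN.min(level)` therefore picks the stricter (less verbose) of the two.
    assert_eq!(Level::WARN.min(Level::INFO), Level::WARN);
}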
/// Access log filter
@@ -110,7 +110,7 @@ fn open_log_file<P>(path: P) -> std::fs::File
where
P: AsRef<std::path::Path>,
{
// crate a file if it does not exist
// create a file if it does not exist
std::fs::OpenOptions::new()
.create(true)
.append(true)
@@ -119,9 +119,8 @@ where
}
#[inline]
/// Mached with cargo package name with `_` instead of `-`
/// Matches cargo package name with `_` instead of `-`
fn is_cargo_pkg(metadata: &tracing::Metadata<'_>) -> bool {
metadata
.target()
.starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
let pkg_name = env!("CARGO_PKG_NAME").replace('-', "_");
metadata.target().starts_with(&pkg_name)
}
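As a concrete illustration of the target check, assuming the package is named `rpxy-bin` as the doc links in this commit suggest.
#[test]
fn dash_becomes_underscore() {
    // Cargo package "rpxy-bin" shows up in tracing targets as the module path
    // prefix "rpxy_bin", hence the '-' -> '_' normalization above.
    let pkg_name = "rpxy-bin".replace('-', "_");
    assert!("rpxy_bin::log".starts_with(&pkg_name));
}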

View file

@@ -71,6 +71,7 @@ struct RpxyService {
}
impl RpxyService {
/// Create a new RpxyService from config and runtime handle.
async fn new(config_toml: &ConfigToml, runtime_handle: tokio::runtime::Handle) -> Result<Self, anyhow::Error> {
let (proxy_conf, app_conf) = build_settings(config_toml).map_err(|e| anyhow!("Invalid configuration: {e}"))?;
@@ -80,7 +81,7 @@ impl RpxyService {
.map(|(s, r)| (Some(Arc::new(s)), Some(r)))
.unwrap_or((None, None));
Ok(RpxyService {
Ok(Self {
runtime_handle: runtime_handle.clone(),
proxy_conf,
app_conf,