Merge branch 'tmp/sticky-cookie' into feat/sticky-cookie-feature

Jun Kurihara, 2025-06-03 14:50:00 +09:00 (committed by GitHub)
Commit d8cadf06af
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
80 changed files with 4870 additions and 867 deletions

View file

@@ -13,10 +13,8 @@ publish.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
# default = ["http3-quinn", "cache", "rustls-backend", "acme", "sticky-cookie", "post-quantum"]
# default = ["http3-s2n", "cache", "rustls-backend", "acme", "sticky-cookie", "post-quantum"]
default = ["http3-quinn", "cache", "rustls-backend", "acme", "sticky-cookie"]
# default = ["http3-s2n", "cache", "rustls-backend", "acme", "sticky-cookie"]
default = ["http3-quinn", "cache", "rustls-backend", "sticky-cookie", "acme", "post-quantum"]
# default = ["http3-s2n", "cache", "rustls-backend", "sticky-cookie", "acme", "post-quantum"]
http3-quinn = ["rpxy-lib/http3-quinn"]
http3-s2n = ["rpxy-lib/http3-s2n"]
native-tls-backend = ["rpxy-lib/native-tls-backend"]
@@ -30,30 +28,32 @@ sticky-cookie = ["rpxy-lib/sticky-cookie"]
[dependencies]
rpxy-lib = { path = "../rpxy-lib/", default-features = false }
mimalloc = { version = "*", default-features = false }
anyhow = "1.0.91"
rustc-hash = "2.0.0"
serde = { version = "1.0.214", default-features = false, features = ["derive"] }
tokio = { version = "1.41.0", default-features = false, features = [
# TODO: pin mimalloc due to compilation failure by musl
mimalloc = { version = "=0.1.44", default-features = false }
libmimalloc-sys = { version = "=0.1.40" }
anyhow = "1.0.98"
ahash = "0.8.12"
serde = { version = "1.0.219", default-features = false, features = ["derive"] }
tokio = { version = "1.45.1", default-features = false, features = [
"net",
"rt-multi-thread",
"time",
"sync",
"macros",
] }
tokio-util = { version = "0.7.12", default-features = false }
async-trait = "0.1.83"
tokio-util = { version = "0.7.15", default-features = false }
async-trait = "0.1.88"
futures-util = { version = "0.3.31", default-features = false }
# config
clap = { version = "4.5.20", features = ["std", "cargo", "wrap_help"] }
toml = { version = "0.8.19", default-features = false, features = ["parse"] }
hot_reload = "0.1.6"
serde_ignored = "0.1.10"
clap = { version = "4.5.39", features = ["std", "cargo", "wrap_help"] }
toml = { version = "0.8.22", default-features = false, features = ["parse"] }
hot_reload = "0.1.9"
serde_ignored = "0.1.12"
# logging
tracing = { version = "0.1.40" }
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
tracing = { version = "0.1.41" }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
################################
# cert management
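
The `[features]` hunk above reorders the default feature set and adds `post-quantum`, with each flag forwarded to the corresponding `rpxy-lib` feature. As a reminder of how such a Cargo feature is consumed downstream, here is a small hypothetical sketch gated on the `sticky-cookie` flag; the module and helper function are illustrative and not part of rpxy's API.

```rust
// Hypothetical sketch: code compiled only when the `sticky-cookie` Cargo
// feature is enabled (via the `default` list above or
// `cargo build --features sticky-cookie`).
#[cfg(feature = "sticky-cookie")]
mod sticky_cookie {
    /// Illustrative helper, not rpxy's actual API.
    pub fn cookie_name(app: &str) -> String {
        format!("rpxy_sticky_{app}")
    }
}

fn main() {
    #[cfg(feature = "sticky-cookie")]
    println!("sticky cookie name: {}", sticky_cookie::cookie_name("app1"));

    #[cfg(not(feature = "sticky-cookie"))]
    println!("built without sticky-cookie support");
}
```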

View file

@@ -1,21 +1,29 @@
use super::toml::ConfigToml;
use super::toml::{ConfigToml, ConfigTomlExt};
use crate::error::{anyhow, ensure};
use clap::{Arg, ArgAction};
use ahash::HashMap;
use clap::Arg;
use hot_reload::{ReloaderReceiver, ReloaderService};
use rpxy_certs::{build_cert_reloader, CryptoFileSourceBuilder, CryptoReloader, ServerCryptoBase};
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig};
use rustc_hash::FxHashMap as HashMap;
use rpxy_certs::{CryptoFileSourceBuilder, CryptoReloader, ServerCryptoBase, build_cert_reloader};
use rpxy_lib::{AppConfigList, ProxyConfig};
#[cfg(feature = "acme")]
use rpxy_acme::{AcmeManager, ACME_DIR_URL, ACME_REGISTRY_PATH};
use rpxy_acme::{ACME_DIR_URL, ACME_REGISTRY_PATH, AcmeManager};
/// Parsed options
/// Parsed options from CLI
/// Options for configuring the application.
///
/// # Fields
/// - `config_file_path`: Path to the configuration file.
/// - `log_dir_path`: Optional path to the log directory.
pub struct Opts {
pub config_file_path: String,
pub watch: bool,
pub log_dir_path: Option<String>,
}
/// Parse arg values passed from cli
/// Parses command-line arguments into an [`Opts`](rpxy-bin/src/config/parse.rs:13) struct.
///
/// Returns a populated [`Opts`](rpxy-bin/src/config/parse.rs:13) on success, or an error if parsing fails.
/// Expects a required `--config` argument and an optional `--log-dir` argument.
pub fn parse_opts() -> Result<Opts, anyhow::Error> {
let _ = include_str!("../../Cargo.toml");
let options = clap::command!()
@@ -28,78 +36,60 @@ pub fn parse_opts() -> Result<Opts, anyhow::Error> {
.help("Configuration file path like ./config.toml"),
)
.arg(
Arg::new("watch")
.long("watch")
.short('w')
.action(ArgAction::SetTrue)
.help("Activate dynamic reloading of the config file via continuous monitoring"),
Arg::new("log_dir")
.long("log-dir")
.short('l')
.value_name("LOG_DIR")
.help("Directory for log files. If not specified, logs are printed to stdout."),
);
let matches = options.get_matches();
///////////////////////////////////
let config_file_path = matches.get_one::<String>("config_file").unwrap().to_owned();
let watch = matches.get_one::<bool>("watch").unwrap().to_owned();
let log_dir_path = matches.get_one::<String>("log_dir").map(|v| v.to_owned());
Ok(Opts { config_file_path, watch })
Ok(Opts {
config_file_path,
log_dir_path,
})
}
pub fn build_settings(config: &ConfigToml) -> std::result::Result<(ProxyConfig, AppConfigList), anyhow::Error> {
// build proxy config
let proxy_config: ProxyConfig = config.try_into()?;
// backend_apps
let apps = config.apps.clone().ok_or(anyhow!("Missing application spec"))?;
// assertions for all backend apps
ensure!(!apps.0.is_empty(), "Wrong application spec.");
// if only https_port is specified, tls must be configured for all apps
if proxy_config.http_port.is_none() {
ensure!(
apps.0.iter().all(|(_, app)| app.tls.is_some()),
"Some apps serves only plaintext HTTP"
);
}
// https redirection port must be configured only when both http_port and https_port are configured.
if proxy_config.https_redirection_port.is_some() {
ensure!(
proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
"https_redirection_port can be specified only when both http_port and https_port are specified"
);
}
// https redirection can be configured if both ports are active
if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
ensure!(
apps.0.iter().all(|(_, app)| {
if let Some(tls) = app.tls.as_ref() {
tls.https_redirection.is_none()
} else {
true
}
}),
"https_redirection can be specified only when both http_port and https_port are specified"
);
}
// build applications
let mut app_config_list_inner = Vec::<AppConfig>::new();
for (app_name, app) in apps.0.iter() {
let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
let registered_app_name = app_name.to_ascii_lowercase();
let app_config = app.build_app_config(&registered_app_name)?;
app_config_list_inner.push(app_config);
}
let app_config_list = AppConfigList {
inner: app_config_list_inner,
default_app: config.default_app.clone().map(|v| v.to_ascii_lowercase()), // default backend application for plaintext http requests
};
Ok((proxy_config, app_config_list))
/// Build proxy and app settings from config using ConfigTomlExt
pub fn build_settings(config: &ConfigToml) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
config.validate_and_build_settings()
}
/* ----------------------- */
/// Helper to build a CryptoFileSource for an app, handling ACME if enabled
#[cfg(feature = "acme")]
fn build_tls_for_app_acme(
tls: &mut super::toml::TlsOption,
acme_option: &Option<super::toml::AcmeOption>,
server_name: &str,
acme_registry_path: &str,
acme_dir_url: &str,
) -> Result<(), anyhow::Error> {
if let Some(true) = tls.acme {
ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none());
let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
let file_name =
rpxy_acme::DirCache::cached_cert_file_name(&[server_name.to_ascii_lowercase()], acme_dir_url.to_ascii_lowercase());
let cert_path = format!("{}/{}", subdir, file_name);
tls.tls_cert_key_path = Some(cert_path.clone());
tls.tls_cert_path = Some(cert_path);
}
Ok(())
}
/// Build cert map
/// Builds the certificate manager for TLS applications.
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
///
/// # Returns
/// Returns an option containing a tuple of certificate reloader service and receiver, or `None` if TLS is not enabled.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_cert_manager(
config: &ConfigToml,
) -> Result<
@@ -136,19 +126,9 @@ pub async fn build_cert_manager(
ensure!(tls.tls_cert_key_path.is_some() && tls.tls_cert_path.is_some());
#[cfg(feature = "acme")]
let tls = {
let mut tls = tls.clone();
if let Some(true) = tls.acme {
ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none());
// Both of tls_cert_key_path and tls_cert_path must be the same for ACME since it's a single file
let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
let file_name =
rpxy_acme::DirCache::cached_cert_file_name(&[server_name.to_ascii_lowercase()], acme_dir_url.to_ascii_lowercase());
tls.tls_cert_key_path = Some(format!("{}/{}", subdir, file_name));
tls.tls_cert_path = Some(format!("{}/{}", subdir, file_name));
}
tls
};
let mut tls = tls.clone();
#[cfg(feature = "acme")]
build_tls_for_app_acme(&mut tls, &acme_option, server_name, acme_registry_path, acme_dir_url)?;
let crypto_file_source = CryptoFileSourceBuilder::default()
.tls_cert_path(tls.tls_cert_path.as_ref().unwrap())
@@ -165,24 +145,31 @@ pub async fn build_cert_manager(
/* ----------------------- */
#[cfg(feature = "acme")]
/// Build acme manager
/// Builds the ACME manager for automatic certificate management (enabled with the `acme` feature).
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
/// * `runtime_handle` - Tokio runtime handle for async operations.
///
/// # Returns
/// Returns an option containing an [`AcmeManager`](rpxy-bin/src/config/parse.rs:153) if ACME is configured, or `None` otherwise.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_acme_manager(
config: &ConfigToml,
runtime_handle: tokio::runtime::Handle,
) -> Result<Option<AcmeManager>, anyhow::Error> {
let acme_option = config.experimental.as_ref().and_then(|v| v.acme.clone());
if acme_option.is_none() {
let Some(acme_option) = acme_option else {
return Ok(None);
}
let acme_option = acme_option.unwrap();
};
let domains = config
let domains: Vec<String> = config
.apps
.as_ref()
.unwrap()
.0
.values()
.filter_map(|app| {
//
if let Some(tls) = app.tls.as_ref() {
if let Some(true) = tls.acme {
return Some(app.server_name.as_ref().unwrap().to_owned());
@@ -190,7 +177,7 @@ pub async fn build_acme_manager(
}
None
})
.collect::<Vec<_>>();
.collect();
if domains.is_empty() {
return Ok(None);
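
The last hunk above rewrites `build_acme_manager` from an `is_none()` check followed by `unwrap()` into a `let`-`else` early return. A short, self-contained sketch of that pattern; the struct and values here are illustrative, not rpxy's real types.

```rust
// Illustrative sketch of the `let`-`else` early return adopted in
// `build_acme_manager`: bind the inner value or bail out immediately,
// without a separate `is_none()` check and `unwrap()`.
struct AcmeOption {
    dir_url: String,
}

fn describe(acme: Option<AcmeOption>) -> Option<String> {
    // Diverges (returns) when `acme` is `None`; otherwise `acme_option` is bound.
    let Some(acme_option) = acme else {
        return None;
    };
    Some(format!("ACME directory: {}", acme_option.dir_url))
}

fn main() {
    assert_eq!(describe(None), None);
    let some = describe(Some(AcmeOption { dir_url: "https://acme.example/dir".into() }));
    assert_eq!(some.as_deref(), Some("ACME directory: https://acme.example/dir"));
}
```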

View file

@@ -8,17 +8,16 @@ pub struct ConfigTomlReloader {
}
#[async_trait]
impl Reload<ConfigToml> for ConfigTomlReloader {
impl Reload<ConfigToml, String> for ConfigTomlReloader {
type Source = String;
async fn new(source: &Self::Source) -> Result<Self, ReloaderError<ConfigToml>> {
async fn new(source: &Self::Source) -> Result<Self, ReloaderError<ConfigToml, String>> {
Ok(Self {
config_path: source.clone(),
})
}
async fn reload(&self) -> Result<Option<ConfigToml>, ReloaderError<ConfigToml>> {
let conf = ConfigToml::new(&self.config_path)
.map_err(|_e| ReloaderError::<ConfigToml>::Reload("Failed to reload config toml"))?;
async fn reload(&self) -> Result<Option<ConfigToml>, ReloaderError<ConfigToml, String>> {
let conf = ConfigToml::new(&self.config_path).map_err(|e| ReloaderError::<ConfigToml, String>::Reload(e.to_string()))?;
Ok(Some(conf))
}
}
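
The hunk above moves `ConfigTomlReloader` to the two-parameter `Reload<ConfigToml, String>` trait, so a failed reload now carries the underlying parse error as a `String` instead of a fixed message. A self-contained sketch of that error-propagation idea; `ReloadError` below is a stand-in for hot_reload's `ReloaderError<ConfigToml, String>`, and the snippet assumes only the `toml` crate from the manifest above.

```rust
// Sketch of the change above: preserve the concrete parse error in the
// reload error instead of a static "Failed to reload config toml" string.
// `ReloadError` is a stand-in, not hot_reload's real type.
#[derive(Debug)]
enum ReloadError {
    Reload(String),
}

fn reload(raw: &str) -> Result<toml::Value, ReloadError> {
    // Keep the concrete parse error so the log explains *why* the reload failed.
    toml::from_str(raw).map_err(|e| ReloadError::Reload(e.to_string()))
}

fn main() {
    match reload("listen_port = not_a_number") {
        Ok(v) => println!("reloaded: {v:?}"),
        Err(ReloadError::Reload(msg)) => eprintln!("config reload failed: {msg}"),
    }
}
```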

View file

@@ -3,13 +3,26 @@ use crate::{
error::{anyhow, ensure},
log::warn,
};
use rpxy_lib::{reexports::Uri, AppConfig, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri};
use rustc_hash::FxHashMap as HashMap;
use ahash::HashMap;
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri, reexports::Uri};
use serde::Deserialize;
use std::{fs, net::SocketAddr};
use tokio::time::Duration;
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// Main configuration structure parsed from the TOML file.
///
/// # Fields
/// - `listen_port`: Optional TCP port for HTTP.
/// - `listen_port_tls`: Optional TCP port for HTTPS/TLS.
/// - `listen_ipv6`: Enable IPv6 listening.
/// - `https_redirection_port`: Optional port for HTTP to HTTPS redirection.
/// - `tcp_listen_backlog`: Optional TCP backlog size.
/// - `max_concurrent_streams`: Optional max concurrent streams.
/// - `max_clients`: Optional max client connections.
/// - `apps`: Optional application definitions.
/// - `default_app`: Optional default application name.
/// - `experimental`: Optional experimental features.
pub struct ConfigToml {
pub listen_port: Option<u16>,
pub listen_port_tls: Option<u16>,
@@ -23,8 +36,75 @@ pub struct ConfigToml {
pub experimental: Option<Experimental>,
}
/// Extension trait for config validation and building
pub trait ConfigTomlExt {
fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error>;
}
impl ConfigTomlExt for ConfigToml {
fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
let proxy_config: ProxyConfig = self.try_into()?;
let apps = self.apps.as_ref().ok_or(anyhow!("Missing application spec"))?;
// Ensure at least one app is defined
ensure!(!apps.0.is_empty(), "Wrong application spec.");
// Helper: all apps have TLS
let all_apps_have_tls = apps.0.values().all(|app| app.tls.is_some());
// Helper: all apps have https_redirection unset
let all_apps_no_https_redirection = apps.0.values().all(|app| {
if let Some(tls) = app.tls.as_ref() {
tls.https_redirection.is_none()
} else {
true
}
});
if proxy_config.http_port.is_none() {
ensure!(all_apps_have_tls, "Some apps serve only plaintext HTTP");
}
if proxy_config.https_redirection_port.is_some() {
ensure!(
proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
"https_redirection_port can be specified only when both http_port and https_port are specified"
);
}
if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
ensure!(
all_apps_no_https_redirection,
"https_redirection can be specified only when both http_port and https_port are specified"
);
}
// Build AppConfigList
let mut app_config_list_inner = Vec::<AppConfig>::new();
for (app_name, app) in apps.0.iter() {
let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
let registered_app_name = app_name.to_ascii_lowercase();
let app_config = app.build_app_config(&registered_app_name)?;
app_config_list_inner.push(app_config);
}
let app_config_list = AppConfigList {
inner: app_config_list_inner,
default_app: self.default_app.clone().map(|v| v.to_ascii_lowercase()),
};
Ok((proxy_config, app_config_list))
}
}
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// HTTP/3 protocol options for server configuration.
///
/// # Fields
/// - `alt_svc_max_age`: Optional max age for Alt-Svc header.
/// - `request_max_body_size`: Optional maximum request body size.
/// - `max_concurrent_connections`: Optional maximum concurrent connections.
/// - `max_concurrent_bidistream`: Optional maximum concurrent bidirectional streams.
/// - `max_concurrent_unistream`: Optional maximum concurrent unidirectional streams.
/// - `max_idle_timeout`: Optional maximum idle timeout in milliseconds.
pub struct Http3Option {
pub alt_svc_max_age: Option<u32>,
pub request_max_body_size: Option<usize>,
@@ -232,7 +312,7 @@ impl ConfigToml {
// Check unused fields during deserialization
let t = toml::de::Deserializer::new(&config_str);
let mut unused = rustc_hash::FxHashSet::default();
let mut unused = ahash::HashSet::default();
let res = serde_ignored::deserialize(t, |path| {
unused.insert(path.to_string());
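
The last hunk above keeps the `serde_ignored` check that warns about config keys the `ConfigToml` schema does not recognize, now collected into an `ahash` set. A minimal, self-contained sketch of that pattern; `ServerConfig` and its `port` field are illustrative rather than rpxy's real schema, std's `HashSet` stands in for `ahash::HashSet`, and the snippet assumes `serde` (with `derive`), `toml`, and `serde_ignored` as in the manifest above.

```rust
// Minimal sketch of the unused-key check: deserialize through serde_ignored
// and record every path that did not map onto a struct field.
use std::collections::HashSet;

#[derive(serde::Deserialize, Debug)]
struct ServerConfig {
    port: u16,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config_str = "port = 8080\ntypo_field = true\n";

    let de = toml::de::Deserializer::new(config_str);
    let mut unused: HashSet<String> = HashSet::new();
    let cfg: ServerConfig = serde_ignored::deserialize(de, |path| {
        // Record any key that the target struct ignored.
        unused.insert(path.to_string());
    })?;

    println!("parsed: {cfg:?}");
    for key in &unused {
        println!("warning: unknown config key `{key}` was ignored");
    }
    Ok(())
}
```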

View file

@@ -1,7 +1,13 @@
/// Default IPv4 listen addresses for the server.
pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"];
/// Default IPv6 listen addresses for the server.
pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"];
/// Delay in seconds before reloading the configuration after changes.
pub const CONFIG_WATCH_DELAY_SECS: u32 = 15;
#[cfg(feature = "cache")]
// Cache directory
/// Directory path for cache storage (enabled with "cache" feature).
pub const CACHE_DIR: &str = "./cache";
pub(crate) const ACCESS_LOG_FILE: &str = "access.log";
pub(crate) const SYSTEM_LOG_FILE: &str = "rpxy.log";

View file

@@ -1,2 +1,2 @@
#[allow(unused)]
pub use anyhow::{anyhow, bail, ensure, Context};
pub use anyhow::{Context, anyhow, bail, ensure};

View file

@@ -1,44 +1,126 @@
use crate::constants::{ACCESS_LOG_FILE, SYSTEM_LOG_FILE};
use rpxy_lib::log_event_names;
use std::str::FromStr;
use tracing_subscriber::{fmt, prelude::*};
use tracing_subscriber::{filter::filter_fn, fmt, prelude::*};
#[allow(unused)]
pub use tracing::{debug, error, info, warn};
/// Initialize the logger with the RUST_LOG environment variable.
pub fn init_logger() {
let level_string = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string());
let level = tracing::Level::from_str(level_string.as_str()).unwrap_or(tracing::Level::INFO);
pub fn init_logger(log_dir_path: Option<&str>) {
let level = std::env::var("RUST_LOG")
.ok()
.and_then(|s| tracing::Level::from_str(&s).ok())
.unwrap_or(tracing::Level::INFO);
// This limits the logger to emits only this crate with any level above RUST_LOG, for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level.
let stdio_layer = fmt::layer().with_level(true).with_thread_ids(false);
if level <= tracing::Level::INFO {
// in normal deployment environment
let stdio_layer = stdio_layer
.with_target(false)
.compact()
.with_filter(tracing_subscriber::filter::filter_fn(move |metadata| {
(metadata
.target()
.starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
&& metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::ERROR.min(level)
}));
tracing_subscriber::registry().with(stdio_layer).init();
} else {
// debugging
let stdio_layer = stdio_layer
match log_dir_path {
None => init_stdio_logger(level),
Some(path) => init_file_logger(level, path),
}
}
/// file logging
fn init_file_logger(level: tracing::Level, log_dir_path: &str) {
println!("Activate logging to files: {}", log_dir_path);
let log_dir = std::path::Path::new(log_dir_path);
if !log_dir.exists() {
println!("Directory does not exist, creating: {}", log_dir.display());
std::fs::create_dir_all(log_dir).expect("Failed to create log directory");
}
let access_log_path = log_dir.join(ACCESS_LOG_FILE);
let system_log_path = log_dir.join(SYSTEM_LOG_FILE);
println!("Access log: {}", access_log_path.display());
println!("System and error log: {}", system_log_path.display());
let access_log = open_log_file(&access_log_path);
let system_log = open_log_file(&system_log_path);
let access_layer = fmt::layer()
.with_line_number(false)
.with_thread_ids(false)
.with_thread_names(false)
.with_target(false)
.with_level(false)
.compact()
.with_ansi(false)
.with_writer(access_log)
.with_filter(AccessLogFilter);
let system_layer = fmt::layer()
.with_line_number(false)
.with_thread_ids(false)
.with_thread_names(false)
.with_target(false)
.with_level(true)
.compact()
.with_ansi(false)
.with_writer(system_log)
.with_filter(filter_fn(move |metadata| {
(is_cargo_pkg(metadata) && metadata.name() != log_event_names::ACCESS_LOG && metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::WARN.min(level)
}));
tracing_subscriber::registry().with(access_layer).with(system_layer).init();
}
/// stdio logging
fn init_stdio_logger(level: tracing::Level) {
// This limits the logger to emit only this crate with any level above RUST_LOG,
// for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level.
let base_layer = fmt::layer().with_level(true).with_thread_ids(false);
let debug = level > tracing::Level::INFO;
let filter = filter_fn(move |metadata| {
if debug {
(is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::INFO.min(level)
} else {
(is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::WARN.min(level)
}
});
let stdio_layer = if debug {
base_layer
.with_line_number(true)
.with_target(true)
.with_thread_names(true)
.with_target(true)
.compact()
.with_filter(tracing_subscriber::filter::filter_fn(move |metadata| {
(metadata
.target()
.starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
&& metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::INFO.min(level)
}));
tracing_subscriber::registry().with(stdio_layer).init();
.with_filter(filter)
} else {
base_layer.with_target(false).compact().with_filter(filter)
};
tracing_subscriber::registry().with(stdio_layer).init();
}
/// Access log filter
struct AccessLogFilter;
impl<S> tracing_subscriber::layer::Filter<S> for AccessLogFilter {
fn enabled(&self, metadata: &tracing::Metadata<'_>, _: &tracing_subscriber::layer::Context<'_, S>) -> bool {
is_cargo_pkg(metadata) && metadata.name().contains(log_event_names::ACCESS_LOG) && metadata.level() <= &tracing::Level::INFO
}
}
#[inline]
/// Create a file for logging
fn open_log_file<P>(path: P) -> std::fs::File
where
P: AsRef<std::path::Path>,
{
// create a file if it does not exist
std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(path)
.expect("Failed to open the log file")
}
#[inline]
/// Matches cargo package name with `_` instead of `-`
fn is_cargo_pkg(metadata: &tracing::Metadata<'_>) -> bool {
let pkg_name = env!("CARGO_PKG_NAME").replace('-', "_");
metadata.target().starts_with(&pkg_name)
}
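
The log module above now splits output across two `fmt` layers on a single registry: an access-log layer and a system layer, each with its own writer and filter. A minimal, self-contained sketch of that layering, assuming only `tracing` and `tracing-subscriber` with `env-filter`/default features as in the manifest; it writes to stdout/stderr instead of the `access.log`/`rpxy.log` files and uses a simplified name match in place of `log_event_names::ACCESS_LOG`.

```rust
// Sketch of the dual-layer setup: one fmt layer per destination, each with
// its own filter, composed on one registry. Writers are stdout/stderr here
// to stay self-contained; the real module writes to log files.
use tracing_subscriber::{filter::filter_fn, fmt, prelude::*};

fn main() {
    let level = tracing::Level::INFO;
    let pkg_prefix = env!("CARGO_PKG_NAME").replace('-', "_");

    // "System" layer: this crate's events at or below `level`, plus warnings from anywhere.
    let system_layer = fmt::layer()
        .with_writer(std::io::stderr)
        .with_filter(filter_fn(move |metadata| {
            (metadata.target().starts_with(&pkg_prefix) && metadata.level() <= &level)
                || metadata.level() <= &tracing::Level::WARN
        }));

    // "Access" layer: bare, ANSI-free output for request lines, selected by event name.
    let access_layer = fmt::layer()
        .with_target(false)
        .with_level(false)
        .with_ansi(false)
        .with_writer(std::io::stdout)
        .with_filter(filter_fn(|metadata| metadata.name().contains("access")));

    tracing_subscriber::registry().with(system_layer).with(access_layer).init();

    tracing::info!("service started");
}
```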

View file

@@ -9,19 +9,17 @@ mod log;
#[cfg(feature = "acme")]
use crate::config::build_acme_manager;
use crate::{
config::{build_cert_manager, build_settings, parse_opts, ConfigToml, ConfigTomlReloader},
config::{ConfigToml, ConfigTomlReloader, build_cert_manager, build_settings, parse_opts},
constants::CONFIG_WATCH_DELAY_SECS,
error::*,
log::*,
};
use hot_reload::{ReloaderReceiver, ReloaderService};
use rpxy_lib::{entrypoint, RpxyOptions, RpxyOptionsBuilder};
use rpxy_lib::{RpxyOptions, RpxyOptionsBuilder, entrypoint};
use std::sync::Arc;
use tokio_util::sync::CancellationToken;
fn main() {
init_logger();
let mut runtime_builder = tokio::runtime::Builder::new_multi_thread();
runtime_builder.enable_all();
runtime_builder.thread_name("rpxy");
@@ -30,37 +28,34 @@ fn main() {
runtime.block_on(async {
// Initially load options
let Ok(parsed_opts) = parse_opts() else {
error!("Invalid toml file");
std::process::exit(1);
};
if !parsed_opts.watch {
if let Err(e) = rpxy_service_without_watcher(&parsed_opts.config_file_path, runtime.handle().clone()).await {
error!("rpxy service existed: {e}");
std::process::exit(1);
}
} else {
let (config_service, config_rx) =
ReloaderService::<ConfigTomlReloader, ConfigToml>::new(&parsed_opts.config_file_path, CONFIG_WATCH_DELAY_SECS, false)
.await
.unwrap();
init_logger(parsed_opts.log_dir_path.as_deref());
tokio::select! {
config_res = config_service.start() => {
if let Err(e) = config_res {
error!("config reloader service exited: {e}");
std::process::exit(1);
}
}
rpxy_res = rpxy_service_with_watcher(config_rx, runtime.handle().clone()) => {
if let Err(e) = rpxy_res {
error!("rpxy service existed: {e}");
std::process::exit(1);
}
let (config_service, config_rx) = ReloaderService::<ConfigTomlReloader, ConfigToml, String>::new(
&parsed_opts.config_file_path,
CONFIG_WATCH_DELAY_SECS,
false,
)
.await
.unwrap();
tokio::select! {
config_res = config_service.start() => {
if let Err(e) = config_res {
error!("config reloader service exited: {e}");
std::process::exit(1);
}
}
rpxy_res = rpxy_service(config_rx, runtime.handle().clone()) => {
if let Err(e) = rpxy_res {
error!("rpxy service existed: {e}");
std::process::exit(1);
}
}
std::process::exit(0);
}
std::process::exit(0);
});
}
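
The remaining hunks in this file (below) make the `CancellationToken` mandatory in `RpxyService::start`, hand a `child_token()` to each spawned task, and cancel the parent token when any task fails so the siblings shut down too. A self-contained sketch of that pattern, assuming only `tokio` and `tokio-util`; the task bodies are placeholders.

```rust
// Sketch of the cancellation pattern used below: each spawned task gets a
// child token, and a failing task cancels the parent so siblings terminate.
use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
    let cancel_token = CancellationToken::new();

    // A worker that fails after a while and cancels everything else.
    let failing = {
        let parent = cancel_token.clone();
        tokio::spawn(async move {
            tokio::time::sleep(std::time::Duration::from_millis(50)).await;
            eprintln!("worker exited on error, cancelling siblings");
            parent.cancel();
        })
    };

    // A long-running worker that stops once its child token is cancelled.
    let child = cancel_token.child_token();
    let long_running = tokio::spawn(async move {
        tokio::select! {
            _ = child.cancelled() => println!("long-running worker terminated"),
            _ = tokio::time::sleep(std::time::Duration::from_secs(3600)) => {}
        }
    });

    let _ = tokio::join!(failing, long_running);
}
```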
@@ -76,6 +71,7 @@ struct RpxyService {
}
impl RpxyService {
/// Create a new RpxyService from config and runtime handle.
async fn new(config_toml: &ConfigToml, runtime_handle: tokio::runtime::Handle) -> Result<Self, anyhow::Error> {
let (proxy_conf, app_conf) = build_settings(config_toml).map_err(|e| anyhow!("Invalid configuration: {e}"))?;
@@ -85,7 +81,7 @@ impl RpxyService {
.map(|(s, r)| (Some(Arc::new(s)), Some(r)))
.unwrap_or((None, None));
Ok(RpxyService {
Ok(Self {
runtime_handle: runtime_handle.clone(),
proxy_conf,
app_conf,
@@ -96,7 +92,7 @@ impl RpxyService {
})
}
async fn start(&self, cancel_token: Option<CancellationToken>) -> Result<(), anyhow::Error> {
async fn start(&self, cancel_token: CancellationToken) -> Result<(), anyhow::Error> {
let RpxyService {
runtime_handle,
proxy_conf,
@@ -111,17 +107,19 @@ impl RpxyService {
{
let (acme_join_handles, server_config_acme_challenge) = acme_manager
.as_ref()
.map(|m| m.spawn_manager_tasks(cancel_token.as_ref().map(|t| t.child_token())))
.map(|m| m.spawn_manager_tasks(cancel_token.child_token()))
.unwrap_or((vec![], Default::default()));
let rpxy_opts = RpxyOptionsBuilder::default()
.proxy_config(proxy_conf.clone())
.app_config_list(app_conf.clone())
.cert_rx(cert_rx.clone())
.runtime_handle(runtime_handle.clone())
.cancel_token(cancel_token.as_ref().map(|t| t.child_token()))
.server_configs_acme_challenge(Arc::new(server_config_acme_challenge))
.build()?;
self.start_inner(rpxy_opts, acme_join_handles).await.map_err(|e| anyhow!(e))
self
.start_inner(rpxy_opts, cancel_token, acme_join_handles)
.await
.map_err(|e| anyhow!(e))
}
#[cfg(not(feature = "acme"))]
@@ -131,9 +129,8 @@ impl RpxyService {
.app_config_list(app_conf.clone())
.cert_rx(cert_rx.clone())
.runtime_handle(runtime_handle.clone())
.cancel_token(cancel_token.as_ref().map(|t| t.child_token()))
.build()?;
self.start_inner(rpxy_opts).await.map_err(|e| anyhow!(e))
self.start_inner(rpxy_opts, cancel_token).await.map_err(|e| anyhow!(e))
}
}
@@ -141,19 +138,19 @@ impl RpxyService {
async fn start_inner(
&self,
rpxy_opts: RpxyOptions,
cancel_token: CancellationToken,
#[cfg(feature = "acme")] acme_task_handles: Vec<tokio::task::JoinHandle<()>>,
) -> Result<(), anyhow::Error> {
let cancel_token = rpxy_opts.cancel_token.clone();
let cancel_token = cancel_token.clone();
let runtime_handle = rpxy_opts.runtime_handle.clone();
// spawn rpxy entrypoint, where cancellation token is possibly contained inside the service
let cancel_token_clone = cancel_token.clone();
let child_cancel_token = cancel_token.child_token();
let rpxy_handle = runtime_handle.spawn(async move {
if let Err(e) = entrypoint(&rpxy_opts).await {
if let Err(e) = entrypoint(&rpxy_opts, child_cancel_token).await {
error!("rpxy entrypoint exited on error: {e}");
if let Some(cancel_token) = cancel_token_clone {
cancel_token.cancel();
}
cancel_token_clone.cancel();
return Err(anyhow!(e));
}
Ok(())
@@ -166,24 +163,20 @@ impl RpxyService {
// spawn certificate reloader service, where cert service does not have cancellation token inside the service
let cert_service = self.cert_service.as_ref().unwrap().clone();
let cancel_token_clone = cancel_token.clone();
let child_cancel_token = cancel_token.as_ref().map(|c| c.child_token());
let child_cancel_token = cancel_token.child_token();
let cert_handle = runtime_handle.spawn(async move {
if let Some(child_cancel_token) = child_cancel_token {
tokio::select! {
cert_res = cert_service.start() => {
if let Err(ref e) = cert_res {
error!("cert reloader service exited on error: {e}");
}
cancel_token_clone.unwrap().cancel();
cert_res.map_err(|e| anyhow!(e))
}
_ = child_cancel_token.cancelled() => {
debug!("cert reloader service terminated");
Ok(())
tokio::select! {
cert_res = cert_service.start() => {
if let Err(ref e) = cert_res {
error!("cert reloader service exited on error: {e}");
}
cancel_token_clone.cancel();
cert_res.map_err(|e| anyhow!(e))
}
_ = child_cancel_token.cancelled() => {
debug!("cert reloader service terminated");
Ok(())
}
} else {
cert_service.start().await.map_err(|e| anyhow!(e))
}
});
@@ -218,9 +211,7 @@ impl RpxyService {
if let Err(ref e) = acme_res {
error!("acme manager exited on error: {e}");
}
if let Some(cancel_token) = cancel_token_clone {
cancel_token.cancel();
}
cancel_token_clone.cancel();
acme_res.map_err(|e| anyhow!(e))
});
let (rpxy_res, cert_res, acme_res) = tokio::join!(rpxy_handle, cert_handle, acme_handle);
@@ -235,18 +226,8 @@ impl RpxyService {
}
}
async fn rpxy_service_without_watcher(
config_file_path: &str,
runtime_handle: tokio::runtime::Handle,
) -> Result<(), anyhow::Error> {
info!("Start rpxy service");
let config_toml = ConfigToml::new(config_file_path).map_err(|e| anyhow!("Invalid toml file: {e}"))?;
let service = RpxyService::new(&config_toml, runtime_handle).await?;
service.start(None).await
}
async fn rpxy_service_with_watcher(
mut config_rx: ReloaderReceiver<ConfigToml>,
async fn rpxy_service(
mut config_rx: ReloaderReceiver<ConfigToml, String>,
runtime_handle: tokio::runtime::Handle,
) -> Result<(), anyhow::Error> {
info!("Start rpxy service with dynamic config reloader");
@@ -265,7 +246,7 @@ async fn rpxy_service_with_watcher(
tokio::select! {
/* ---------- */
rpxy_res = service.start(Some(cancel_token.clone())) => {
rpxy_res = service.start(cancel_token.clone()) => {
if let Err(ref e) = rpxy_res {
error!("rpxy service exited on error: {e}");
} else {