refactor: make some config parameters in Globals belong to another struct

Jun Kurihara 2023-07-07 21:54:56 +09:00
commit f8d37f7846
GPG key ID: 48ADFD173ED22B03
8 changed files with 109 additions and 101 deletions
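
In short: the per-proxy transport settings that used to sit directly on Globals move into a new ProxyConfig struct with a Default impl, Backends gains a Default impl as well, and every call site switches from globals.<field> to globals.proxy_config.<field>. Below is a minimal, compiling sketch of the resulting shape, assuming stubbed Backends/RequestCount types and placeholder default values (the real defaults come from the crate's constants such as PROXY_TIMEOUT_SEC and the H3::* values); HTTP/3 fields and the tokio runtime handle are omitted for brevity.

use std::net::SocketAddr;
use std::time::Duration;

#[derive(Default)]
pub struct Backends; // stub: really a map from server name to Backend plus a default server name

#[derive(Default)]
pub struct RequestCount; // stub: really an atomic counter of in-flight requests

/// Configuration parameters for proxy transport and request handlers
pub struct ProxyConfig {
  pub listen_sockets: Vec<SocketAddr>,
  pub http_port: Option<u16>,
  pub https_port: Option<u16>,
  pub proxy_timeout: Duration,
  pub upstream_timeout: Duration,
  pub max_clients: usize,
  pub max_concurrent_streams: u32,
  pub keepalive: bool,
  pub sni_consistency: bool,
}

impl Default for ProxyConfig {
  fn default() -> Self {
    Self {
      listen_sockets: Vec::new(),
      http_port: None,
      https_port: None,
      proxy_timeout: Duration::from_secs(60),    // placeholder for PROXY_TIMEOUT_SEC
      upstream_timeout: Duration::from_secs(60), // placeholder for UPSTREAM_TIMEOUT_SEC
      max_clients: 512,                          // placeholder for MAX_CLIENTS
      max_concurrent_streams: 64,                // placeholder for MAX_CONCURRENT_STREAMS
      keepalive: true,
      sni_consistency: true,
    }
  }
}

/// Shared context for async tasks; configuration now lives behind `proxy_config`.
pub struct Globals {
  pub proxy_config: ProxyConfig,
  pub backends: Backends,
  pub request_count: RequestCount,
  // pub runtime_handle: tokio::runtime::Handle, // omitted to keep the sketch dependency-free
}

fn main() {
  // Call sites change from `globals.http_port` to `globals.proxy_config.http_port`.
  let globals = Globals {
    proxy_config: ProxyConfig::default(),
    backends: Backends::default(),
    request_count: RequestCount::default(),
  };
  assert!(globals.proxy_config.http_port.is_none() && globals.proxy_config.keepalive);
}

The diffs below apply exactly this shape; only the full field set and the constant-backed defaults differ.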

View file

@@ -218,6 +218,15 @@ pub struct Backends {
   pub default_server_name_bytes: Option<ServerNameBytesExp>, // for plaintext http
 }
+
+impl Default for Backends {
+  fn default() -> Self {
+    Self {
+      default_server_name_bytes: None,
+      apps: HashMap::<ServerNameBytesExp, Backend>::default(),
+    }
+  }
+}
 
 pub type SniServerCryptoMap = HashMap<ServerNameBytesExp, Arc<ServerConfig>>;
 pub struct ServerCrypto {
   // For Quic/HTTP3, only servers with no client authentication
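
With this Default impl in place, call sites that only need an empty registry no longer have to spell out the map. A hedged usage sketch (field names come from the hunk above, everything else is assumed):

  let backends = Backends::default(); // empty `apps` map, no default server name

which is exactly what main.rs switches to later in this commit.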

View file

@@ -30,11 +30,11 @@ pub fn parse_opts(globals: &mut Globals) -> std::result::Result<(), anyhow::Erro
   };
   // listen port and socket
-  globals.http_port = config.listen_port;
-  globals.https_port = config.listen_port_tls;
+  globals.proxy_config.http_port = config.listen_port;
+  globals.proxy_config.https_port = config.listen_port_tls;
   ensure!(
-    { globals.http_port.is_some() || globals.https_port.is_some() } && {
-      if let (Some(p), Some(t)) = (globals.http_port, globals.https_port) {
+    { globals.proxy_config.http_port.is_some() || globals.proxy_config.https_port.is_some() } && {
+      if let (Some(p), Some(t)) = (globals.proxy_config.http_port, globals.proxy_config.https_port) {
         p != t
       } else {
         true
@@ -53,32 +53,32 @@ pub fn parse_opts(globals: &mut Globals) -> std::result::Result<(), anyhow::Erro
       LISTEN_ADDRESSES_V4.to_vec()
     }
   };
-  globals.listen_sockets = listen_addresses
+  globals.proxy_config.listen_sockets = listen_addresses
     .iter()
     .flat_map(|x| {
       let mut v: Vec<SocketAddr> = vec![];
-      if let Some(p) = globals.http_port {
+      if let Some(p) = globals.proxy_config.http_port {
        v.push(format!("{x}:{p}").parse().unwrap());
       }
-      if let Some(p) = globals.https_port {
+      if let Some(p) = globals.proxy_config.https_port {
        v.push(format!("{x}:{p}").parse().unwrap());
       }
       v
     })
     .collect();
-  if globals.http_port.is_some() {
-    info!("Listen port: {}", globals.http_port.unwrap());
+  if globals.proxy_config.http_port.is_some() {
+    info!("Listen port: {}", globals.proxy_config.http_port.unwrap());
   }
-  if globals.https_port.is_some() {
-    info!("Listen port: {} (for TLS)", globals.https_port.unwrap());
+  if globals.proxy_config.https_port.is_some() {
+    info!("Listen port: {} (for TLS)", globals.proxy_config.https_port.unwrap());
   }
 
   // max values
   if let Some(c) = config.max_clients {
-    globals.max_clients = c as usize;
+    globals.proxy_config.max_clients = c as usize;
   }
   if let Some(c) = config.max_concurrent_streams {
-    globals.max_concurrent_streams = c;
+    globals.proxy_config.max_concurrent_streams = c;
   }
 
   // backend apps
@@ -90,7 +90,7 @@ pub fn parse_opts(globals: &mut Globals) -> std::result::Result<(), anyhow::Erro
   for (app_name, app) in apps.0.iter() {
     ensure!(app.server_name.is_some(), "Missing server_name");
     let server_name_string = app.server_name.as_ref().unwrap();
-    if globals.http_port.is_none() {
+    if globals.proxy_config.http_port.is_none() {
       // if only https_port is specified, tls must be configured
       ensure!(app.tls.is_some())
     }
@@ -108,7 +108,7 @@ pub fn parse_opts(globals: &mut Globals) -> std::result::Result<(), anyhow::Erro
     // TLS settings and build backend instance
     let backend = if app.tls.is_none() {
-      ensure!(globals.http_port.is_some(), "Required HTTP port");
+      ensure!(globals.proxy_config.http_port.is_some(), "Required HTTP port");
       backend_builder.build()?
     } else {
       let tls = app.tls.as_ref().unwrap();
@@ -117,7 +117,7 @@ pub fn parse_opts(globals: &mut Globals) -> std::result::Result<(), anyhow::Erro
       let https_redirection = if tls.https_redirection.is_none() {
         Some(true) // Default true
       } else {
-        ensure!(globals.https_port.is_some()); // only when both https ports are configured.
+        ensure!(globals.proxy_config.https_port.is_some()); // only when both https ports are configured.
         tls.https_redirection
       };
@@ -159,28 +159,28 @@ pub fn parse_opts(globals: &mut Globals) -> std::result::Result<(), anyhow::Erro
   #[cfg(feature = "http3")]
   {
     if let Some(h3option) = exp.h3 {
-      globals.http3 = true;
+      globals.proxy_config.http3 = true;
       info!("Experimental HTTP/3.0 is enabled. Note it is still very unstable.");
       if let Some(x) = h3option.alt_svc_max_age {
-        globals.h3_alt_svc_max_age = x;
+        globals.proxy_config.h3_alt_svc_max_age = x;
       }
       if let Some(x) = h3option.request_max_body_size {
-        globals.h3_request_max_body_size = x;
+        globals.proxy_config.h3_request_max_body_size = x;
       }
       if let Some(x) = h3option.max_concurrent_connections {
-        globals.h3_max_concurrent_connections = x;
+        globals.proxy_config.h3_max_concurrent_connections = x;
       }
       if let Some(x) = h3option.max_concurrent_bidistream {
-        globals.h3_max_concurrent_bidistream = x.into();
+        globals.proxy_config.h3_max_concurrent_bidistream = x.into();
       }
       if let Some(x) = h3option.max_concurrent_unistream {
-        globals.h3_max_concurrent_unistream = x.into();
+        globals.proxy_config.h3_max_concurrent_unistream = x.into();
       }
       if let Some(x) = h3option.max_idle_timeout {
        if x == 0u64 {
-          globals.h3_max_idle_timeout = None;
+          globals.proxy_config.h3_max_idle_timeout = None;
        } else {
-          globals.h3_max_idle_timeout =
+          globals.proxy_config.h3_max_idle_timeout =
            Some(quinn::IdleTimeout::try_from(tokio::time::Duration::from_secs(x)).unwrap())
        }
       }
@@ -188,7 +188,7 @@ pub fn parse_opts(globals: &mut Globals) -> std::result::Result<(), anyhow::Erro
   }
   if let Some(b) = exp.ignore_sni_consistency {
-    globals.sni_consistency = !b;
+    globals.proxy_config.sni_consistency = !b;
     if b {
       info!("Ignore consistency between TLS SNI and Host header (or Request line). Note it violates RFC.");
     }

View file

@@ -1,4 +1,4 @@
-use crate::backend::Backends;
+use crate::{backend::Backends, constants::*};
 use std::net::SocketAddr;
 use std::sync::{
   atomic::{AtomicUsize, Ordering},
@@ -9,6 +9,21 @@ use tokio::time::Duration;
 /// Global object containing proxy configurations and shared object like counters.
 /// But note that in Globals, we do not have Mutex and RwLock. It is indeed, the context shared among async tasks.
 pub struct Globals {
+  /// Configuration parameters for proxy transport and request handlers
+  pub proxy_config: ProxyConfig,
+  /// Shared context - Backend application objects to which http request handler forward incoming requests
+  pub backends: Backends,
+  /// Shared context - Counter for serving requests
+  pub request_count: RequestCount,
+  /// Shared context - Async task runtime handler
+  pub runtime_handle: tokio::runtime::Handle,
+}
+
+/// Configuration parameters for proxy transport and request handlers
+pub struct ProxyConfig {
   pub listen_sockets: Vec<SocketAddr>, // when instantiate server
   pub http_port: Option<u16>, // when instantiate server
   pub https_port: Option<u16>, // when instantiate server
@@ -22,7 +37,6 @@ pub struct Globals {
   // experimentals
   pub sni_consistency: bool, // Handler
-
   // All need to make packet acceptor
   #[cfg(feature = "http3")]
   pub http3: bool,
@@ -38,19 +52,42 @@ pub struct Globals {
   pub h3_max_concurrent_connections: u32,
   #[cfg(feature = "http3")]
   pub h3_max_idle_timeout: Option<quinn::IdleTimeout>,
-
-  // Shared context
-  // Backend application objects to which http request handler forward incoming requests
-  pub backends: Backends,
-  // Counter for serving requests
-  pub request_count: RequestCount,
-  // Async task runtime handler
-  pub runtime_handle: tokio::runtime::Handle,
 }
 
-// // TODO: Implement default for default values
-// #[derive(Debug, Clone)]
-// pub struct ProxyConfig {}
+impl Default for ProxyConfig {
+  fn default() -> Self {
+    Self {
+      listen_sockets: Vec::new(),
+      http_port: None,
+      https_port: None,
+      // TODO: Reconsider each timeout values
+      proxy_timeout: Duration::from_secs(PROXY_TIMEOUT_SEC),
+      upstream_timeout: Duration::from_secs(UPSTREAM_TIMEOUT_SEC),
+      max_clients: MAX_CLIENTS,
+      max_concurrent_streams: MAX_CONCURRENT_STREAMS,
+      keepalive: true,
+      sni_consistency: true,
+      #[cfg(feature = "http3")]
+      http3: false,
+      #[cfg(feature = "http3")]
+      h3_alt_svc_max_age: H3::ALT_SVC_MAX_AGE,
+      #[cfg(feature = "http3")]
+      h3_request_max_body_size: H3::REQUEST_MAX_BODY_SIZE,
+      #[cfg(feature = "http3")]
+      h3_max_concurrent_connections: H3::MAX_CONCURRENT_CONNECTIONS,
+      #[cfg(feature = "http3")]
+      h3_max_concurrent_bidistream: H3::MAX_CONCURRENT_BIDISTREAM.into(),
+      #[cfg(feature = "http3")]
+      h3_max_concurrent_unistream: H3::MAX_CONCURRENT_UNISTREAM.into(),
+      #[cfg(feature = "http3")]
+      h3_max_idle_timeout: Some(quinn::IdleTimeout::try_from(Duration::from_secs(H3::MAX_IDLE_TIMEOUT)).unwrap()),
+    }
+  }
+}
 
 #[derive(Debug, Clone, Default)]
 /// Counter for serving requests

View file

@@ -56,7 +56,7 @@ where
     };
     // check consistency of between TLS SNI and HOST/Request URI Line.
     #[allow(clippy::collapsible_if)]
-    if tls_enabled && self.globals.sni_consistency {
+    if tls_enabled && self.globals.proxy_config.sni_consistency {
       if server_name != tls_server_name.unwrap_or_default() {
         return self.return_with_error_log(StatusCode::MISDIRECTED_REQUEST, &mut log_data);
       }
@@ -75,7 +75,7 @@ where
     if !tls_enabled && backend.https_redirection.unwrap_or(false) {
       debug!("Redirect to secure connection: {}", &backend.server_name);
       log_data.status_code(&StatusCode::PERMANENT_REDIRECT).output();
-      return secure_redirection(&backend.server_name, self.globals.https_port, &req);
+      return secure_redirection(&backend.server_name, self.globals.proxy_config.https_port, &req);
     }
 
     // Find reverse proxy for given path and choose one of upstream host
@@ -112,7 +112,7 @@ where
     // Forward request to
     let mut res_backend = {
-      match timeout(self.globals.upstream_timeout, self.forwarder.request(req)).await {
+      match timeout(self.globals.proxy_config.upstream_timeout, self.forwarder.request(req)).await {
        Err(_) => {
          return self.return_with_error_log(StatusCode::GATEWAY_TIMEOUT, &mut log_data);
        }
@@ -207,14 +207,14 @@ where
     #[cfg(feature = "http3")]
     {
       // TODO: Workaround for avoid h3 for client authentication
-      if self.globals.http3 && chosen_backend.client_ca_cert_path.is_none() {
-        if let Some(port) = self.globals.https_port {
+      if self.globals.proxy_config.http3 && chosen_backend.client_ca_cert_path.is_none() {
+        if let Some(port) = self.globals.proxy_config.https_port {
          add_header_entry_overwrite_if_exist(
            headers,
            header::ALT_SVC.as_str(),
            format!(
              "h3=\":{}\"; ma={}, h3-29=\":{}\"; ma={}",
-              port, self.globals.h3_alt_svc_max_age, port, self.globals.h3_alt_svc_max_age
+              port, self.globals.proxy_config.h3_alt_svc_max_age, port, self.globals.proxy_config.h3_alt_svc_max_age
            ),
          )?;
        }
@@ -225,7 +225,7 @@ where
     }
     #[cfg(not(feature = "http3"))]
     {
-      if let Some(port) = self.globals.https_port {
+      if let Some(port) = self.globals.proxy_config.https_port {
        headers.remove(header::ALT_SVC.as_str());
       }
     }
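
For reference, with hypothetical values https_port = 443 and h3_alt_svc_max_age = 3600, the format! call above yields an advertisement of the form

  Alt-Svc: h3=":443"; ma=3600, h3-29=":443"; ma=3600

i.e., both the final h3 ALPN token and the draft-29 token point clients at the TLS port with the configured max-age.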

View file

@@ -16,22 +16,13 @@ mod proxy;
 mod utils;
 
 use crate::{
-  backend::{Backend, Backends},
-  config::parse_opts,
-  constants::*,
-  error::*,
-  globals::*,
-  handler::HttpMessageHandlerBuilder,
-  log::*,
+  backend::Backends, config::parse_opts, error::*, globals::*, handler::HttpMessageHandlerBuilder, log::*,
   proxy::ProxyBuilder,
-  utils::ServerNameBytesExp,
 };
 use futures::future::select_all;
 use hyper::Client;
 // use hyper_trust_dns::TrustDnsResolver;
-use rustc_hash::FxHashMap as HashMap;
 use std::sync::Arc;
-use tokio::time::Duration;
 
 fn main() {
   init_logger();
@@ -43,41 +34,12 @@ fn main() {
   runtime.block_on(async {
     let mut globals = Globals {
-      listen_sockets: Vec::new(),
-      http_port: None,
-      https_port: None,
-      // TODO: Reconsider each timeout values
-      proxy_timeout: Duration::from_secs(PROXY_TIMEOUT_SEC),
-      upstream_timeout: Duration::from_secs(UPSTREAM_TIMEOUT_SEC),
-      max_clients: MAX_CLIENTS,
+      // TODO: wrap the proxy config in an Arc so that only it has to be passed around and reused; same for backends
+      proxy_config: ProxyConfig::default(),
+      backends: Backends::default(),
       request_count: Default::default(),
-      max_concurrent_streams: MAX_CONCURRENT_STREAMS,
-      keepalive: true,
       runtime_handle: runtime.handle().clone(),
-      backends: Backends {
-        default_server_name_bytes: None,
-        apps: HashMap::<ServerNameBytesExp, Backend>::default(),
-      },
-      sni_consistency: true,
-      #[cfg(feature = "http3")]
-      http3: false,
-      #[cfg(feature = "http3")]
-      h3_alt_svc_max_age: H3::ALT_SVC_MAX_AGE,
-      #[cfg(feature = "http3")]
-      h3_request_max_body_size: H3::REQUEST_MAX_BODY_SIZE,
-      #[cfg(feature = "http3")]
-      h3_max_concurrent_connections: H3::MAX_CONCURRENT_CONNECTIONS,
-      #[cfg(feature = "http3")]
-      h3_max_concurrent_bidistream: H3::MAX_CONCURRENT_BIDISTREAM.into(),
-      #[cfg(feature = "http3")]
-      h3_max_concurrent_unistream: H3::MAX_CONCURRENT_UNISTREAM.into(),
-      #[cfg(feature = "http3")]
-      h3_max_idle_timeout: Some(quinn::IdleTimeout::try_from(Duration::from_secs(H3::MAX_IDLE_TIMEOUT)).unwrap()),
     };
 
     if let Err(e) = parse_opts(&mut globals) {
@@ -105,10 +67,10 @@ async fn entrypoint(globals: Arc<Globals>) -> Result<()> {
     .globals(globals.clone())
     .build()?;
 
-  let addresses = globals.listen_sockets.clone();
+  let addresses = globals.proxy_config.listen_sockets.clone();
   let futures = select_all(addresses.into_iter().map(|addr| {
     let mut tls_enabled = false;
-    if let Some(https_port) = globals.https_port {
+    if let Some(https_port) = globals.proxy_config.https_port {
       tls_enabled = https_port == addr.port()
     }
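
The TODO added to main.rs hints at a follow-up step: wrapping ProxyConfig (and Backends) in an Arc so that tasks clone a cheap handle instead of borrowing the whole Globals. A minimal sketch of that direction, using a trimmed stand-in type and plain threads; this is an assumption about the intent, not part of this commit:

use std::sync::Arc;

#[derive(Default)]
struct ProxyConfig {
  http_port: Option<u16>, // trimmed stand-in for the real struct
}

fn spawn_workers(proxy_config: Arc<ProxyConfig>) {
  let handles: Vec<_> = (0..4)
    .map(|_| {
      let cfg = proxy_config.clone(); // cheap pointer clone per task
      std::thread::spawn(move || {
        let _ = cfg.http_port; // each task reads the shared, immutable config
      })
    })
    .collect();
  for h in handles {
    h.join().unwrap();
  }
}

fn main() {
  spawn_workers(Arc::new(ProxyConfig::default()));
}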

View file

@@ -43,7 +43,7 @@ where
       // We consider the connection count separately from the stream count.
       // Max clients for h1/h2 = max 'stream' for h3.
       let request_count = self.globals.request_count.clone();
-      if request_count.increment() > self.globals.max_clients {
+      if request_count.increment() > self.globals.proxy_config.max_clients {
         request_count.decrement();
         h3_conn.shutdown(0).await?;
         break;
@@ -54,7 +54,7 @@ where
       let tls_server_name_inner = tls_server_name.clone();
       self.globals.runtime_handle.spawn(async move {
         if let Err(e) = timeout(
-          self_inner.globals.proxy_timeout + Duration::from_secs(1), // timeout per stream are considered as same as one in http2
+          self_inner.globals.proxy_config.proxy_timeout + Duration::from_secs(1), // timeout per stream are considered as same as one in http2
          self_inner.stream_serve_h3(req, stream, client_addr, tls_server_name_inner),
         )
         .await
@@ -97,7 +97,7 @@ where
     // Buffering and sending body through channel for protocol conversion like h3 -> h2/http1.1
     // The underling buffering, i.e., buffer given by the API recv_data.await?, is handled by quinn.
-    let max_body_size = self.globals.h3_request_max_body_size;
+    let max_body_size = self.globals.proxy_config.h3_request_max_body_size;
     self.globals.runtime_handle.spawn(async move {
       let mut sender = body_sender;
       let mut size = 0usize;

View file

@@ -56,7 +56,7 @@ where
     I: AsyncRead + AsyncWrite + Send + Unpin + 'static,
   {
     let request_count = self.globals.request_count.clone();
-    if request_count.increment() > self.globals.max_clients {
+    if request_count.increment() > self.globals.proxy_config.max_clients {
       request_count.decrement();
       return;
     }
@@ -64,7 +64,7 @@ where
     self.globals.runtime_handle.clone().spawn(async move {
       timeout(
-        self.globals.proxy_timeout + Duration::from_secs(1),
+        self.globals.proxy_config.proxy_timeout + Duration::from_secs(1),
        server
          .serve_connection(
            stream,
@@ -103,8 +103,8 @@ where
   pub async fn start(self) -> Result<()> {
     let mut server = Http::new();
-    server.http1_keep_alive(self.globals.keepalive);
-    server.http2_max_concurrent_streams(self.globals.max_concurrent_streams);
+    server.http1_keep_alive(self.globals.proxy_config.keepalive);
+    server.http2_max_concurrent_streams(self.globals.proxy_config.max_concurrent_streams);
     server.pipeline_flush(true);
     let executor = LocalExecutor::new(self.globals.runtime_handle.clone());
     let server = server.with_executor(executor);

View file

@@ -129,13 +129,13 @@ where
     let mut transport_config_quic = TransportConfig::default();
     transport_config_quic
-      .max_concurrent_bidi_streams(self.globals.h3_max_concurrent_bidistream)
-      .max_concurrent_uni_streams(self.globals.h3_max_concurrent_unistream)
-      .max_idle_timeout(self.globals.h3_max_idle_timeout);
+      .max_concurrent_bidi_streams(self.globals.proxy_config.h3_max_concurrent_bidistream)
+      .max_concurrent_uni_streams(self.globals.proxy_config.h3_max_concurrent_unistream)
+      .max_idle_timeout(self.globals.proxy_config.h3_max_idle_timeout);
     let mut server_config_h3 = QuicServerConfig::with_crypto(Arc::new(rustls_server_config));
     server_config_h3.transport = Arc::new(transport_config_quic);
-    server_config_h3.concurrent_connections(self.globals.h3_max_concurrent_connections);
+    server_config_h3.concurrent_connections(self.globals.proxy_config.h3_max_concurrent_connections);
     let endpoint = Endpoint::server(server_config_h3, self.listening_on)?;
     let mut server_crypto: Option<Arc<ServerCrypto>> = None;
@@ -212,7 +212,7 @@ where
     }
     #[cfg(feature = "http3")]
     {
-      if self.globals.http3 {
+      if self.globals.proxy_config.http3 {
        tokio::select! {
          _= self.cert_service(tx) => {
            error!("Cert service for TLS exited");