Merge pull request #54 from junkurihara/clean-up-global-object
Clean up global object and change cert reloader pattern

commit 7311dbc68e
19 changed files with 773 additions and 641 deletions
Cargo.toml
@@ -22,7 +22,7 @@ clap = { version = "4.3.11", features = ["std", "cargo", "wrap_help"] }
 rand = "0.8.5"
 toml = { version = "0.7.6", default-features = false, features = ["parse"] }
 rustc-hash = "1.1.0"
-serde = { version = "1.0.167", default-features = false, features = ["derive"] }
+serde = { version = "1.0.171", default-features = false, features = ["derive"] }
 bytes = "1.4.0"
 thiserror = "1.0.43"
 x509-parser = "0.15.0"
@@ -31,11 +31,12 @@ futures = { version = "0.3.28", features = ["alloc", "async-await"] }
 tokio = { version = "1.29.1", default-features = false, features = [
   "net",
   "rt-multi-thread",
-  "parking_lot",
   "time",
   "sync",
   "macros",
 ] }
+async-trait = "0.1.71"
+hot_reload = "0.1.2" # reloading certs

 # http and tls
 hyper = { version = "0.14.27", default-features = false, features = [
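hot_reload is the basis of the new certificate reloader pattern introduced in src/proxy/crypto_service.rs further down. Below is a minimal sketch (not part of this diff) of the idea, a reloader that re-produces a target value each time it is polled, using only the trait surface visible in this PR (Reload, ReloaderError); the names FileContents and FileReloader are hypothetical, and the trait bounds on the target are assumed to match the derives used by ServerCryptoBase below.

use async_trait::async_trait;
use hot_reload::{Reload, ReloaderError};

/// Hypothetical reload target: the raw contents of a watched file.
#[derive(Debug, PartialEq, Eq, Clone, Default)]
pub struct FileContents(String);

/// Hypothetical reloader: re-reads the file each time the service polls it.
pub struct FileReloader {
  path: std::path::PathBuf,
}

#[async_trait]
impl Reload<FileContents> for FileReloader {
  type Source = std::path::PathBuf;

  async fn new(source: &Self::Source) -> Result<Self, ReloaderError<FileContents>> {
    Ok(Self { path: source.clone() })
  }

  async fn reload(&self) -> Result<Option<FileContents>, ReloaderError<FileContents>> {
    let body = tokio::fs::read_to_string(&self.path)
      .await
      .map_err(|_| ReloaderError::<FileContents>::Reload("failed to read the watched file"))?;
    Ok(Some(FileContents(body)))
  }
}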
2  quinn (submodule)
@@ -1 +1 @@
-Subproject commit b30711f5595983989b60bbbad0ac3f067be7a596
+Subproject commit e652b6d999f053ffe21eeea247854882ae480281
backend module
@@ -13,26 +13,10 @@ pub use self::{
   upstream::{ReverseProxy, Upstream, UpstreamGroup, UpstreamGroupBuilder},
   upstream_opts::UpstreamOption,
 };
-use crate::{
-  log::*,
-  utils::{BytesName, PathNameBytesExp, ServerNameBytesExp},
-};
+use crate::utils::{BytesName, PathNameBytesExp, ServerNameBytesExp};
 use derive_builder::Builder;
-use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet};
-use rustls::{OwnedTrustAnchor, RootCertStore};
-use std::{
-  borrow::Cow,
-  fs::File,
-  io::{self, BufReader, Cursor, Read},
-  path::PathBuf,
-  sync::Arc,
-};
-use tokio_rustls::rustls::{
-  server::ResolvesServerCertUsingSni,
-  sign::{any_supported_type, CertifiedKey},
-  Certificate, PrivateKey, ServerConfig,
-};
-use x509_parser::prelude::*;
+use rustc_hash::FxHashMap as HashMap;
+use std::{borrow::Cow, path::PathBuf};

 /// Struct serving information to route incoming connections, like server name to be handled and tls certs/keys settings.
 #[derive(Builder)]
@@ -79,264 +63,9 @@ fn opt_string_to_opt_pathbuf(input: &Option<String>) -> Option<PathBuf> {
   input.to_owned().as_ref().map(PathBuf::from)
 }

-impl Backend {
-  pub fn read_certs_and_key(&self) -> io::Result<CertifiedKey> { ... }
-  fn read_client_ca_certs(&self) -> io::Result<(Vec<OwnedTrustAnchor>, HashSet<Vec<u8>>)> { ... }
-}
 [removed: both method bodies; the PEM loading logic reappears as read_certs_and_keys() in the new
 src/cert_reader.rs, and the CertifiedKey assembly / client-CA parsing reappears as
 CertsAndKeys::parse_server_certs_and_keys() and CertsAndKeys::parse_client_ca_certs() in the new
 src/proxy/crypto_service.rs]
+#[derive(Default)]
 /// HashMap and some meta information for multiple Backend structs.
 pub struct Backends {
   pub apps: HashMap<ServerNameBytesExp, Backend>, // matched against the host extracted via hyper::Uri
   pub default_server_name_bytes: Option<ServerNameBytesExp>, // for plaintext http
 }
-
-pub type SniServerCryptoMap = HashMap<ServerNameBytesExp, Arc<ServerConfig>>;
-pub struct ServerCrypto {
-  // For Quic/HTTP3, only servers with no client authentication
-  pub inner_global_no_client_auth: Arc<ServerConfig>,
-  // For TLS over TCP/HTTP2 and 1.1, map of SNI to server_crypto for all given servers
-  pub inner_local_map: Arc<SniServerCryptoMap>,
-}
-
-impl Backends {
-  pub async fn generate_server_crypto(&self) -> Result<ServerCrypto, anyhow::Error> { ... }
-}
 [removed: SniServerCryptoMap, ServerCrypto and Backends::generate_server_crypto(), which built the
 per-SNI rustls ServerConfig map and the global no-client-auth config (ALPN h3 / hq-29 / h2 / http/1.1,
 optional client auth); ServerCrypto and SniServerCryptoMap move to src/proxy/crypto_service.rs and the
 builder logic becomes TryInto<Arc<ServerCrypto>> for &ServerCryptoBase there]
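With the certificate-reading methods gone and Default derived, a Backends map is now assembled in the config layer instead of being mutated through Globals; the two lines below are taken verbatim from the build_globals() changes further down in config/parse.rs.

let mut backends = Backends::default();
backends.apps.insert(server_name_string.to_server_name_vec(), backend);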
93  src/cert_reader.rs  (new file)
@@ -0,0 +1,93 @@
use crate::{log::*, proxy::CertsAndKeys};
use rustls::{Certificate, PrivateKey};
use std::{
  fs::File,
  io::{self, BufReader, Cursor, Read},
  path::PathBuf,
};

/// Read certificates and private keys from file
pub(crate) fn read_certs_and_keys(
  cert_path: &PathBuf,
  cert_key_path: &PathBuf,
  client_ca_cert_path: Option<&PathBuf>,
) -> Result<CertsAndKeys, io::Error> {
  debug!("Read TLS server certificates and private key");

  let certs: Vec<_> = {
    let certs_path_str = cert_path.display().to_string();
    let mut reader = BufReader::new(File::open(cert_path).map_err(|e| {
      io::Error::new(
        e.kind(),
        format!("Unable to load the certificates [{certs_path_str}]: {e}"),
      )
    })?);
    rustls_pemfile::certs(&mut reader)
      .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the certificates"))?
  }
  .drain(..)
  .map(Certificate)
  .collect();

  let cert_keys: Vec<_> = {
    let cert_key_path_str = cert_key_path.display().to_string();
    let encoded_keys = {
      let mut encoded_keys = vec![];
      File::open(cert_key_path)
        .map_err(|e| {
          io::Error::new(
            e.kind(),
            format!("Unable to load the certificate keys [{cert_key_path_str}]: {e}"),
          )
        })?
        .read_to_end(&mut encoded_keys)?;
      encoded_keys
    };
    let mut reader = Cursor::new(encoded_keys);
    let pkcs8_keys = rustls_pemfile::pkcs8_private_keys(&mut reader).map_err(|_| {
      io::Error::new(
        io::ErrorKind::InvalidInput,
        "Unable to parse the certificates private keys (PKCS8)",
      )
    })?;
    reader.set_position(0);
    let mut rsa_keys = rustls_pemfile::rsa_private_keys(&mut reader)?;
    let mut keys = pkcs8_keys;
    keys.append(&mut rsa_keys);
    if keys.is_empty() {
      return Err(io::Error::new(
        io::ErrorKind::InvalidInput,
        "No private keys found - Make sure that they are in PKCS#8/PEM format",
      ));
    }
    keys.drain(..).map(PrivateKey).collect()
  };

  let client_ca_certs = if let Some(path) = client_ca_cert_path {
    debug!("Read CA certificates for client authentication");
    // Reads client certificate and returns client
    let certs: Vec<_> = {
      let certs_path_str = path.display().to_string();
      let mut reader = BufReader::new(File::open(path).map_err(|e| {
        io::Error::new(
          e.kind(),
          format!("Unable to load the client certificates [{certs_path_str}]: {e}"),
        )
      })?);
      rustls_pemfile::certs(&mut reader)
        .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the client certificates"))?
    }
    .drain(..)
    .map(Certificate)
    .collect();
    Some(certs)
  } else {
    None
  };

  Ok(CertsAndKeys {
    certs,
    cert_keys,
    client_ca_certs,
  })
}
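A minimal usage sketch of the new helper (not part of this diff; the file paths are hypothetical), based only on the signature and the CertsAndKeys fields shown above:

use std::{io, path::PathBuf};

fn load_server_tls_material() -> Result<(), io::Error> {
  let cert_path = PathBuf::from("/etc/rpxy/server.crt"); // hypothetical path
  let key_path = PathBuf::from("/etc/rpxy/server.key");  // hypothetical path
  // No client CA path supplied, so client_ca_certs comes back as None.
  let loaded = read_certs_and_keys(&cert_path, &key_path, None)?;
  assert!(loaded.client_ca_certs.is_none());
  Ok(())
}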
config/mod.rs
@@ -1,4 +1,4 @@
 mod parse;
 mod toml;

-pub use parse::parse_opts;
+pub use parse::build_globals;
config/parse.rs
@@ -1,17 +1,9 @@
-use super::toml::{ConfigToml, ReverseProxyOption};
-use crate::{
-  backend::{BackendBuilder, ReverseProxy, Upstream, UpstreamGroup, UpstreamGroupBuilder, UpstreamOption},
-  constants::*,
-  error::*,
-  globals::*,
-  log::*,
-  utils::{BytesName, PathNameBytesExp},
-};
+use super::toml::ConfigToml;
+use crate::{backend::Backends, error::*, globals::*, log::*, utils::BytesName};
 use clap::Arg;
-use rustc_hash::FxHashMap as HashMap;
-use std::net::SocketAddr;
+use tokio::runtime::Handle;

-pub fn parse_opts(globals: &mut Globals) -> std::result::Result<(), anyhow::Error> {
+pub fn build_globals(runtime_handle: Handle) -> std::result::Result<Globals, anyhow::Error> {
   let _ = include_str!("../../Cargo.toml");
   let options = clap::command!().arg(
     Arg::new("config_file")
@@ -22,6 +14,7 @@
   );
   let matches = options.get_matches();

+  ///////////////////////////////////
   let config = if let Some(config_file_path) = matches.get_one::<String>("config_file") {
     ConfigToml::new(config_file_path)?
   } else {
@@ -29,117 +22,67 @@
     ConfigToml::default()
   };

 [removed: inline port-spec assertions against &mut Globals, LISTEN_ADDRESSES_V4/V6 selection,
 globals.listen_sockets construction and the max_clients / max_concurrent_streams copies; this
 now happens in TryInto<ProxyConfig> for &ConfigToml (config/toml.rs)]
+  ///////////////////////////////////
+  // build proxy config
+  let proxy_config: ProxyConfig = (&config).try_into()?;
+  // For loggings
+  if proxy_config.listen_sockets.iter().any(|addr| addr.is_ipv6()) {
+    info!("Listen both IPv4 and IPv6")
+  } else {
+    info!("Listen IPv4")
+  }
+  if proxy_config.http_port.is_some() {
+    info!("Listen port: {}", proxy_config.http_port.unwrap());
+  }
+  if proxy_config.https_port.is_some() {
+    info!("Listen port: {} (for TLS)", proxy_config.https_port.unwrap());
+  }
+  if proxy_config.http3 {
+    info!("Experimental HTTP/3.0 is enabled. Note it is still very unstable.");
+  }
+  if !proxy_config.sni_consistency {
+    info!("Ignore consistency between TLS SNI and Host header (or Request line). Note it violates RFC.");
+  }

-  // backend apps
-  ensure!(config.apps.is_some(), "Missing application spec.");
-  let apps = config.apps.unwrap();
+  ///////////////////////////////////
+  // backend_apps
+  let apps = config.apps.ok_or(anyhow!("Missing application spec"))?;
+
+  // assertions for all backend apps
   ensure!(!apps.0.is_empty(), "Wrong application spec.");
+  // if only https_port is specified, tls must be configured for all apps
+  if proxy_config.http_port.is_none() {
+    ensure!(
+      apps.0.iter().all(|(_, app)| app.tls.is_some()),
+      "Some apps serves only plaintext HTTP"
+    );
+  }
+  // https redirection can be configured if both ports are active
+  if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
+    ensure!(
+      apps.0.iter().all(|(_, app)| {
+        if let Some(tls) = app.tls.as_ref() {
+          tls.https_redirection.is_none()
+        } else {
+          true
+        }
+      }),
+      "https_redirection can be specified only when both http_port and https_port are specified"
+    );
+  }

-  // each app
+  // build backends
+  let mut backends = Backends::default();
   for (app_name, app) in apps.0.iter() {
-    [removed: hand-driven BackendBuilder / reverse-proxy / TLS / https_redirection handling per app,
-    inserting into globals.backends.apps; replaced by TryInto<Backend> for &Application (config/toml.rs)]
+    let server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
+    let backend = app.try_into()?;
+    backends.apps.insert(server_name_string.to_server_name_vec(), backend);
     info!("Registering application: {} ({})", app_name, server_name_string);
   }

   // default backend application for plaintext http requests
   if let Some(d) = config.default_app {
-    let d_sn: Vec<&str> = globals
-      .backends
+    let d_sn: Vec<&str> = backends
       .apps
       .iter()
       .filter(|(_k, v)| v.app_name == d)
@@ -150,86 +93,17 @@
       "Serving plaintext http for requests to unconfigured server_name by app {} (server_name: {}).",
       d, d_sn[0]
     );
-      globals.backends.default_server_name_bytes = Some(d_sn[0].to_server_name_vec());
+      backends.default_server_name_bytes = Some(d_sn[0].to_server_name_vec());
     }
   }

 [removed: the per-field copy of config.experimental (h3 options, ignore_sni_consistency) into
 &mut Globals, and the get_reverse_proxy() helper at the bottom of this file; both are replaced
 by TryInto<ProxyConfig> for &ConfigToml and TryInto<ReverseProxy> for &Application in config/toml.rs]
+  ///////////////////////////////////
+  let globals = Globals {
+    proxy_config,
+    backends,
+    request_count: Default::default(),
+    runtime_handle,
+  };

-  Ok(())
+  Ok(globals)
 }
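The conversions defined in config/toml.rs (next section) are what keep the build_globals() body above this short. A compressed sketch of the chain follows; every call is taken from this diff, with error handling collapsed to unwrap and a hypothetical config path for brevity.

let config = ConfigToml::new("rpxy.toml").unwrap();             // path is hypothetical
let proxy_config: ProxyConfig = (&config).try_into().unwrap();  // ports, sockets, timeouts, h3 knobs
let mut backends = Backends::default();
for (_name, app) in config.apps.as_ref().unwrap().0.iter() {
  let backend: Backend = app.try_into().unwrap();               // per-app TLS paths + reverse proxy
  backends
    .apps
    .insert(app.server_name.as_ref().unwrap().to_server_name_vec(), backend);
}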
config/toml.rs
@@ -1,7 +1,13 @@
-use crate::{backend::Upstream, error::*};
+use crate::{
+  backend::{Backend, BackendBuilder, ReverseProxy, Upstream, UpstreamGroup, UpstreamGroupBuilder, UpstreamOption},
+  constants::*,
+  error::*,
+  globals::ProxyConfig,
+  utils::PathNameBytesExp,
+};
 use rustc_hash::FxHashMap as HashMap;
 use serde::Deserialize;
-use std::fs;
+use std::{fs, net::SocketAddr};

 #[derive(Deserialize, Debug, Default)]
 pub struct ConfigToml {
@@ -65,25 +71,196 @@ pub struct UpstreamParams {
   pub location: String,
   pub tls: Option<bool>,
 }
-impl UpstreamParams {
-  pub fn to_upstream(&self) -> Result<Upstream> {
-    let mut scheme = "http";
-    if let Some(t) = self.tls {
-      if t {
-        scheme = "https";
-      }
-    }
+impl TryInto<ProxyConfig> for &ConfigToml {
+  type Error = anyhow::Error;
+
+  fn try_into(self) -> std::result::Result<ProxyConfig, Self::Error> {
+    let mut proxy_config = ProxyConfig {
+      // listen port and socket
+      http_port: self.listen_port,
+      https_port: self.listen_port_tls,
+      ..Default::default()
+    };
+    ensure!(
+      proxy_config.http_port.is_some() || proxy_config.https_port.is_some(),
+      anyhow!("Either/Both of http_port or https_port must be specified")
+    );
+    if proxy_config.http_port.is_some() && proxy_config.https_port.is_some() {
+      ensure!(
+        proxy_config.http_port.unwrap() != proxy_config.https_port.unwrap(),
+        anyhow!("http_port and https_port must be different")
+      );
+    }
+
+    // NOTE: when [::]:xx is bound, both v4 and v6 listeners are enabled.
+    let listen_addresses: Vec<&str> = if let Some(true) = self.listen_ipv6 {
+      LISTEN_ADDRESSES_V6.to_vec()
+    } else {
+      LISTEN_ADDRESSES_V4.to_vec()
+    };
+    proxy_config.listen_sockets = listen_addresses
+      .iter()
+      .flat_map(|addr| {
+        let mut v: Vec<SocketAddr> = vec![];
+        if let Some(port) = proxy_config.http_port {
+          v.push(format!("{addr}:{port}").parse().unwrap());
+        }
+        if let Some(port) = proxy_config.https_port {
+          v.push(format!("{addr}:{port}").parse().unwrap());
+        }
+        v
+      })
+      .collect();
+
+    // max values
+    if let Some(c) = self.max_clients {
+      proxy_config.max_clients = c as usize;
+    }
+    if let Some(c) = self.max_concurrent_streams {
+      proxy_config.max_concurrent_streams = c;
+    }
+
+    // experimental
+    if let Some(exp) = &self.experimental {
+      #[cfg(feature = "http3")]
+      {
+        if let Some(h3option) = &exp.h3 {
+          proxy_config.http3 = true;
+          if let Some(x) = h3option.alt_svc_max_age {
+            proxy_config.h3_alt_svc_max_age = x;
+          }
+          if let Some(x) = h3option.request_max_body_size {
+            proxy_config.h3_request_max_body_size = x;
+          }
+          if let Some(x) = h3option.max_concurrent_connections {
+            proxy_config.h3_max_concurrent_connections = x;
+          }
+          if let Some(x) = h3option.max_concurrent_bidistream {
+            proxy_config.h3_max_concurrent_bidistream = x.into();
+          }
+          if let Some(x) = h3option.max_concurrent_unistream {
+            proxy_config.h3_max_concurrent_unistream = x.into();
+          }
+          if let Some(x) = h3option.max_idle_timeout {
+            if x == 0u64 {
+              proxy_config.h3_max_idle_timeout = None;
+            } else {
+              proxy_config.h3_max_idle_timeout =
+                Some(quinn::IdleTimeout::try_from(tokio::time::Duration::from_secs(x)).unwrap())
+            }
+          }
+        }
+      }
+
+      if let Some(ignore) = exp.ignore_sni_consistency {
+        proxy_config.sni_consistency = !ignore;
+      }
+    }
+
+    Ok(proxy_config)
+  }
+}
+
+impl ConfigToml {
+  pub fn new(config_file: &str) -> std::result::Result<Self, RpxyError> {
+    let config_str = fs::read_to_string(config_file).map_err(RpxyError::Io)?;
+
+    toml::from_str(&config_str).map_err(RpxyError::TomlDe)
+  }
+}
+
+impl TryInto<Backend> for &Application {
+  type Error = anyhow::Error;
+
+  fn try_into(self) -> std::result::Result<Backend, Self::Error> {
+    let server_name_string = self.server_name.as_ref().ok_or(anyhow!("Missing server_name"))?;
+
+    // backend builder
+    let mut backend_builder = BackendBuilder::default();
+    // reverse proxy settings
+    let reverse_proxy = self.try_into()?;
+
+    backend_builder
+      .app_name(server_name_string)
+      .server_name(server_name_string)
+      .reverse_proxy(reverse_proxy);
+
+    // TLS settings and build backend instance
+    let backend = if self.tls.is_none() {
+      backend_builder.build()?
+    } else {
+      let tls = self.tls.as_ref().unwrap();
+      ensure!(tls.tls_cert_key_path.is_some() && tls.tls_cert_path.is_some());
+
+      let https_redirection = if tls.https_redirection.is_none() {
+        Some(true) // Default true
+      } else {
+        tls.https_redirection
+      };
+
+      backend_builder
+        .tls_cert_path(&tls.tls_cert_path)
+        .tls_cert_key_path(&tls.tls_cert_key_path)
+        .https_redirection(https_redirection)
+        .client_ca_cert_path(&tls.client_ca_cert_path)
+        .build()?
+    };
+    Ok(backend)
+  }
+}
+
+impl TryInto<ReverseProxy> for &Application {
+  type Error = anyhow::Error;
+
+  fn try_into(self) -> std::result::Result<ReverseProxy, Self::Error> {
+    let server_name_string = self.server_name.as_ref().ok_or(anyhow!("Missing server_name"))?;
+    let rp_settings = self.reverse_proxy.as_ref().ok_or(anyhow!("Missing reverse_proxy"))?;
+
+    let mut upstream: HashMap<PathNameBytesExp, UpstreamGroup> = HashMap::default();
+
+    rp_settings.iter().for_each(|rpo| {
+      let upstream_vec: Vec<Upstream> = rpo.upstream.iter().map(|x| x.try_into().unwrap()).collect();
+      // let upstream_iter = rpo.upstream.iter().map(|x| x.to_upstream().unwrap());
+      // let lb_upstream_num = vec_upstream.len();
+      let elem = UpstreamGroupBuilder::default()
+        .upstream(&upstream_vec)
+        .path(&rpo.path)
+        .replace_path(&rpo.replace_path)
+        .lb(&rpo.load_balance, &upstream_vec, server_name_string, &rpo.path)
+        .opts(&rpo.upstream_options)
+        .build()
+        .unwrap();
+
+      upstream.insert(elem.path.clone(), elem);
+    });
+    ensure!(
+      rp_settings.iter().filter(|rpo| rpo.path.is_none()).count() < 2,
+      "Multiple default reverse proxy setting"
+    );
+    ensure!(
+      upstream
+        .iter()
+        .all(|(_, elem)| !(elem.opts.contains(&UpstreamOption::ConvertHttpsTo11)
+          && elem.opts.contains(&UpstreamOption::ConvertHttpsTo2))),
+      "either one of force_http11 or force_http2 can be enabled"
+    );
+
+    Ok(ReverseProxy { upstream })
+  }
+}
+
+impl TryInto<Upstream> for &UpstreamParams {
+  type Error = RpxyError;
+
+  fn try_into(self) -> std::result::Result<Upstream, Self::Error> {
+    let scheme = match self.tls {
+      Some(true) => "https",
+      _ => "http",
+    };
     let location = format!("{}://{}", scheme, self.location);
     Ok(Upstream {
       uri: location.parse::<hyper::Uri>().map_err(|e| anyhow!("{}", e))?,
     })
   }
 }
-
-impl ConfigToml {
-  pub fn new(config_file: &str) -> std::result::Result<Self, anyhow::Error> {
-    let config_str = fs::read_to_string(config_file).context("Failed to read config file")?;
-
-    toml::from_str(&config_str).context("Failed to parse toml config")
-  }
-}
constants.rs
@@ -8,6 +8,7 @@ pub const TLS_HANDSHAKE_TIMEOUT_SEC: u64 = 15; // default as with firefox browse
 pub const MAX_CLIENTS: usize = 512;
 pub const MAX_CONCURRENT_STREAMS: u32 = 64;
 pub const CERTS_WATCH_DELAY_SECS: u32 = 60;
+pub const LOAD_CERTS_ONLY_WHEN_UPDATED: bool = true;

 // #[cfg(feature = "http3")]
 // pub const H3_RESPONSE_BUF_SIZE: usize = 65_536; // 64KB
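CERTS_WATCH_DELAY_SECS and the new LOAD_CERTS_ONLY_WHEN_UPDATED presumably parameterize the certificate reloader service; the actual service wiring is outside the hunks shown in this diff, so the following is only an assumed sketch of a polling loop around the Reload implementation defined in src/proxy/crypto_service.rs below.

use hot_reload::Reload;

// Assumed sketch: CryptoReloader (from src/proxy/crypto_service.rs) is taken as in scope.
async fn poll_certs(reloader: &CryptoReloader) {
  let delay = tokio::time::Duration::from_secs(CERTS_WATCH_DELAY_SECS as u64);
  loop {
    if let Ok(Some(_updated)) = reloader.reload().await {
      // hand the fresh ServerCryptoBase to the TLS acceptors;
      // with LOAD_CERTS_ONLY_WHEN_UPDATED, the hand-off would be skipped when nothing changed
    }
    tokio::time::sleep(delay).await;
  }
}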
error.rs
@@ -29,6 +29,9 @@ pub enum RpxyError {
   #[error("I/O Error")]
   Io(#[from] io::Error),

+  #[error("Toml Deserialization Error")]
+  TomlDe(#[from] toml::de::Error),
+
   #[cfg(feature = "http3")]
   #[error("Quic Connection Error")]
   QuicConn(#[from] quinn::ConnectionError),
globals.rs
@@ -1,4 +1,4 @@
-use crate::backend::Backends;
+use crate::{backend::Backends, constants::*};
 use std::net::SocketAddr;
 use std::sync::{
   atomic::{AtomicUsize, Ordering},
@@ -6,25 +6,38 @@
 };
 use tokio::time::Duration;

+/// Global object containing proxy configurations and shared object like counters.
+/// But note that in Globals, we do not have Mutex and RwLock. It is indeed, the context shared among async tasks.
 pub struct Globals {
-  pub listen_sockets: Vec<SocketAddr>,
-  pub http_port: Option<u16>,
-  pub https_port: Option<u16>,
-
-  pub proxy_timeout: Duration,
-  pub upstream_timeout: Duration,
-
-  pub max_clients: usize,
-  pub request_count: RequestCount,
-  pub max_concurrent_streams: u32,
-  pub keepalive: bool,
-
-  pub runtime_handle: tokio::runtime::Handle,
+  /// Configuration parameters for proxy transport and request handlers
+  pub proxy_config: ProxyConfig, // TODO: wrap proxy_config in an Arc and pass only that around; maybe backends too?
+
+  /// Backend application objects to which http request handler forward incoming requests
   pub backends: Backends,

-  // experimentals
-  pub sni_consistency: bool,
+  /// Shared context - Counter for serving requests
+  pub request_count: RequestCount,
+
+  /// Shared context - Async task runtime handler
+  pub runtime_handle: tokio::runtime::Handle,
+}
+
+/// Configuration parameters for proxy transport and request handlers
+pub struct ProxyConfig {
+  pub listen_sockets: Vec<SocketAddr>, // when instantiate server
+  pub http_port: Option<u16>,          // when instantiate server
+  pub https_port: Option<u16>,         // when instantiate server
+
+  pub proxy_timeout: Duration,    // when serving requests at Proxy
+  pub upstream_timeout: Duration, // when serving requests at Handler
+
+  pub max_clients: usize,          // when serving requests
+  pub max_concurrent_streams: u32, // when instantiate server
+  pub keepalive: bool,             // when instantiate server
+
+  // experimentals
+  pub sni_consistency: bool, // Handler
+  // All need to make packet acceptor
   #[cfg(feature = "http3")]
   pub http3: bool,
   #[cfg(feature = "http3")]
@@ -41,7 +54,43 @@ pub struct Globals {
   pub h3_max_idle_timeout: Option<quinn::IdleTimeout>,
 }

+impl Default for ProxyConfig {
+  fn default() -> Self {
+    Self {
+      listen_sockets: Vec::new(),
+      http_port: None,
+      https_port: None,
+
+      // TODO: Reconsider each timeout values
+      proxy_timeout: Duration::from_secs(PROXY_TIMEOUT_SEC),
+      upstream_timeout: Duration::from_secs(UPSTREAM_TIMEOUT_SEC),
+
+      max_clients: MAX_CLIENTS,
+      max_concurrent_streams: MAX_CONCURRENT_STREAMS,
+      keepalive: true,
+
+      sni_consistency: true,
+
+      #[cfg(feature = "http3")]
+      http3: false,
+      #[cfg(feature = "http3")]
+      h3_alt_svc_max_age: H3::ALT_SVC_MAX_AGE,
+      #[cfg(feature = "http3")]
+      h3_request_max_body_size: H3::REQUEST_MAX_BODY_SIZE,
+      #[cfg(feature = "http3")]
+      h3_max_concurrent_connections: H3::MAX_CONCURRENT_CONNECTIONS,
+      #[cfg(feature = "http3")]
+      h3_max_concurrent_bidistream: H3::MAX_CONCURRENT_BIDISTREAM.into(),
+      #[cfg(feature = "http3")]
+      h3_max_concurrent_unistream: H3::MAX_CONCURRENT_UNISTREAM.into(),
+      #[cfg(feature = "http3")]
+      h3_max_idle_timeout: Some(quinn::IdleTimeout::try_from(Duration::from_secs(H3::MAX_IDLE_TIMEOUT)).unwrap()),
+    }
+  }
+}
+
 #[derive(Debug, Clone, Default)]
+/// Counter for serving requests
 pub struct RequestCount(Arc<AtomicUsize>);

 impl RequestCount {
HTTP message handler (handler module)
@@ -56,7 +56,7 @@ where
   };
   // check consistency of between TLS SNI and HOST/Request URI Line.
   #[allow(clippy::collapsible_if)]
-  if tls_enabled && self.globals.sni_consistency {
+  if tls_enabled && self.globals.proxy_config.sni_consistency {
     if server_name != tls_server_name.unwrap_or_default() {
       return self.return_with_error_log(StatusCode::MISDIRECTED_REQUEST, &mut log_data);
     }
@@ -75,7 +75,7 @@ where
   if !tls_enabled && backend.https_redirection.unwrap_or(false) {
     debug!("Redirect to secure connection: {}", &backend.server_name);
     log_data.status_code(&StatusCode::PERMANENT_REDIRECT).output();
-    return secure_redirection(&backend.server_name, self.globals.https_port, &req);
+    return secure_redirection(&backend.server_name, self.globals.proxy_config.https_port, &req);
   }

   // Find reverse proxy for given path and choose one of upstream host
@@ -112,7 +112,7 @@ where

   // Forward request to
   let mut res_backend = {
-    match timeout(self.globals.upstream_timeout, self.forwarder.request(req)).await {
+    match timeout(self.globals.proxy_config.upstream_timeout, self.forwarder.request(req)).await {
       Err(_) => {
         return self.return_with_error_log(StatusCode::GATEWAY_TIMEOUT, &mut log_data);
       }
@@ -207,14 +207,14 @@ where
   #[cfg(feature = "http3")]
   {
     // TODO: Workaround for avoid h3 for client authentication
-    if self.globals.http3 && chosen_backend.client_ca_cert_path.is_none() {
-      if let Some(port) = self.globals.https_port {
+    if self.globals.proxy_config.http3 && chosen_backend.client_ca_cert_path.is_none() {
+      if let Some(port) = self.globals.proxy_config.https_port {
         add_header_entry_overwrite_if_exist(
           headers,
           header::ALT_SVC.as_str(),
           format!(
             "h3=\":{}\"; ma={}, h3-29=\":{}\"; ma={}",
-            port, self.globals.h3_alt_svc_max_age, port, self.globals.h3_alt_svc_max_age
+            port, self.globals.proxy_config.h3_alt_svc_max_age, port, self.globals.proxy_config.h3_alt_svc_max_age
           ),
         )?;
       }
@@ -225,7 +225,7 @@ where
   }
   #[cfg(not(feature = "http3"))]
   {
-    if let Some(port) = self.globals.https_port {
+    if let Some(port) = self.globals.proxy_config.https_port {
       headers.remove(header::ALT_SVC.as_str());
     }
   }
66  src/main.rs
@@ -6,6 +6,7 @@ use tikv_jemallocator::Jemalloc;
 static GLOBAL: Jemalloc = Jemalloc;

 mod backend;
+mod cert_reader;
 mod config;
 mod constants;
 mod error;
@@ -16,22 +17,12 @@ mod proxy;
 mod utils;

 use crate::{
-  backend::{Backend, Backends},
-  config::parse_opts,
-  constants::*,
-  error::*,
-  globals::*,
-  handler::HttpMessageHandlerBuilder,
-  log::*,
-  proxy::ProxyBuilder,
-  utils::ServerNameBytesExp,
+  config::build_globals, error::*, globals::*, handler::HttpMessageHandlerBuilder, log::*, proxy::ProxyBuilder,
 };
 use futures::future::select_all;
 use hyper::Client;
 // use hyper_trust_dns::TrustDnsResolver;
-use rustc_hash::FxHashMap as HashMap;
 use std::sync::Arc;
-use tokio::time::Duration;

 fn main() {
   init_logger();
@@ -42,52 +33,17 @@ fn main() {
   let runtime = runtime_builder.build().unwrap();

   runtime.block_on(async {
-    let mut globals = Globals { /* hand-written defaults for listen sockets, ports, timeouts,
-      max values, empty Backends, sni_consistency and the http3 knobs; these defaults now live
-      in impl Default for ProxyConfig (globals.rs) */ };
-
-    if let Err(e) = parse_opts(&mut globals) {
-      error!("Invalid configuration: {}", e);
-      std::process::exit(1);
-    };
+    let globals = match build_globals(runtime.handle().clone()) {
+      Ok(g) => g,
+      Err(e) => {
+        error!("Invalid configuration: {}", e);
+        std::process::exit(1);
+      }
+    };

     entrypoint(Arc::new(globals)).await.unwrap()
   });
-  warn!("Exit the program");
+  warn!("rpxy exited!");
 }

 // entrypoint creates and spawns tasks of proxy services
@@ -105,10 +61,10 @@ async fn entrypoint(globals: Arc<Globals>) -> Result<()> {
     .globals(globals.clone())
     .build()?;

-  let addresses = globals.listen_sockets.clone();
+  let addresses = globals.proxy_config.listen_sockets.clone();
   let futures = select_all(addresses.into_iter().map(|addr| {
     let mut tls_enabled = false;
-    if let Some(https_port) = globals.https_port {
+    if let Some(https_port) = globals.proxy_config.https_port {
       tls_enabled = https_port == addr.port()
     }
@ -0,0 +1,253 @@
use crate::{
  cert_reader::read_certs_and_keys, // TODO: Trait defining read_certs_and_keys and add struct implementing the trait to backend when build backend
  globals::Globals,
  log::*,
  utils::ServerNameBytesExp,
};
use async_trait::async_trait;
use hot_reload::*;
use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet};
use rustls::{
  server::ResolvesServerCertUsingSni,
  sign::{any_supported_type, CertifiedKey},
  Certificate, OwnedTrustAnchor, PrivateKey, RootCertStore, ServerConfig,
};
use std::{io, sync::Arc};
use x509_parser::prelude::*;

#[derive(Clone)]
/// Reloader service for certificates and keys for TLS
pub struct CryptoReloader {
  globals: Arc<Globals>,
}

/// Certificates and private keys in rustls loaded from files
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct CertsAndKeys {
  pub certs: Vec<Certificate>,
  pub cert_keys: Vec<PrivateKey>,
  pub client_ca_certs: Option<Vec<Certificate>>,
}

pub type SniServerCryptoMap = HashMap<ServerNameBytesExp, Arc<ServerConfig>>;
pub struct ServerCrypto {
  // For Quic/HTTP3, only servers with no client authentication
  pub inner_global_no_client_auth: Arc<ServerConfig>,
  // For TLS over TCP/HTTP2 and 1.1, map of SNI to server_crypto for all given servers
  pub inner_local_map: Arc<SniServerCryptoMap>,
}

/// Reloader target for the certificate reloader service
#[derive(Debug, PartialEq, Eq, Clone, Default)]
pub struct ServerCryptoBase {
  inner: HashMap<ServerNameBytesExp, CertsAndKeys>,
}

#[async_trait]
impl Reload<ServerCryptoBase> for CryptoReloader {
  type Source = Arc<Globals>;
  async fn new(source: &Self::Source) -> Result<Self, ReloaderError<ServerCryptoBase>> {
    Ok(Self {
      globals: source.clone(),
    })
  }

  async fn reload(&self) -> Result<Option<ServerCryptoBase>, ReloaderError<ServerCryptoBase>> {
    let mut certs_and_keys_map = ServerCryptoBase::default();

    for (server_name_bytes_exp, backend) in self.globals.backends.apps.iter() {
      if backend.tls_cert_key_path.is_some() && backend.tls_cert_path.is_some() {
        let tls_cert_key_path = backend.tls_cert_key_path.as_ref().unwrap();
        let tls_cert_path = backend.tls_cert_path.as_ref().unwrap();
        let tls_client_ca_cert_path = backend.client_ca_cert_path.as_ref();
        let certs_and_keys = read_certs_and_keys(tls_cert_path, tls_cert_key_path, tls_client_ca_cert_path)
          .map_err(|_e| ReloaderError::<ServerCryptoBase>::Reload("Failed to reload cert, key or ca cert"))?;

        certs_and_keys_map
          .inner
          .insert(server_name_bytes_exp.to_owned(), certs_and_keys);
      }
    }

    Ok(Some(certs_and_keys_map))
  }
}

impl CertsAndKeys {
  fn parse_server_certs_and_keys(&self) -> Result<CertifiedKey, anyhow::Error> {
    // for (server_name_bytes_exp, certs_and_keys) in self.inner.iter() {
    let signing_key = self
      .cert_keys
      .iter()
      .find_map(|k| {
        if let Ok(sk) = any_supported_type(k) {
          Some(sk)
        } else {
          None
        }
      })
      .ok_or_else(|| {
        io::Error::new(
          io::ErrorKind::InvalidInput,
          "Unable to find a valid certificate and key",
        )
      })?;
    Ok(CertifiedKey::new(self.certs.clone(), signing_key))
  }

  pub fn parse_client_ca_certs(&self) -> Result<(Vec<OwnedTrustAnchor>, HashSet<Vec<u8>>), anyhow::Error> {
    let certs = self.client_ca_certs.as_ref().ok_or(anyhow::anyhow!("No client cert"))?;

    let owned_trust_anchors: Vec<_> = certs
      .iter()
      .map(|v| {
        // let trust_anchor = tokio_rustls::webpki::TrustAnchor::try_from_cert_der(&v.0).unwrap();
        let trust_anchor = webpki::TrustAnchor::try_from_cert_der(&v.0).unwrap();
        rustls::OwnedTrustAnchor::from_subject_spki_name_constraints(
          trust_anchor.subject,
          trust_anchor.spki,
          trust_anchor.name_constraints,
        )
      })
      .collect();

    // TODO: SKID is not used currently
    let subject_key_identifiers: HashSet<_> = certs
      .iter()
      .filter_map(|v| {
        // retrieve ca key id (subject key id)
        let cert = parse_x509_certificate(&v.0).unwrap().1;
        let subject_key_ids = cert
          .iter_extensions()
          .filter_map(|ext| match ext.parsed_extension() {
            ParsedExtension::SubjectKeyIdentifier(skid) => Some(skid),
            _ => None,
          })
          .collect::<Vec<_>>();
        if !subject_key_ids.is_empty() {
          Some(subject_key_ids[0].0.to_owned())
        } else {
          None
        }
      })
      .collect();

    Ok((owned_trust_anchors, subject_key_identifiers))
  }
}

impl TryInto<Arc<ServerCrypto>> for &ServerCryptoBase {
  type Error = anyhow::Error;

  fn try_into(self) -> Result<Arc<ServerCrypto>, Self::Error> {
    let mut resolver_global = ResolvesServerCertUsingSni::new();
    let mut server_crypto_local_map: SniServerCryptoMap = HashMap::default();

    for (server_name_bytes_exp, certs_and_keys) in self.inner.iter() {
      let server_name: String = server_name_bytes_exp.try_into()?;

      // Parse server certificates and private keys
      let Ok(certified_key): Result<CertifiedKey, _> = certs_and_keys.parse_server_certs_and_keys() else {
        warn!("Failed to add certificate for {}", server_name);
        continue;
      };

      let mut resolver_local = ResolvesServerCertUsingSni::new();
      let mut client_ca_roots_local = RootCertStore::empty();

      // add server certificate and key
      if let Err(e) = resolver_local.add(server_name.as_str(), certified_key.to_owned()) {
        error!(
          "{}: Failed to read some certificates and keys {}",
          server_name.as_str(),
          e
        )
      }

      // add client certificate if specified
      if certs_and_keys.client_ca_certs.is_none() {
        // aggregated server config for no client auth server for http3
        if let Err(e) = resolver_global.add(server_name.as_str(), certified_key) {
          error!(
            "{}: Failed to read some certificates and keys {}",
            server_name.as_str(),
            e
          )
        }
      } else {
        // add client certificate if specified
        match certs_and_keys.parse_client_ca_certs() {
          Ok((owned_trust_anchors, _subject_key_ids)) => {
            client_ca_roots_local.add_server_trust_anchors(owned_trust_anchors.into_iter());
          }
          Err(e) => {
            warn!(
              "Failed to add client CA certificate for {}: {}",
              server_name.as_str(),
              e
            );
          }
        }
      }

      let mut server_config_local = if client_ca_roots_local.is_empty() {
        // with no client auth, enable http1.1 -- 3
        #[cfg(not(feature = "http3"))]
        {
          ServerConfig::builder()
            .with_safe_defaults()
            .with_no_client_auth()
            .with_cert_resolver(Arc::new(resolver_local))
        }
        #[cfg(feature = "http3")]
        {
          let mut sc = ServerConfig::builder()
            .with_safe_defaults()
            .with_no_client_auth()
            .with_cert_resolver(Arc::new(resolver_local));
          sc.alpn_protocols = vec![b"h3".to_vec(), b"hq-29".to_vec()]; // TODO: remove hq-29 later?
          sc
        }
      } else {
        // with client auth, enable only http1.1 and 2
        // let client_certs_verifier = rustls::server::AllowAnyAnonymousOrAuthenticatedClient::new(client_ca_roots);
        let client_certs_verifier = rustls::server::AllowAnyAuthenticatedClient::new(client_ca_roots_local);
        ServerConfig::builder()
          .with_safe_defaults()
          .with_client_cert_verifier(Arc::new(client_certs_verifier))
          .with_cert_resolver(Arc::new(resolver_local))
      };
      server_config_local.alpn_protocols.push(b"h2".to_vec());
      server_config_local.alpn_protocols.push(b"http/1.1".to_vec());

      server_crypto_local_map.insert(server_name_bytes_exp.to_owned(), Arc::new(server_config_local));
    }

    //////////////
    let mut server_crypto_global = ServerConfig::builder()
      .with_safe_defaults()
      .with_no_client_auth()
      .with_cert_resolver(Arc::new(resolver_global));

    //////////////////////////////

    #[cfg(feature = "http3")]
    {
      server_crypto_global.alpn_protocols = vec![
        b"h3".to_vec(),
        b"hq-29".to_vec(), // TODO: remove later?
        b"h2".to_vec(),
        b"http/1.1".to_vec(),
      ];
    }
    #[cfg(not(feature = "http3"))]
    {
      server_crypto_global.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
    }

    Ok(Arc::new(ServerCrypto {
      inner_global_no_client_auth: Arc::new(server_crypto_global),
      inner_local_map: Arc::new(server_crypto_local_map),
    }))
  }
}
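Note on the pattern: the new file follows the hot_reload crate's Reload/ReloaderService pair, which replaces the hand-rolled watch-channel cert loop removed further down. Below is a minimal, hedged sketch of how the two halves fit together, based only on the calls visible in this diff (ReloaderService::new, start(), changed(), borrow()); everything else (NumberReloader, the source string, the literal values) is illustrative, and the real wiring is in the start_with_tls changes later in this PR.

// Illustrative sketch of the hot_reload Reload/ReloaderService pattern (not project code).
use async_trait::async_trait;
use hot_reload::*;

#[derive(Clone)]
struct NumberReloader;

#[async_trait]
impl Reload<usize> for NumberReloader {
  type Source = String; // e.g., a path or handle describing what to watch
  async fn new(_source: &Self::Source) -> Result<Self, ReloaderError<usize>> {
    Ok(Self)
  }
  async fn reload(&self) -> Result<Option<usize>, ReloaderError<usize>> {
    // Re-read the watched source and return the freshly built target value.
    Ok(Some(42))
  }
}

async fn run() -> anyhow::Result<()> {
  // (service, receiver): the service periodically calls reload(),
  // the receiver is a watch-like handle consumers can borrow from.
  let (service, mut rx) =
    ReloaderService::<NumberReloader, usize>::new(&"state.txt".to_string(), 10, false)
      .await
      .map_err(|e| anyhow::anyhow!(e))?;
  tokio::spawn(async move {
    let _ = service.start().await; // runs until the receiver side is dropped or an error occurs
  });
  let _ = rx.changed().await;      // wait for a (re)loaded value
  let current = rx.borrow().clone(); // Option<usize>
  println!("{:?}", current);
  Ok(())
}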
@@ -1,7 +1,9 @@
+mod crypto_service;
 mod proxy_client_cert;
 #[cfg(feature = "http3")]
 mod proxy_h3;
 mod proxy_main;
 mod proxy_tls;
 
+pub use crypto_service::CertsAndKeys;
 pub use proxy_main::{Proxy, ProxyBuilder, ProxyBuilderError};
@@ -10,26 +10,18 @@ pub(super) fn check_client_authentication(
   client_certs: Option<&[Certificate]>,
   client_ca_keyids_set_for_sni: Option<&HashSet<Vec<u8>>>,
 ) -> std::result::Result<(), ClientCertsError> {
-  let client_ca_keyids_set = match client_ca_keyids_set_for_sni {
-    Some(c) => c,
-    None => {
-      // No client cert settings for given server name
-      return Ok(());
-    }
+  let Some(client_ca_keyids_set) = client_ca_keyids_set_for_sni else {
+    // No client cert settings for given server name
+    return Ok(());
   };
 
-  let client_certs = match client_certs {
-    Some(c) => {
-      debug!("Incoming TLS client is (temporarily) authenticated via client cert");
-      c
-    }
-    None => {
-      error!("Client certificate is needed for given server name");
-      return Err(ClientCertsError::ClientCertRequired(
-        "Client certificate is needed for given server name".to_string(),
-      ));
-    }
+  let Some(client_certs) = client_certs else {
+    error!("Client certificate is needed for given server name");
+    return Err(ClientCertsError::ClientCertRequired(
+      "Client certificate is needed for given server name".to_string(),
+    ));
   };
+  debug!("Incoming TLS client is (temporarily) authenticated via client cert");
 
   // Check client certificate key ids
   let mut client_certs_parsed_iter = client_certs.iter().filter_map(|d| parse_x509_certificate(&d.0).ok());
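The hunk above (and several later ones) swaps match-based early exits for Rust's let-else, stable since 1.65: bind the happy-path value, diverge otherwise. A tiny standalone sketch of the same shape, with illustrative identifiers:

// Illustrative sketch of the let-else early-exit pattern used throughout this PR.
fn first_even(xs: &[i32]) -> Result<i32, String> {
  // The else branch must diverge (return/continue/break); otherwise the inner value is bound.
  let Some(x) = xs.iter().copied().find(|x| x % 2 == 0) else {
    return Err("no even number given".to_string());
  };
  Ok(x)
}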
@@ -43,7 +43,7 @@ where
     // We consider the connection count separately from the stream count.
     // Max clients for h1/h2 = max 'stream' for h3.
     let request_count = self.globals.request_count.clone();
-    if request_count.increment() > self.globals.max_clients {
+    if request_count.increment() > self.globals.proxy_config.max_clients {
       request_count.decrement();
       h3_conn.shutdown(0).await?;
       break;
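This and the following hunks move per-proxy settings from flat fields on Globals into a nested proxy_config. A rough sketch of the assumed split, pieced together only from the accesses visible in this diff (field types and the placeholder structs are illustrative, not taken from the actual globals module):

// Illustrative only: mirrors the fields this diff reads via self.globals.*
use std::time::Duration;

pub struct Backends;     // placeholder for the real per-SNI backend map (incl. cert/key paths)
pub struct RequestCount; // placeholder for the shared client/stream counter

pub struct Globals {
  pub proxy_config: ProxyConfig,              // static settings parsed from config (this PR's split)
  pub backends: Backends,
  pub request_count: RequestCount,
  pub runtime_handle: tokio::runtime::Handle, // used to spawn per-connection tasks
}

pub struct ProxyConfig {
  pub max_clients: usize,
  pub max_concurrent_streams: u32,
  pub keepalive: bool,
  pub proxy_timeout: Duration,
  pub http3: bool,
  pub h3_request_max_body_size: usize,
  // plus the h3_* stream/connection/idle-timeout knobs referenced in the QUIC hunks below
}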
@@ -54,7 +54,7 @@ where
         let tls_server_name_inner = tls_server_name.clone();
         self.globals.runtime_handle.spawn(async move {
           if let Err(e) = timeout(
-            self_inner.globals.proxy_timeout + Duration::from_secs(1), // timeout per stream is treated the same as in http2
+            self_inner.globals.proxy_config.proxy_timeout + Duration::from_secs(1), // timeout per stream is treated the same as in http2
             self_inner.stream_serve_h3(req, stream, client_addr, tls_server_name_inner),
           )
           .await
@@ -97,7 +97,7 @@ where
 
     // Buffering and sending body through channel for protocol conversion like h3 -> h2/http1.1
     // The underlying buffering, i.e., the buffer given by the API recv_data.await?, is handled by quinn.
-    let max_body_size = self.globals.h3_request_max_body_size;
+    let max_body_size = self.globals.proxy_config.h3_request_max_body_size;
     self.globals.runtime_handle.spawn(async move {
       let mut sender = body_sender;
       let mut size = 0usize;
@@ -56,7 +56,7 @@ where
   I: AsyncRead + AsyncWrite + Send + Unpin + 'static,
 {
     let request_count = self.globals.request_count.clone();
-    if request_count.increment() > self.globals.max_clients {
+    if request_count.increment() > self.globals.proxy_config.max_clients {
       request_count.decrement();
       return;
     }
@@ -64,7 +64,7 @@ where
 
     self.globals.runtime_handle.clone().spawn(async move {
       timeout(
-        self.globals.proxy_timeout + Duration::from_secs(1),
+        self.globals.proxy_config.proxy_timeout + Duration::from_secs(1),
         server
           .serve_connection(
             stream,
@@ -103,8 +103,8 @@ where
 
   pub async fn start(self) -> Result<()> {
     let mut server = Http::new();
-    server.http1_keep_alive(self.globals.keepalive);
-    server.http2_max_concurrent_streams(self.globals.max_concurrent_streams);
+    server.http1_keep_alive(self.globals.proxy_config.keepalive);
+    server.http2_max_concurrent_streams(self.globals.proxy_config.max_concurrent_streams);
     server.pipeline_flush(true);
     let executor = LocalExecutor::new(self.globals.runtime_handle.clone());
     let server = server.with_executor(executor);
@@ -1,11 +1,9 @@
-use super::proxy_main::{LocalExecutor, Proxy};
-use crate::{
-  backend::{ServerCrypto, SniServerCryptoMap},
-  constants::*,
-  error::*,
-  log::*,
-  utils::BytesName,
+use super::{
+  crypto_service::{CryptoReloader, ServerCrypto, ServerCryptoBase, SniServerCryptoMap},
+  proxy_main::{LocalExecutor, Proxy},
 };
+use crate::{constants::*, error::*, log::*, utils::BytesName};
+use hot_reload::{ReloaderReceiver, ReloaderService};
 use hyper::{client::connect::Connect, server::conn::Http};
 #[cfg(feature = "http3")]
 use quinn::{crypto::rustls::HandshakeData, Endpoint, ServerConfig as QuicServerConfig, TransportConfig};
@@ -14,34 +12,18 @@ use rustls::ServerConfig;
 use std::sync::Arc;
 use tokio::{
   net::TcpListener,
-  sync::watch,
-  time::{sleep, timeout, Duration},
+  time::{timeout, Duration},
 };
 
 impl<T> Proxy<T>
 where
   T: Connect + Clone + Sync + Send + 'static,
 {
-  async fn cert_service(&self, server_crypto_tx: watch::Sender<Option<Arc<ServerCrypto>>>) {
-    info!("Start cert watch service");
-    loop {
-      if let Ok(server_crypto) = self.globals.backends.generate_server_crypto().await {
-        if let Err(_e) = server_crypto_tx.send(Some(Arc::new(server_crypto))) {
-          error!("Failed to populate server crypto");
-          break;
-        }
-      } else {
-        error!("Failed to update certs");
-      }
-      sleep(Duration::from_secs(CERTS_WATCH_DELAY_SECS.into())).await;
-    }
-  }
-
   // TCP Listener Service, i.e., http/2 and http/1.1
   async fn listener_service(
     &self,
     server: Http<LocalExecutor>,
-    mut server_crypto_rx: watch::Receiver<Option<Arc<ServerCrypto>>>,
+    mut server_crypto_rx: ReloaderReceiver<ServerCryptoBase>,
   ) -> Result<()> {
     let tcp_listener = TcpListener::bind(&self.listening_on).await?;
     info!("Start TCP proxy serving with HTTPS request for configured host names");
@@ -105,9 +87,14 @@ where
         }
         _ = server_crypto_rx.changed() => {
           if server_crypto_rx.borrow().is_none() {
+            error!("Reloader is broken");
             break;
           }
-          let server_crypto = server_crypto_rx.borrow().clone().unwrap();
+          let cert_keys_map = server_crypto_rx.borrow().clone().unwrap();
+          let Some(server_crypto): Option<Arc<ServerCrypto>> = (&cert_keys_map).try_into().ok() else {
+            error!("Failed to update server crypto");
+            break;
+          };
           server_crypto_map = Some(server_crypto.inner_local_map.clone());
         }
         else => break
@@ -117,7 +104,7 @@ where
   }
 
   #[cfg(feature = "http3")]
-  async fn listener_service_h3(&self, mut server_crypto_rx: watch::Receiver<Option<Arc<ServerCrypto>>>) -> Result<()> {
+  async fn listener_service_h3(&self, mut server_crypto_rx: ReloaderReceiver<ServerCryptoBase>) -> Result<()> {
     info!("Start UDP proxy serving with HTTP/3 request for configured host names");
     // first set as null config server
     let rustls_server_config = ServerConfig::builder()
@@ -129,13 +116,13 @@ where
 
     let mut transport_config_quic = TransportConfig::default();
     transport_config_quic
-      .max_concurrent_bidi_streams(self.globals.h3_max_concurrent_bidistream)
-      .max_concurrent_uni_streams(self.globals.h3_max_concurrent_unistream)
-      .max_idle_timeout(self.globals.h3_max_idle_timeout);
+      .max_concurrent_bidi_streams(self.globals.proxy_config.h3_max_concurrent_bidistream)
+      .max_concurrent_uni_streams(self.globals.proxy_config.h3_max_concurrent_unistream)
+      .max_idle_timeout(self.globals.proxy_config.h3_max_idle_timeout);
 
     let mut server_config_h3 = QuicServerConfig::with_crypto(Arc::new(rustls_server_config));
     server_config_h3.transport = Arc::new(transport_config_quic);
-    server_config_h3.concurrent_connections(self.globals.h3_max_concurrent_connections);
+    server_config_h3.concurrent_connections(self.globals.proxy_config.h3_max_concurrent_connections);
     let endpoint = Endpoint::server(server_config_h3, self.listening_on)?;
 
     let mut server_crypto: Option<Arc<ServerCrypto>> = None;
@@ -146,29 +133,24 @@ where
             continue;
           }
           let mut conn: quinn::Connecting = new_conn.unwrap();
-          let hsd = match conn.handshake_data().await {
-            Ok(h) => h,
-            Err(_) => continue
+          let Ok(hsd) = conn.handshake_data().await else {
+            continue
           };
 
-          let hsd_downcast = match hsd.downcast::<HandshakeData>() {
-            Ok(d) => d,
-            Err(_) => continue
+          let Ok(hsd_downcast) = hsd.downcast::<HandshakeData>() else {
+            continue
           };
-          let new_server_name = match hsd_downcast.server_name {
-            Some(sn) => sn.to_server_name_vec(),
-            None => {
-              warn!("HTTP/3 no SNI is given");
-              continue;
-            }
+          let Some(new_server_name) = hsd_downcast.server_name else {
+            warn!("HTTP/3 no SNI is given");
+            continue;
           };
           debug!(
             "HTTP/3 connection incoming (SNI {:?})",
-            new_server_name.0
+            new_server_name
           );
           // TODO: Extracting the server_name here and passing it ever deeper is inefficient. Doing it after connecting -> connections should be enough.
           // TODO: Want to consolidate this with the plain-TLS path into a single enum or similar.
-          let fut = self.clone().connection_serve_h3(conn, new_server_name);
+          let fut = self.clone().connection_serve_h3(conn, new_server_name.to_server_name_vec());
           self.globals.runtime_handle.spawn(async move {
             // Timeout is based on underlying quic
             if let Err(e) = fut.await {
@@ -178,12 +160,18 @@ where
         }
         _ = server_crypto_rx.changed() => {
           if server_crypto_rx.borrow().is_none() {
+            error!("Reloader is broken");
             break;
           }
-          server_crypto = server_crypto_rx.borrow().clone();
-          if server_crypto.is_some(){
-            endpoint.set_server_config(Some(QuicServerConfig::with_crypto(server_crypto.clone().unwrap().inner_global_no_client_auth.clone())));
-          }
+          let cert_keys_map = server_crypto_rx.borrow().clone().unwrap();
+          server_crypto = (&cert_keys_map).try_into().ok();
+          let Some(inner) = server_crypto.clone() else {
+            error!("Failed to update server crypto for h3");
+            break;
+          };
+          endpoint.set_server_config(Some(QuicServerConfig::with_crypto(inner.clone().inner_global_no_client_auth.clone())));
 
         }
         else => break
       }
@@ -193,7 +181,14 @@ where
   }
 
   pub async fn start_with_tls(self, server: Http<LocalExecutor>) -> Result<()> {
-    let (tx, rx) = watch::channel::<Option<Arc<ServerCrypto>>>(None);
+    let (cert_reloader_service, cert_reloader_rx) = ReloaderService::<CryptoReloader, ServerCryptoBase>::new(
+      &self.globals.clone(),
+      CERTS_WATCH_DELAY_SECS,
+      !LOAD_CERTS_ONLY_WHEN_UPDATED,
+    )
+    .await
+    .map_err(|e| anyhow::anyhow!(e))?;
+
     #[cfg(not(feature = "http3"))]
     {
       tokio::select! {
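The two constants passed to ReloaderService::new are presumably defined in the crate's constants module (imported via constants::* above) and are not shown in this diff. Their values below are hypothetical; only the names and the negation at the call site come from the PR itself:

// Hypothetical values shown for illustration; see the crate's constants module for the real ones.
pub const CERTS_WATCH_DELAY_SECS: u32 = 60;          // polling interval handed to ReloaderService::new
pub const LOAD_CERTS_ONLY_WHEN_UPDATED: bool = true; // negated at the call site above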
@@ -212,15 +207,15 @@ where
     }
     #[cfg(feature = "http3")]
     {
-      if self.globals.http3 {
+      if self.globals.proxy_config.http3 {
         tokio::select! {
-          _= self.cert_service(tx) => {
+          _= cert_reloader_service.start() => {
             error!("Cert service for TLS exited");
           },
-          _ = self.listener_service(server, rx.clone()) => {
+          _ = self.listener_service(server, cert_reloader_rx.clone()) => {
             error!("TCP proxy service for TLS exited");
           },
-          _= self.listener_service_h3(rx) => {
+          _= self.listener_service_h3(cert_reloader_rx) => {
             error!("UDP proxy service for QUIC exited");
           },
           else => {
@@ -231,10 +226,10 @@ where
         Ok(())
       } else {
         tokio::select! {
-          _= self.cert_service(tx) => {
+          _= cert_reloader_service.start() => {
             error!("Cert service for TLS exited");
           },
-          _ = self.listener_service(server, rx) => {
+          _ = self.listener_service(server, cert_reloader_rx) => {
             error!("TCP proxy service for TLS exited");
           },
           else => {
@@ -7,6 +7,13 @@ impl From<&[u8]> for ServerNameBytesExp {
     Self(b.to_ascii_lowercase())
   }
 }
+impl TryInto<String> for &ServerNameBytesExp {
+  type Error = anyhow::Error;
+  fn try_into(self) -> Result<String, Self::Error> {
+    let s = std::str::from_utf8(&self.0)?;
+    Ok(s.to_string())
+  }
+}
 
 /// Path name, like "/path/ok", represented in bytes-based struct
 /// for searching hashmap or key list by exact or longest-prefix matching
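A small usage sketch of the new conversion together with the existing BytesName helper used elsewhere in this PR (the demo function and the "example.com" value are illustrative, not project code):

// Illustrative only: round-trip an SNI between &str and ServerNameBytesExp.
use crate::utils::{BytesName, ServerNameBytesExp};

fn demo() -> anyhow::Result<()> {
  let sni: ServerNameBytesExp = "example.com".to_server_name_vec();
  let name: String = (&sni).try_into()?; // uses the TryInto<String> impl added above
  assert_eq!(name, "example.com");
  Ok(())
}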