fix: bug for dynamic reloading of config files

Jun Kurihara 2023-08-09 14:07:40 +09:00
commit 7c2205f275
No known key found for this signature in database
GPG key ID: D992B3E3DE1DED23
5 changed files with 44 additions and 12 deletions


@@ -10,6 +10,7 @@
### Bugfix
- Fix: fix `server` in the response header (`rpxy_lib` -> `rpxy`)
- Fix: fix a bug in hot-reloading the configuration file (add a termination notification receiver in proxy services)
## 0.5.0
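
The fix noted here relies on `tokio::sync::Notify`, as the diffs below show: the configuration watcher signals termination, and the spawned proxy services wait on that signal. A minimal, self-contained sketch of the pattern, with an illustrative service task that is not taken from the repository:

```rust
use std::sync::Arc;
use tokio::sync::Notify;
use tokio::time::{sleep, Duration};

#[tokio::main]
async fn main() {
    // One notifier shared by the reloader side and the long-running service task.
    let term_notify = Arc::new(Notify::new());
    let receiver = term_notify.clone();

    // Hypothetical service: keeps "serving" until the termination notification arrives.
    let service = tokio::spawn(async move {
        tokio::select! {
            _ = async {
                loop {
                    sleep(Duration::from_millis(50)).await;
                }
            } => {}
            _ = receiver.notified() => {
                println!("service received the termination signal, shutting down");
            }
        }
    });

    // Give the service a moment to start waiting, then wake every current waiter.
    sleep(Duration::from_millis(100)).await;
    term_notify.notify_waiters();
    service.await.unwrap();
}
```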


@@ -1,8 +1,7 @@
# TODO List
- [Done in 0.6.0] ~~Fix strategy for `h2c` requests on forwarded requests upstream. This needs to update forwarder definition. Also, maybe forwarder would have a cache corresponding to the following task.~~
- [Done in 0.6.0] But the `Forwarder` struct still needs refinement. ~~Fix strategy for `h2c` requests on forwarded requests upstream. This needs to update forwarder definition. Also, maybe forwarder would have a cache corresponding to the following task.~~
- [Try in v0.6.0] **Cache option for the response with `Cache-Control: public` header directive ([#55](https://github.com/junkurihara/rust-rpxy/issues/55))**
- Fix dynamic reloading of configuration file
- Improvement of path matcher
- More flexible option for rewriting path
- Refactoring
@@ -33,5 +32,7 @@
~~Benchmark with other reverse proxy implementations like Sozu ([#58](https://github.com/junkurihara/rust-rpxy/issues/58)). Currently, Sozu works only on `amd64` due to a limitation of its HTTP message parser... Since the main developer has only `arm64` (Apple M1) laptops, should we do that on a VPS?~~
- Done in v0.4.0:
~~Split `rpxy` source codes into `rpxy-lib` and `rpxy-bin` to make the core part (reverse proxy) isolated from the misc part like toml file loader. This is in order to make the configuration-related part more flexible (related to [#33](https://github.com/junkurihara/rust-rpxy/issues/33))~~
- Done in 0.6.0:
~~Fix dynamic reloading of configuration file~~
- etc.


@@ -84,7 +84,7 @@ async fn rpxy_service_without_watcher(
return Err(anyhow::anyhow!(e));
}
};
entrypoint(&proxy_conf, &app_conf, &runtime_handle)
entrypoint(&proxy_conf, &app_conf, &runtime_handle, None)
.await
.map_err(|e| anyhow::anyhow!(e))
}
@@ -105,10 +105,13 @@ async fn rpxy_service_with_watcher(
}
};
// Notifier for proxy service termination
let term_notify = std::sync::Arc::new(tokio::sync::Notify::new());
// Continuous monitoring
loop {
tokio::select! {
_ = entrypoint(&proxy_conf, &app_conf, &runtime_handle) => {
_ = entrypoint(&proxy_conf, &app_conf, &runtime_handle, Some(term_notify.clone())) => {
error!("rpxy entrypoint exited");
break;
}
@@ -127,7 +130,9 @@ async fn rpxy_service_with_watcher(
continue;
}
};
info!("Configuration updated. Force to re-bind TCP/UDP sockets");
info!("Configuration updated. Terminate all spawned proxy services and force to re-bind TCP/UDP sockets");
term_notify.notify_waiters();
// tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
}
else => break
}
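
One caveat about the `notify_waiters()` call added above: it wakes only tasks that are already awaiting `notified()` and, unlike `notify_one()`, it stores no permit for waiters that arrive later. The commented-out sleep may be related to this timing concern, though that is an assumption. A small sketch of the difference:

```rust
use tokio::sync::Notify;
use tokio::time::{timeout, Duration};

#[tokio::main]
async fn main() {
    let notify = Notify::new();

    // `notify_waiters` wakes only tasks currently awaiting `notified()`;
    // it stores no permit, so a waiter that starts afterwards is not woken.
    notify.notify_waiters();
    let late = timeout(Duration::from_millis(200), notify.notified()).await;
    assert!(late.is_err(), "late waiter times out instead of being notified");

    // `notify_one`, by contrast, stores a permit when no task is waiting,
    // so a later `notified()` completes immediately.
    notify.notify_one();
    notify.notified().await;
    println!("permit from notify_one was consumed");
}
```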


@@ -36,6 +36,7 @@ pub async fn entrypoint<T>(
proxy_config: &ProxyConfig,
app_config_list: &AppConfigList<T>,
runtime_handle: &tokio::runtime::Handle,
term_notify: Option<Arc<tokio::sync::Notify>>,
) -> Result<()>
where
T: CryptoSource + Clone + Send + Sync + 'static,
@@ -68,7 +69,7 @@
runtime_handle: runtime_handle.clone(),
});
// TODO: HTTP2 only client is needed for http2 cleartext case
// build message handler including a request forwarder
let msg_handler = Arc::new(
HttpMessageHandlerBuilder::default()
.forwarder(Arc::new(Forwarder::new().await))
@@ -91,7 +92,7 @@
.build()
.unwrap();
globals.runtime_handle.spawn(proxy.start())
globals.runtime_handle.spawn(proxy.start(term_notify.clone()))
}));
// wait for all future
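
Each proxy above is spawned with `proxy.start(term_notify.clone())`. Since `term_notify` is an `Option<Arc<tokio::sync::Notify>>`, cloning copies only the `Arc` pointer, so every spawned proxy observes the same notifier and a single `notify_waiters()` reaches all of them; `None` simply means no shutdown signal is wired in. A sketch of that sharing, using hypothetical per-port listener tasks:

```rust
use std::sync::Arc;
use tokio::sync::Notify;
use tokio::time::{sleep, Duration};

#[tokio::main]
async fn main() {
    // One notifier shared by all listener tasks; `None` would mean "no shutdown signal".
    let term_notify: Option<Arc<Notify>> = Some(Arc::new(Notify::new()));

    let mut handles = Vec::new();
    for port in [8080u16, 8443] {
        // Cloning the Option clones the Arc inside it: a new pointer to the same Notify.
        let term = term_notify.clone();
        handles.push(tokio::spawn(async move {
            match term {
                Some(term) => {
                    term.notified().await;
                    println!("listener on port {port} received the termination signal");
                }
                None => println!("listener on port {port} runs without a shutdown path"),
            }
        }));
    }

    // Let both tasks reach `notified().await`, then wake them all with one call.
    sleep(Duration::from_millis(100)).await;
    if let Some(term) = &term_notify {
        term.notify_waiters();
    }
    for handle in handles {
        handle.await.unwrap();
    }
}
```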


@@ -8,6 +8,7 @@ use std::{net::SocketAddr, sync::Arc};
use tokio::{
io::{AsyncRead, AsyncWrite},
runtime::Handle,
sync::Notify,
time::{timeout, Duration},
};
@@ -123,7 +124,7 @@
}
/// Entrypoint for HTTP/1.1 and HTTP/2 servers
pub async fn start(self) -> Result<()> {
pub async fn start(self, term_notify: Option<Arc<Notify>>) -> Result<()> {
let mut server = Http::new();
server.http1_keep_alive(self.globals.proxy_config.keepalive);
server.http2_max_concurrent_streams(self.globals.proxy_config.max_concurrent_streams);
@@ -131,12 +132,35 @@
let executor = LocalExecutor::new(self.globals.runtime_handle.clone());
let server = server.with_executor(executor);
if self.tls_enabled {
self.start_with_tls(server).await?;
} else {
self.start_without_tls(server).await?;
let listening_on = self.listening_on;
let proxy_service = async {
if self.tls_enabled {
self.start_with_tls(server).await
} else {
self.start_without_tls(server).await
}
};
match term_notify {
Some(term) => {
tokio::select! {
_ = proxy_service => {
warn!("Proxy service got down");
}
_ = term.notified() => {
info!("Proxy service listening on {} receives term signal", listening_on);
}
}
}
None => {
proxy_service.await?;
warn!("Proxy service got down");
}
}
// proxy_service.await?;
Ok(())
}
}
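
When `term.notified()` wins the `select!`, the `proxy_service` future is dropped, and with it the listening sockets it holds; releasing those sockets is presumably what lets the watcher loop re-bind the TCP/UDP addresses for the reloaded configuration. A minimal sketch of that effect, with an arbitrary local address:

```rust
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Arbitrary address chosen for illustration.
    let addr = "127.0.0.1:18080";

    // The running proxy holds the listening socket.
    let listener = TcpListener::bind(addr).await?;
    println!("bound {addr}");

    // Dropping the cancelled serving future drops its listener and closes the
    // socket, simulated here with an explicit drop.
    drop(listener);

    // The next iteration of the watcher loop can bind the same address again
    // for the reloaded configuration.
    let _rebound = TcpListener::bind(addr).await?;
    println!("re-bound {addr} for the new configuration");
    Ok(())
}
```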