fix: bug for dynamic reloading of config files

parent 3d60175c11
commit 7c2205f275

5 changed files with 44 additions and 12 deletions
```diff
@@ -10,6 +10,7 @@
 ### Bugfix

 - Fix: fix `server` in the response header (`rpxy_lib` -> `rpxy`)
+- Fix: fix bug for hot-reloading configuration file (Add termination notification receiver in proxy services)

 ## 0.5.0

```
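The changelog entry above names the mechanism: a termination notification receiver in each proxy service, built on `tokio::sync::Notify`. A minimal, self-contained sketch of that primitive (illustration only, not rpxy code):

```rust
use std::sync::Arc;
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let term_notify = Arc::new(Notify::new());

    let waiter = term_notify.clone();
    let service = tokio::spawn(async move {
        // Parked until notify_waiters() fires on the shared notifier.
        waiter.notified().await;
        println!("service received termination notification");
    });

    // notify_waiters() only wakes tasks already awaiting notified(),
    // so give the spawned task a moment to register first.
    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    term_notify.notify_waiters();
    service.await.unwrap();
}
```

`notify_waiters()` stores no permit: it wakes only tasks already parked on `notified()`, which matches the broadcast-style "terminate everything now" semantics this fix needs.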
||||||
5
TODO.md
5
TODO.md
|
|
```diff
@@ -1,8 +1,7 @@
 # TODO List

-- [Done in 0.6.0] ~~Fix strategy for `h2c` requests on forwarded requests upstream. This needs an update of the forwarder definition. Also, the forwarder may need a cache corresponding to the following task.~~
+- [Done in 0.6.0] But we need more sophistication on the `Forwarder` struct. ~~Fix strategy for `h2c` requests on forwarded requests upstream. This needs an update of the forwarder definition. Also, the forwarder may need a cache corresponding to the following task.~~
 - [Try in v0.6.0] **Cache option for the response with `Cache-Control: public` header directive ([#55](https://github.com/junkurihara/rust-rpxy/issues/55))**
-- Fix dynamic reloading of configuration file
 - Improvement of path matcher
 - More flexible option for rewriting path
 - Refactoring
```
```diff
@@ -33,5 +32,7 @@
 ~~Benchmark with other reverse proxy implementations like Sozu ([#58](https://github.com/junkurihara/rust-rpxy/issues/58)). Currently, Sozu works only on `amd64` due to a limitation of its HTTP message parser... Since the main developer has only `arm64` (Apple M1) laptops, we should do that on a VPS?~~
 - Done in v0.4.0:
 ~~Split `rpxy` source codes into `rpxy-lib` and `rpxy-bin` to make the core part (reverse proxy) isolated from the misc part like the toml file loader. This is in order to make the configuration-related part more flexible (related to [#33](https://github.com/junkurihara/rust-rpxy/issues/33))~~
+- Done in 0.6.0:
+~~Fix dynamic reloading of configuration file~~

 - etc.
```
```diff
@@ -84,7 +84,7 @@ async fn rpxy_service_without_watcher(
       return Err(anyhow::anyhow!(e));
     }
   };
-  entrypoint(&proxy_conf, &app_conf, &runtime_handle)
+  entrypoint(&proxy_conf, &app_conf, &runtime_handle, None)
     .await
     .map_err(|e| anyhow::anyhow!(e))
 }
```
```diff
@@ -105,10 +105,13 @@ async fn rpxy_service_with_watcher(
     }
   };

+  // Notifier for proxy service termination
+  let term_notify = std::sync::Arc::new(tokio::sync::Notify::new());
+
   // Continuous monitoring
   loop {
     tokio::select! {
-      _ = entrypoint(&proxy_conf, &app_conf, &runtime_handle) => {
+      _ = entrypoint(&proxy_conf, &app_conf, &runtime_handle, Some(term_notify.clone())) => {
        error!("rpxy entrypoint exited");
        break;
      }
```
```diff
@@ -127,7 +130,9 @@ async fn rpxy_service_with_watcher(
          continue;
        }
      };
-      info!("Configuration updated. Force to re-bind TCP/UDP sockets");
+      info!("Configuration updated. Terminate all spawned proxy services and force to re-bind TCP/UDP sockets");
+      term_notify.notify_waiters();
+      // tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
      }
      else => break
    }
```
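Taken together, the two hunks above give the watcher loop roughly the shape sketched below; `config_rx` is a hypothetical stand-in for rpxy's file-watcher channel, and `entrypoint` is reduced to a pending future (the reloaded configs that feed back into the next iteration are omitted):

```rust
use std::sync::Arc;
use tokio::sync::{mpsc, Notify};

// Stand-in for the real entrypoint: serves until its proxies fail.
async fn entrypoint(_term_notify: Option<Arc<Notify>>) {
    std::future::pending::<()>().await
}

async fn rpxy_service_with_watcher(mut config_rx: mpsc::Receiver<()>) {
    // One notifier shared by every proxy service the entrypoint spawns.
    let term_notify = Arc::new(Notify::new());
    loop {
        tokio::select! {
            _ = entrypoint(Some(term_notify.clone())) => {
                // The stand-in never returns; the real one exits only on fatal error.
                break;
            }
            Some(_) = config_rx.recv() => {
                // Config changed: wake every spawned service so it drops its
                // sockets before the next loop iteration re-binds them.
                term_notify.notify_waiters();
            }
        }
    }
}
```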
```diff
@@ -36,6 +36,7 @@ pub async fn entrypoint<T>(
   proxy_config: &ProxyConfig,
   app_config_list: &AppConfigList<T>,
   runtime_handle: &tokio::runtime::Handle,
+  term_notify: Option<Arc<tokio::sync::Notify>>,
 ) -> Result<()>
 where
   T: CryptoSource + Clone + Send + Sync + 'static,
```
```diff
@@ -68,7 +69,7 @@ where
     runtime_handle: runtime_handle.clone(),
   });

-  // TODO: HTTP2 only client is needed for http2 cleartext case
+  // build message handler including a request forwarder
   let msg_handler = Arc::new(
     HttpMessageHandlerBuilder::default()
       .forwarder(Arc::new(Forwarder::new().await))
```
```diff
@@ -91,7 +92,7 @@ where
       .build()
       .unwrap();

-    globals.runtime_handle.spawn(proxy.start())
+    globals.runtime_handle.spawn(proxy.start(term_notify.clone()))
   }));

   // wait for all futures
```
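The notifier fan-out works because cloning an `Option<Arc<Notify>>` clones the `Arc` inside, so every spawned proxy awaits the same `Notify`. A simplified sketch with hypothetical `Proxy`/`spawn_proxies` names (not the real rpxy types):

```rust
use std::sync::Arc;
use tokio::sync::Notify;

struct Proxy {
    id: usize,
}

impl Proxy {
    async fn start(self, term_notify: Option<Arc<Notify>>) {
        if let Some(term) = term_notify {
            term.notified().await;
            println!("proxy {} terminating", self.id);
        }
    }
}

fn spawn_proxies(handle: &tokio::runtime::Handle, term_notify: Option<Arc<Notify>>) {
    for id in 0..2 {
        // Each clone is a cheap Arc bump; all proxies share one Notify.
        handle.spawn(Proxy { id }.start(term_notify.clone()));
    }
}
```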
```diff
@@ -8,6 +8,7 @@ use std::{net::SocketAddr, sync::Arc};
 use tokio::{
   io::{AsyncRead, AsyncWrite},
   runtime::Handle,
+  sync::Notify,
   time::{timeout, Duration},
 };
```
```diff
@@ -123,7 +124,7 @@ where
   }

   /// Entrypoint for HTTP/1.1 and HTTP/2 servers
-  pub async fn start(self) -> Result<()> {
+  pub async fn start(self, term_notify: Option<Arc<Notify>>) -> Result<()> {
     let mut server = Http::new();
     server.http1_keep_alive(self.globals.proxy_config.keepalive);
     server.http2_max_concurrent_streams(self.globals.proxy_config.max_concurrent_streams);
```
```diff
@@ -131,11 +132,34 @@ where
     let executor = LocalExecutor::new(self.globals.runtime_handle.clone());
     let server = server.with_executor(executor);

+    let listening_on = self.listening_on;
+
+    let proxy_service = async {
     if self.tls_enabled {
-      self.start_with_tls(server).await?;
+        self.start_with_tls(server).await
     } else {
-      self.start_without_tls(server).await?;
+        self.start_without_tls(server).await
     }
+    };
+
+    match term_notify {
+      Some(term) => {
+        tokio::select! {
+          _ = proxy_service => {
+            warn!("Proxy service got down");
+          }
+          _ = term.notified() => {
+            info!("Proxy service listening on {} receives term signal", listening_on);
+          }
+        }
+      }
+      None => {
+        proxy_service.await?;
+        warn!("Proxy service got down");
+      }
+    }
+
+    // proxy_service.await?;

     Ok(())
   }
```
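Why notifying the services is enough to fix hot reloading: when `tokio::select!` finishes on `term.notified()`, the serving future is dropped, which drops its bound listener and frees the port for the re-bind after a reload. A tiny demonstration of that property (address and port arbitrary):

```rust
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let addr = "127.0.0.1:18080";
    let first = TcpListener::bind(addr).await?;
    drop(first); // dropping the listener releases the socket...
    let _second = TcpListener::bind(addr).await?; // ...so re-binding succeeds
    println!("re-bound {addr} after dropping the first listener");
    Ok(())
}
```

The commented-out one-second sleep left in the watcher hunk points at the same concern: giving services a moment to actually release their sockets before the new entrypoint re-binds them.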